From 0c6d53b7d6108b00f2cfb5116ee810fef0e10a47 Mon Sep 17 00:00:00 2001
From: Alex Crawford
Date: Mon, 24 Sep 2018 18:45:32 -0700
Subject: [PATCH 1/7] *: remove bazel

This removes all of the files that are directly required by Bazel.
---
 BUILD.bazel | 140 ------------ WORKSPACE | 40 ---- bazel-rules/BUILD.bazel | 0 bazel-rules/gen_test.bzl | 28 --- buildvars.sh | 6 - cmd/openshift-install/BUILD.bazel | 25 --- data/BUILD.bazel | 17 -- examples/BUILD.bazel | 9 - installer/BUILD.bazel | 10 - installer/cmd/tectonic/BUILD.bazel | 24 -- installer/pkg/config-generator/BUILD.bazel | 43 ---- installer/pkg/copy/BUILD.bazel | 15 -- installer/pkg/validate/BUILD.bazel | 20 -- installer/pkg/workflow/BUILD.bazel | 39 ---- pkg/asset/BUILD.bazel | 28 --- pkg/asset/cluster/BUILD.bazel | 23 -- pkg/asset/ignition/BUILD.bazel | 43 ---- pkg/asset/ignition/content/BUILD.bazel | 13 -- pkg/asset/installconfig/BUILD.bazel | 38 ---- pkg/asset/kubeconfig/BUILD.bazel | 29 --- pkg/asset/manifests/BUILD.bazel | 41 ---- .../manifests/content/bootkube/BUILD.bazel | 32 --- .../content/tectonic/ingress/BUILD.bazel | 12 - .../content/tectonic/rbac/BUILD.bazel | 13 -- .../content/tectonic/secrets/BUILD.bazel | 12 - .../content/tectonic/security/BUILD.bazel | 8 - .../content/tectonic/updater/BUILD.bazel | 11 - .../tectonic/updater/appversions/BUILD.bazel | 13 -- .../tectonic/updater/operators/BUILD.bazel | 12 - pkg/asset/metadata/BUILD.bazel | 18 -- pkg/asset/stock/BUILD.bazel | 20 -- pkg/asset/tls/BUILD.bazel | 39 ---- pkg/destroy/BUILD.bazel | 17 -- pkg/destroy/libvirt/BUILD.bazel | 13 -- pkg/ipnet/BUILD.bazel | 14 -- pkg/rhcos/BUILD.bazel | 16 -- pkg/terraform/BUILD.bazel | 13 -- pkg/types/BUILD.bazel | 17 -- pkg/types/config/BUILD.bazel | 36 --- pkg/types/config/aws/BUILD.bazel | 8 - pkg/types/config/libvirt/BUILD.bazel | 16 -- tests/smoke/BUILD.bazel | 22 -- .../vendor/cloud.google.com/go/BUILD.bazel | 9 - .../go/compute/metadata/BUILD.bazel | 14 -- .../cloud.google.com/go/internal/BUILD.bazel | 9 - .../Azure/go-autorest/autorest/BUILD.bazel | 18 -- .../go-autorest/autorest/azure/BUILD.bazel | 22 -- .../go-autorest/autorest/date/BUILD.bazel | 14 -- .../github.com/PuerkitoBio/purell/BUILD.bazel | 15 -- .../github.com/PuerkitoBio/urlesc/BUILD.bazel | 9 - .../beorn7/perks/quantile/BUILD.bazel | 9 - .../coreos/ktestutil/testworkload/BUILD.bazel | 21 -- .../davecgh/go-spew/spew/BUILD.bazel | 17 -- .../github.com/dgrijalva/jwt-go/BUILD.bazel | 24 -- .../docker/distribution/BUILD.bazel | 21 -- .../docker/distribution/digest/BUILD.bazel | 15 -- .../docker/distribution/reference/BUILD.bazel | 13 -- .../github.com/docker/engine-api/BUILD.bazel | 9 - .../docker/engine-api/client/BUILD.bazel | 83 ------- .../engine-api/client/transport/BUILD.bazel | 13 -- .../client/transport/cancellable/BUILD.bazel | 17 -- .../docker/engine-api/types/BUILD.bazel | 24 -- .../engine-api/types/blkiodev/BUILD.bazel | 9 - .../engine-api/types/container/BUILD.bazel | 20 -- .../engine-api/types/filters/BUILD.bazel | 10 - .../engine-api/types/network/BUILD.bazel | 9 - .../engine-api/types/reference/BUILD.bazel | 10 - .../engine-api/types/registry/BUILD.bazel | 9 - .../engine-api/types/strslice/BUILD.bazel | 9 - .../docker/engine-api/types/time/BUILD.bazel | 9 - .../engine-api/types/versions/BUILD.bazel | 9 - .../docker/go-connections/BUILD.bazel | 9 - .../docker/go-connections/nat/BUILD.bazel | 13 -- .../docker/go-connections/sockets/BUILD.bazel | 37 ---- .../go-connections/tlsconfig/BUILD.bazel | 14 -- .../github.com/docker/go-units/BUILD.bazel | 13 --
.../emicklei/go-restful-swagger12/BUILD.bazel | 24 -- .../emicklei/go-restful/BUILD.bazel | 37 ---- .../emicklei/go-restful/log/BUILD.bazel | 9 - .../github.com/evanphx/json-patch/BUILD.bazel | 12 - .../exponent-io/jsonpath/BUILD.bazel | 13 -- .../github.com/fatih/camelcase/BUILD.bazel | 9 - .../vendor/github.com/ghodss/yaml/BUILD.bazel | 13 -- .../go-openapi/analysis/BUILD.bazel | 14 -- .../go-openapi/jsonpointer/BUILD.bazel | 10 - .../go-openapi/jsonreference/BUILD.bazel | 13 -- .../github.com/go-openapi/loads/BUILD.bazel | 14 -- .../github.com/go-openapi/spec/BUILD.bazel | 36 --- .../github.com/go-openapi/swag/BUILD.bazel | 21 -- .../gogo/protobuf/proto/BUILD.bazel | 33 --- .../gogo/protobuf/sortkeys/BUILD.bazel | 9 - .../vendor/github.com/golang/glog/BUILD.bazel | 12 - .../github.com/golang/groupcache/BUILD.bazel | 22 -- .../golang/groupcache/lru/BUILD.bazel | 9 - .../golang/protobuf/proto/BUILD.bazel | 21 -- .../github.com/google/gofuzz/BUILD.bazel | 12 - .../hashicorp/golang-lru/BUILD.bazel | 14 -- .../golang-lru/simplelru/BUILD.bazel | 9 - .../github.com/howeyc/gopass/BUILD.bazel | 49 ----- .../github.com/imdario/mergo/BUILD.bazel | 14 -- .../inconshreveable/mousetrap/BUILD.bazel | 13 -- .../github.com/juju/ratelimit/BUILD.bazel | 12 - .../github.com/mailru/easyjson/BUILD.bazel | 16 -- .../mailru/easyjson/buffer/BUILD.bazel | 9 - .../mailru/easyjson/jlexer/BUILD.bazel | 12 - .../mailru/easyjson/jwriter/BUILD.bazel | 10 - .../pbutil/BUILD.bazel | 14 -- .../github.com/pborman/uuid/BUILD.bazel | 20 -- .../client_golang/prometheus/BUILD.bazel | 37 ---- .../prometheus/promhttp/BUILD.bazel | 21 -- .../prometheus/client_model/go/BUILD.bazel | 10 - .../prometheus/common/expfmt/BUILD.bazel | 22 -- .../bitbucket.org/ww/goautoneg/BUILD.bazel | 9 - .../prometheus/common/model/BUILD.bazel | 21 -- .../github.com/prometheus/procfs/BUILD.bazel | 23 -- .../prometheus/procfs/xfs/BUILD.bazel | 12 - .../vendor/github.com/spf13/cobra/BUILD.bazel | 23 -- .../github.com/spf13/cobra/doc/BUILD.bazel | 18 -- .../vendor/github.com/spf13/pflag/BUILD.bazel | 36 --- .../github.com/ugorji/go/codec/BUILD.bazel | 34 --- .../golang.org/x/crypto/ssh/BUILD.bazel | 34 --- .../golang.org/x/crypto/ssh/agent/BUILD.bazel | 18 -- .../x/crypto/ssh/terminal/BUILD.bazel | 23 -- .../golang.org/x/net/context/BUILD.bazel | 13 -- .../x/net/context/ctxhttp/BUILD.bazel | 13 -- .../vendor/golang.org/x/net/http2/BUILD.bazel | 38 ---- .../golang.org/x/net/http2/hpack/BUILD.bazel | 14 -- .../vendor/golang.org/x/net/idna/BUILD.bazel | 12 - .../golang.org/x/net/lex/httplex/BUILD.bazel | 10 - .../vendor/golang.org/x/oauth2/BUILD.bazel | 17 -- .../golang.org/x/oauth2/google/BUILD.bazel | 23 -- .../golang.org/x/oauth2/internal/BUILD.bazel | 14 -- .../golang.org/x/oauth2/jws/BUILD.bazel | 9 - .../golang.org/x/oauth2/jwt/BUILD.bazel | 15 -- .../vendor/golang.org/x/sys/unix/BUILD.bazel | 168 -------------- .../vendor/golang.org/x/text/BUILD.bazel | 9 - .../golang.org/x/text/cases/BUILD.bazel | 22 -- .../golang.org/x/text/encoding/BUILD.bazel | 13 -- .../x/text/encoding/internal/BUILD.bazel | 14 -- .../encoding/internal/identifier/BUILD.bazel | 12 - .../x/text/encoding/unicode/BUILD.bazel | 20 -- .../x/text/internal/tag/BUILD.bazel | 9 - .../x/text/internal/utf8internal/BUILD.bazel | 9 - .../golang.org/x/text/language/BUILD.bazel | 22 -- .../golang.org/x/text/runes/BUILD.bazel | 13 -- .../x/text/secure/bidirule/BUILD.bazel | 13 -- .../x/text/secure/precis/BUILD.bazel | 28 --- .../golang.org/x/text/transform/BUILD.bazel | 9 - 
.../x/text/unicode/bidi/BUILD.bazel | 16 -- .../x/text/unicode/norm/BUILD.bazel | 20 -- .../golang.org/x/text/width/BUILD.bazel | 16 -- .../google.golang.org/appengine/BUILD.bazel | 23 -- .../appengine/internal/BUILD.bazel | 27 --- .../internal/app_identity/BUILD.bazel | 10 - .../appengine/internal/base/BUILD.bazel | 10 - .../appengine/internal/datastore/BUILD.bazel | 10 - .../appengine/internal/log/BUILD.bazel | 10 - .../appengine/internal/modules/BUILD.bazel | 10 - .../appengine/internal/remote_api/BUILD.bazel | 10 - .../appengine/internal/urlfetch/BUILD.bazel | 10 - .../appengine/urlfetch/BUILD.bazel | 15 -- .../smoke/vendor/gopkg.in/inf.v0/BUILD.bazel | 12 - .../smoke/vendor/gopkg.in/yaml.v2/BUILD.bazel | 23 -- .../apimachinery/pkg/api/equality/BUILD.bazel | 16 -- .../apimachinery/pkg/api/errors/BUILD.bazel | 18 -- .../apimachinery/pkg/api/meta/BUILD.bazel | 33 --- .../apimachinery/pkg/api/resource/BUILD.bazel | 24 -- .../pkg/api/validation/BUILD.bazel | 23 -- .../apimachinery/pkg/apimachinery/BUILD.bazel | 17 -- .../pkg/apimachinery/announced/BUILD.bazel | 21 -- .../pkg/apimachinery/registered/BUILD.bazel | 16 -- .../apimachinery/pkg/apis/meta/v1/BUILD.bazel | 45 ---- .../pkg/apis/meta/v1/unstructured/BUILD.bazel | 19 -- .../pkg/apis/meta/v1/validation/BUILD.bazel | 14 -- .../pkg/apis/meta/v1alpha1/BUILD.bazel | 24 -- .../apimachinery/pkg/conversion/BUILD.bazel | 16 -- .../pkg/conversion/queryparams/BUILD.bazel | 12 - .../pkg/conversion/unstructured/BUILD.bazel | 19 -- .../apimachinery/pkg/fields/BUILD.bazel | 15 -- .../apimachinery/pkg/labels/BUILD.bazel | 19 -- .../apimachinery/pkg/openapi/BUILD.bazel | 16 -- .../apimachinery/pkg/runtime/BUILD.bazel | 34 --- .../pkg/runtime/schema/BUILD.bazel | 14 -- .../pkg/runtime/serializer/BUILD.bazel | 21 -- .../pkg/runtime/serializer/json/BUILD.bazel | 21 -- .../runtime/serializer/protobuf/BUILD.bazel | 19 -- .../runtime/serializer/recognizer/BUILD.bazel | 13 -- .../runtime/serializer/streaming/BUILD.bazel | 13 -- .../runtime/serializer/versioning/BUILD.bazel | 14 -- .../apimachinery/pkg/selection/BUILD.bazel | 9 - .../k8s.io/apimachinery/pkg/types/BUILD.bazel | 15 -- .../apimachinery/pkg/util/cache/BUILD.bazel | 13 -- .../apimachinery/pkg/util/clock/BUILD.bazel | 9 - .../apimachinery/pkg/util/diff/BUILD.bazel | 13 -- .../apimachinery/pkg/util/errors/BUILD.bazel | 12 - .../apimachinery/pkg/util/framer/BUILD.bazel | 9 - .../apimachinery/pkg/util/intstr/BUILD.bazel | 19 -- .../apimachinery/pkg/util/json/BUILD.bazel | 9 - .../pkg/util/mergepatch/BUILD.bazel | 16 -- .../apimachinery/pkg/util/net/BUILD.bazel | 20 -- .../apimachinery/pkg/util/rand/BUILD.bazel | 9 - .../apimachinery/pkg/util/runtime/BUILD.bazel | 10 - .../apimachinery/pkg/util/sets/BUILD.bazel | 16 -- .../pkg/util/strategicpatch/BUILD.bazel | 14 -- .../apimachinery/pkg/util/uuid/BUILD.bazel | 13 -- .../pkg/util/validation/BUILD.bazel | 9 - .../pkg/util/validation/field/BUILD.bazel | 16 -- .../apimachinery/pkg/util/wait/BUILD.bazel | 13 -- .../apimachinery/pkg/util/yaml/BUILD.bazel | 13 -- .../apimachinery/pkg/version/BUILD.bazel | 12 - .../k8s.io/apimachinery/pkg/watch/BUILD.bazel | 24 -- .../forked/golang/json/BUILD.bazel | 9 - .../forked/golang/reflect/BUILD.bazel | 12 - .../apiserver/pkg/apis/audit/BUILD.bazel | 22 -- .../authentication/authenticator/BUILD.bazel | 10 - .../authentication/serviceaccount/BUILD.bazel | 10 - .../pkg/authentication/user/BUILD.bazel | 12 - .../pkg/endpoints/request/BUILD.bazel | 22 -- .../k8s.io/apiserver/pkg/features/BUILD.bazel | 10 - 
.../apiserver/pkg/util/feature/BUILD.bazel | 13 -- .../apiserver/pkg/util/flag/BUILD.bazel | 19 -- .../k8s.io/client-go/discovery/BUILD.bazel | 31 --- .../k8s.io/client-go/dynamic/BUILD.bazel | 28 --- .../k8s.io/client-go/kubernetes/BUILD.bazel | 38 ---- .../client-go/kubernetes/scheme/BUILD.bazel | 38 ---- .../v1alpha1/BUILD.bazel | 24 -- .../kubernetes/typed/apps/v1beta1/BUILD.bazel | 26 --- .../typed/authentication/v1/BUILD.bazel | 21 -- .../typed/authentication/v1beta1/BUILD.bazel | 21 -- .../typed/authorization/v1/BUILD.bazel | 25 --- .../typed/authorization/v1beta1/BUILD.bazel | 25 --- .../typed/autoscaling/v1/BUILD.bazel | 23 -- .../typed/autoscaling/v2alpha1/BUILD.bazel | 23 -- .../kubernetes/typed/batch/v1/BUILD.bazel | 23 -- .../typed/batch/v2alpha1/BUILD.bazel | 23 -- .../typed/certificates/v1beta1/BUILD.bazel | 24 -- .../kubernetes/typed/core/v1/BUILD.bazel | 48 ---- .../typed/extensions/v1beta1/BUILD.bazel | 33 --- .../typed/networking/v1/BUILD.bazel | 23 -- .../typed/policy/v1beta1/BUILD.bazel | 25 --- .../typed/rbac/v1alpha1/BUILD.bazel | 26 --- .../kubernetes/typed/rbac/v1beta1/BUILD.bazel | 26 --- .../typed/settings/v1alpha1/BUILD.bazel | 23 -- .../kubernetes/typed/storage/v1/BUILD.bazel | 23 -- .../typed/storage/v1beta1/BUILD.bazel | 23 -- .../k8s.io/client-go/pkg/api/BUILD.bazel | 35 --- .../k8s.io/client-go/pkg/api/v1/BUILD.bazel | 46 ---- .../client-go/pkg/api/v1/ref/BUILD.bazel | 14 -- .../apis/admissionregistration/BUILD.bazel | 20 -- .../v1alpha1/BUILD.bazel | 30 --- .../client-go/pkg/apis/apps/BUILD.bazel | 22 -- .../pkg/apis/apps/v1beta1/BUILD.bazel | 37 ---- .../pkg/apis/authentication/BUILD.bazel | 20 -- .../pkg/apis/authentication/v1/BUILD.bazel | 29 --- .../apis/authentication/v1beta1/BUILD.bazel | 32 --- .../pkg/apis/authorization/BUILD.bazel | 20 -- .../pkg/apis/authorization/v1/BUILD.bazel | 32 --- .../apis/authorization/v1beta1/BUILD.bazel | 32 --- .../pkg/apis/autoscaling/BUILD.bazel | 23 -- .../pkg/apis/autoscaling/v1/BUILD.bazel | 34 --- .../pkg/apis/autoscaling/v2alpha1/BUILD.bazel | 33 --- .../client-go/pkg/apis/batch/BUILD.bazel | 21 -- .../client-go/pkg/apis/batch/v1/BUILD.bazel | 35 --- .../pkg/apis/batch/v2alpha1/BUILD.bazel | 36 --- .../pkg/apis/certificates/BUILD.bazel | 21 -- .../pkg/apis/certificates/v1beta1/BUILD.bazel | 33 --- .../client-go/pkg/apis/extensions/BUILD.bazel | 24 -- .../pkg/apis/extensions/v1beta1/BUILD.bazel | 38 ---- .../client-go/pkg/apis/networking/BUILD.bazel | 22 -- .../pkg/apis/networking/v1/BUILD.bazel | 35 --- .../client-go/pkg/apis/policy/BUILD.bazel | 21 -- .../pkg/apis/policy/v1beta1/BUILD.bazel | 31 --- .../client-go/pkg/apis/rbac/BUILD.bazel | 22 -- .../pkg/apis/rbac/v1alpha1/BUILD.bazel | 32 --- .../pkg/apis/rbac/v1beta1/BUILD.bazel | 31 --- .../client-go/pkg/apis/settings/BUILD.bazel | 21 -- .../pkg/apis/settings/v1alpha1/BUILD.bazel | 28 --- .../client-go/pkg/apis/storage/BUILD.bazel | 20 -- .../client-go/pkg/apis/storage/v1/BUILD.bazel | 27 --- .../pkg/apis/storage/v1beta1/BUILD.bazel | 30 --- .../k8s.io/client-go/pkg/util/BUILD.bazel | 15 -- .../client-go/pkg/util/parsers/BUILD.bazel | 10 - .../k8s.io/client-go/pkg/version/BUILD.bazel | 14 -- .../plugin/pkg/client/auth/BUILD.bazel | 14 -- .../plugin/pkg/client/auth/azure/BUILD.bazel | 15 -- .../plugin/pkg/client/auth/gcp/BUILD.bazel | 18 -- .../plugin/pkg/client/auth/oidc/BUILD.bazel | 14 -- .../vendor/k8s.io/client-go/rest/BUILD.bazel | 40 ---- .../k8s.io/client-go/rest/watch/BUILD.bazel | 18 -- .../forked/golang/template/BUILD.bazel | 12 - 
.../k8s.io/client-go/tools/auth/BUILD.bazel | 10 - .../k8s.io/client-go/tools/cache/BUILD.bazel | 46 ---- .../client-go/tools/clientcmd/BUILD.bazel | 35 --- .../client-go/tools/clientcmd/api/BUILD.bazel | 17 -- .../tools/clientcmd/api/latest/BUILD.bazel | 17 -- .../tools/clientcmd/api/v1/BUILD.bazel | 19 -- .../client-go/tools/metrics/BUILD.bazel | 9 - .../k8s.io/client-go/tools/record/BUILD.bazel | 29 --- .../k8s.io/client-go/transport/BUILD.bazel | 18 -- .../k8s.io/client-go/util/cert/BUILD.bazel | 14 -- .../client-go/util/flowcontrol/BUILD.bazel | 17 -- .../k8s.io/client-go/util/homedir/BUILD.bazel | 9 - .../k8s.io/client-go/util/integer/BUILD.bazel | 9 - .../client-go/util/jsonpath/BUILD.bazel | 15 -- .../client-go/util/workqueue/BUILD.bazel | 22 -- .../cmd/kubeadm/app/apis/kubeadm/BUILD.bazel | 20 -- .../federation/apis/federation/BUILD.bazel | 23 -- .../apis/federation/install/BUILD.bazel | 18 -- .../apis/federation/v1beta1/BUILD.bazel | 33 --- .../federation_internalclientset/BUILD.bazel | 23 -- .../scheme/BUILD.bazel | 25 --- .../autoscaling/internalversion/BUILD.bazel | 22 -- .../typed/batch/internalversion/BUILD.bazel | 22 -- .../typed/core/internalversion/BUILD.bazel | 27 --- .../extensions/internalversion/BUILD.bazel | 25 --- .../federation/internalversion/BUILD.bazel | 22 -- .../k8s.io/kubernetes/pkg/api/BUILD.bazel | 35 --- .../kubernetes/pkg/api/events/BUILD.bazel | 10 - .../kubernetes/pkg/api/helper/BUILD.bazel | 19 -- .../kubernetes/pkg/api/helper/qos/BUILD.bazel | 14 -- .../kubernetes/pkg/api/install/BUILD.bazel | 17 -- .../k8s.io/kubernetes/pkg/api/pod/BUILD.bazel | 13 -- .../k8s.io/kubernetes/pkg/api/ref/BUILD.bazel | 14 -- .../kubernetes/pkg/api/resource/BUILD.bazel | 13 -- .../kubernetes/pkg/api/service/BUILD.bazel | 14 -- .../kubernetes/pkg/api/util/BUILD.bazel | 9 - .../k8s.io/kubernetes/pkg/api/v1/BUILD.bazel | 46 ---- .../kubernetes/pkg/api/v1/helper/BUILD.bazel | 16 -- .../pkg/api/v1/helper/qos/BUILD.bazel | 14 -- .../kubernetes/pkg/api/v1/pod/BUILD.bazel | 14 -- .../kubernetes/pkg/api/v1/ref/BUILD.bazel | 14 -- .../kubernetes/pkg/api/validation/BUILD.bazel | 44 ---- .../apis/admissionregistration/BUILD.bazel | 20 -- .../admissionregistration/install/BUILD.bazel | 18 -- .../v1alpha1/BUILD.bazel | 30 --- .../kubernetes/pkg/apis/apps/BUILD.bazel | 22 -- .../pkg/apis/apps/install/BUILD.bazel | 17 -- .../pkg/apis/apps/v1beta1/BUILD.bazel | 37 ---- .../pkg/apis/authentication/BUILD.bazel | 20 -- .../apis/authentication/install/BUILD.bazel | 19 -- .../pkg/apis/authentication/v1/BUILD.bazel | 29 --- .../apis/authentication/v1beta1/BUILD.bazel | 32 --- .../pkg/apis/authorization/BUILD.bazel | 20 -- .../apis/authorization/install/BUILD.bazel | 19 -- .../pkg/apis/authorization/v1/BUILD.bazel | 32 --- .../apis/authorization/v1beta1/BUILD.bazel | 32 --- .../pkg/apis/autoscaling/BUILD.bazel | 23 -- .../pkg/apis/autoscaling/install/BUILD.bazel | 18 -- .../pkg/apis/autoscaling/v1/BUILD.bazel | 34 --- .../pkg/apis/autoscaling/v2alpha1/BUILD.bazel | 33 --- .../kubernetes/pkg/apis/batch/BUILD.bazel | 21 -- .../pkg/apis/batch/install/BUILD.bazel | 18 -- .../kubernetes/pkg/apis/batch/v1/BUILD.bazel | 35 --- .../pkg/apis/batch/v2alpha1/BUILD.bazel | 36 --- .../pkg/apis/certificates/BUILD.bazel | 21 -- .../pkg/apis/certificates/install/BUILD.bazel | 18 -- .../pkg/apis/certificates/v1beta1/BUILD.bazel | 33 --- .../pkg/apis/componentconfig/BUILD.bazel | 24 -- .../apis/componentconfig/install/BUILD.bazel | 17 -- .../apis/componentconfig/v1alpha1/BUILD.bazel | 32 --- 
.../pkg/apis/extensions/BUILD.bazel | 24 -- .../pkg/apis/extensions/install/BUILD.bazel | 18 -- .../pkg/apis/extensions/v1beta1/BUILD.bazel | 38 ---- .../pkg/apis/networking/BUILD.bazel | 22 -- .../pkg/apis/networking/install/BUILD.bazel | 17 -- .../pkg/apis/networking/v1/BUILD.bazel | 35 --- .../kubernetes/pkg/apis/policy/BUILD.bazel | 21 -- .../pkg/apis/policy/install/BUILD.bazel | 17 -- .../pkg/apis/policy/v1beta1/BUILD.bazel | 31 --- .../kubernetes/pkg/apis/rbac/BUILD.bazel | 22 -- .../pkg/apis/rbac/install/BUILD.bazel | 19 -- .../pkg/apis/rbac/v1alpha1/BUILD.bazel | 32 --- .../pkg/apis/rbac/v1beta1/BUILD.bazel | 31 --- .../kubernetes/pkg/apis/settings/BUILD.bazel | 21 -- .../pkg/apis/settings/install/BUILD.bazel | 17 -- .../pkg/apis/settings/v1alpha1/BUILD.bazel | 28 --- .../kubernetes/pkg/apis/storage/BUILD.bazel | 20 -- .../pkg/apis/storage/install/BUILD.bazel | 19 -- .../pkg/apis/storage/util/BUILD.bazel | 10 - .../pkg/apis/storage/v1/BUILD.bazel | 27 --- .../pkg/apis/storage/v1beta1/BUILD.bazel | 30 --- .../kubernetes/pkg/capabilities/BUILD.bazel | 12 - .../clientset_generated/clientset/BUILD.bazel | 52 ----- .../clientset/scheme/BUILD.bazel | 38 ---- .../v1alpha1/BUILD.bazel | 24 -- .../clientset/typed/apps/v1beta1/BUILD.bazel | 26 --- .../typed/authentication/v1/BUILD.bazel | 21 -- .../typed/authentication/v1beta1/BUILD.bazel | 21 -- .../typed/authorization/v1/BUILD.bazel | 25 --- .../typed/authorization/v1beta1/BUILD.bazel | 25 --- .../typed/autoscaling/v1/BUILD.bazel | 23 -- .../typed/autoscaling/v2alpha1/BUILD.bazel | 23 -- .../clientset/typed/batch/v1/BUILD.bazel | 23 -- .../typed/batch/v2alpha1/BUILD.bazel | 23 -- .../typed/certificates/v1beta1/BUILD.bazel | 24 -- .../clientset/typed/core/v1/BUILD.bazel | 48 ---- .../typed/extensions/v1beta1/BUILD.bazel | 33 --- .../clientset/typed/networking/v1/BUILD.bazel | 23 -- .../typed/policy/v1beta1/BUILD.bazel | 25 --- .../clientset/typed/rbac/v1alpha1/BUILD.bazel | 26 --- .../clientset/typed/rbac/v1beta1/BUILD.bazel | 26 --- .../typed/settings/v1alpha1/BUILD.bazel | 23 -- .../clientset/typed/storage/v1/BUILD.bazel | 23 -- .../typed/storage/v1beta1/BUILD.bazel | 23 -- .../internalclientset/BUILD.bazel | 32 --- .../internalclientset/scheme/BUILD.bazel | 36 --- .../internalversion/BUILD.bazel | 23 -- .../typed/apps/internalversion/BUILD.bazel | 23 -- .../internalversion/BUILD.bazel | 20 -- .../authorization/internalversion/BUILD.bazel | 24 -- .../autoscaling/internalversion/BUILD.bazel | 22 -- .../typed/batch/internalversion/BUILD.bazel | 23 -- .../certificates/internalversion/BUILD.bazel | 23 -- .../typed/core/internalversion/BUILD.bazel | 47 ---- .../extensions/internalversion/BUILD.bazel | 33 --- .../networking/internalversion/BUILD.bazel | 22 -- .../typed/policy/internalversion/BUILD.bazel | 24 -- .../typed/rbac/internalversion/BUILD.bazel | 25 --- .../settings/internalversion/BUILD.bazel | 22 -- .../typed/storage/internalversion/BUILD.bazel | 22 -- .../externalversions/apps/v1beta1/BUILD.bazel | 24 -- .../externalversions/core/v1/BUILD.bazel | 37 ---- .../extensions/v1beta1/BUILD.bazel | 27 --- .../internalinterfaces/BUILD.bazel | 14 -- .../leaderelection/resourcelock/BUILD.bazel | 20 -- .../client/listers/apps/v1beta1/BUILD.bazel | 24 -- .../pkg/client/listers/core/v1/BUILD.bazel | 37 ---- .../listers/extensions/v1beta1/BUILD.bazel | 30 --- .../kubernetes/pkg/client/retry/BUILD.bazel | 13 -- .../pkg/client/unversioned/BUILD.bazel | 30 --- .../kubernetes/pkg/controller/BUILD.bazel | 55 ----- .../pkg/controller/daemon/BUILD.bazel 
| 54 ----- .../pkg/controller/daemon/util/BUILD.bazel | 19 -- .../controller/deployment/util/BUILD.bazel | 37 ---- .../pkg/credentialprovider/BUILD.bazel | 21 -- .../kubernetes/pkg/features/BUILD.bazel | 13 -- .../kubernetes/pkg/fieldpath/BUILD.bazel | 13 -- .../k8s.io/kubernetes/pkg/kubectl/BUILD.bazel | 109 --------- .../pkg/kubectl/cmd/util/BUILD.bazel | 69 ------ .../pkg/kubectl/cmd/util/openapi/BUILD.bazel | 23 -- .../pkg/kubectl/plugins/BUILD.bazel | 20 -- .../pkg/kubectl/resource/BUILD.bazel | 41 ---- .../kubernetes/pkg/kubectl/util/BUILD.bazel | 13 -- .../kubernetes/pkg/kubelet/apis/BUILD.bazel | 12 - .../kubernetes/pkg/kubelet/qos/BUILD.bazel | 16 -- .../kubernetes/pkg/kubelet/types/BUILD.bazel | 20 -- .../kubernetes/pkg/master/ports/BUILD.bazel | 12 - .../kubernetes/pkg/printers/BUILD.bazel | 35 --- .../pkg/printers/internalversion/BUILD.bazel | 63 ------ .../pkg/registry/rbac/validation/BUILD.bazel | 24 -- .../pkg/security/apparmor/BUILD.bazel | 19 -- .../kubernetes/pkg/serviceaccount/BUILD.bazel | 22 -- .../k8s.io/kubernetes/pkg/util/BUILD.bazel | 15 -- .../kubernetes/pkg/util/exec/BUILD.bazel | 13 -- .../kubernetes/pkg/util/hash/BUILD.bazel | 10 - .../kubernetes/pkg/util/labels/BUILD.bazel | 13 -- .../kubernetes/pkg/util/metrics/BUILD.bazel | 15 -- .../kubernetes/pkg/util/mount/BUILD.bazel | 26 --- .../kubernetes/pkg/util/net/sets/BUILD.bazel | 12 - .../kubernetes/pkg/util/node/BUILD.bazel | 19 -- .../kubernetes/pkg/util/parsers/BUILD.bazel | 10 - .../kubernetes/pkg/util/slice/BUILD.bazel | 10 - .../k8s.io/kubernetes/pkg/version/BUILD.bazel | 14 -- .../kubernetes/pkg/volume/util/BUILD.bazel | 65 ------ .../pkg/scheduler/algorithm/BUILD.bazel | 22 -- .../algorithm/predicates/BUILD.bazel | 35 --- .../algorithm/priorities/util/BUILD.bazel | 21 -- .../plugin/pkg/scheduler/api/BUILD.bazel | 20 -- .../pkg/scheduler/schedulercache/BUILD.bazel | 27 --- .../plugin/pkg/scheduler/util/BUILD.bazel | 17 -- .../metrics/pkg/apis/metrics/BUILD.bazel | 24 -- .../pkg/apis/metrics/v1alpha1/BUILD.bazel | 30 --- .../clientset_generated/clientset/BUILD.bazel | 19 -- .../clientset/scheme/BUILD.bazel | 19 -- .../typed/metrics/v1alpha1/BUILD.bazel | 23 -- tests/smoke/vendor/vbom.ml/util/BUILD.bazel | 12 - .../vendor/vbom.ml/util/sortorder/BUILD.bazel | 12 - vendor/bitbucket.org/ww/goautoneg/BUILD.bazel | 9 - .../github.com/AlecAivazis/survey/BUILD.bazel | 24 -- .../NYTimes/gziphandler/BUILD.bazel | 12 - .../github.com/PuerkitoBio/purell/BUILD.bazel | 15 -- .../github.com/PuerkitoBio/urlesc/BUILD.bazel | 9 - .../github.com/ajeddeloh/go-json/BUILD.bazel | 17 -- .../alecthomas/template/BUILD.bazel | 16 -- .../alecthomas/template/parse/BUILD.bazel | 13 -- .../github.com/alecthomas/units/BUILD.bazel | 14 -- .../apparentlymart/go-cidr/cidr/BUILD.bazel | 12 - vendor/github.com/aws/aws-sdk-go/BUILD.bazel | 9 - .../github.com/aws/aws-sdk-go/aws/BUILD.bazel | 29 --- .../aws/aws-sdk-go/aws/awserr/BUILD.bazel | 12 - .../aws/aws-sdk-go/aws/awsutil/BUILD.bazel | 16 -- .../aws/aws-sdk-go/aws/client/BUILD.bazel | 19 -- .../aws/client/metadata/BUILD.bazel | 9 - .../aws-sdk-go/aws/corehandlers/BUILD.bazel | 19 -- .../aws-sdk-go/aws/credentials/BUILD.bazel | 20 -- .../aws/credentials/ec2rolecreds/BUILD.bazel | 16 -- .../aws/credentials/endpointcreds/BUILD.bazel | 17 -- .../aws/credentials/stscreds/BUILD.bazel | 16 -- .../aws/aws-sdk-go/aws/csm/BUILD.bazel | 20 -- .../aws/aws-sdk-go/aws/defaults/BUILD.bazel | 24 -- .../aws-sdk-go/aws/ec2metadata/BUILD.bazel | 21 -- .../aws/aws-sdk-go/aws/endpoints/BUILD.bazel | 16 -- 
.../aws/aws-sdk-go/aws/request/BUILD.bazel | 32 --- .../aws/aws-sdk-go/aws/session/BUILD.bazel | 27 --- .../aws/aws-sdk-go/aws/signer/v4/BUILD.bazel | 21 -- .../aws/aws-sdk-go/internal/sdkio/BUILD.bazel | 12 - .../aws-sdk-go/internal/sdkrand/BUILD.bazel | 9 - .../aws-sdk-go/internal/sdkuri/BUILD.bazel | 9 - .../internal/shareddefaults/BUILD.bazel | 9 - .../aws-sdk-go/private/protocol/BUILD.bazel | 20 -- .../private/protocol/ec2query/BUILD.bazel | 18 -- .../private/protocol/eventstream/BUILD.bazel | 18 -- .../eventstream/eventstreamapi/BUILD.bazel | 17 -- .../private/protocol/query/BUILD.bazel | 19 -- .../protocol/query/queryutil/BUILD.bazel | 10 - .../private/protocol/rest/BUILD.bazel | 19 -- .../private/protocol/restxml/BUILD.bazel | 16 -- .../private/protocol/xml/xmlutil/BUILD.bazel | 14 -- .../service/autoscaling/BUILD.bazel | 25 --- .../aws/aws-sdk-go/service/ec2/BUILD.bazel | 28 --- .../aws/aws-sdk-go/service/elb/BUILD.bazel | 24 -- .../aws/aws-sdk-go/service/iam/BUILD.bazel | 25 --- .../aws-sdk-go/service/route53/BUILD.bazel | 27 --- .../aws/aws-sdk-go/service/s3/BUILD.bazel | 40 ---- .../aws-sdk-go/service/s3/s3iface/BUILD.bazel | 14 -- .../service/s3/s3manager/BUILD.bazel | 25 --- .../aws/aws-sdk-go/service/sts/BUILD.bazel | 24 -- .../beorn7/perks/quantile/BUILD.bazel | 9 - vendor/github.com/coreos/etcd/BUILD.bazel | 16 -- .../github.com/coreos/etcd/alarm/BUILD.bazel | 15 -- .../github.com/coreos/etcd/auth/BUILD.bazel | 23 -- .../coreos/etcd/auth/authpb/BUILD.bazel | 10 - .../github.com/coreos/etcd/client/BUILD.bazel | 29 --- .../coreos/etcd/clientv3/BUILD.bazel | 42 ---- .../coreos/etcd/compactor/BUILD.bazel | 19 -- .../coreos/etcd/discovery/BUILD.bazel | 20 -- .../github.com/coreos/etcd/error/BUILD.bazel | 9 - .../coreos/etcd/etcdserver/BUILD.bazel | 67 ------ .../coreos/etcd/etcdserver/api/BUILD.bazel | 20 -- .../etcd/etcdserver/api/v2http/BUILD.bazel | 38 ---- .../api/v2http/httptypes/BUILD.bazel | 16 -- .../etcd/etcdserver/api/v3rpc/BUILD.bazel | 48 ---- .../etcdserver/api/v3rpc/rpctypes/BUILD.bazel | 17 -- .../coreos/etcd/etcdserver/auth/BUILD.bazel | 21 -- .../etcd/etcdserver/etcdserverpb/BUILD.bazel | 25 --- .../etcd/etcdserver/membership/BUILD.bazel | 27 --- .../coreos/etcd/etcdserver/stats/BUILD.bazel | 18 -- .../coreos/etcd/integration/BUILD.bazel | 30 --- .../github.com/coreos/etcd/lease/BUILD.bazel | 17 -- .../coreos/etcd/lease/leasehttp/BUILD.bazel | 19 -- .../coreos/etcd/lease/leasepb/BUILD.bazel | 13 -- .../github.com/coreos/etcd/mvcc/BUILD.bazel | 33 --- .../coreos/etcd/mvcc/backend/BUILD.bazel | 21 -- .../coreos/etcd/mvcc/mvccpb/BUILD.bazel | 10 - .../coreos/etcd/pkg/adt/BUILD.bazel | 12 - .../coreos/etcd/pkg/contention/BUILD.bazel | 12 - .../coreos/etcd/pkg/cpuutil/BUILD.bazel | 12 - .../coreos/etcd/pkg/crc/BUILD.bazel | 9 - .../coreos/etcd/pkg/fileutil/BUILD.bazel | 31 --- .../coreos/etcd/pkg/httputil/BUILD.bazel | 9 - .../coreos/etcd/pkg/idutil/BUILD.bazel | 9 - .../coreos/etcd/pkg/ioutil/BUILD.bazel | 15 -- .../coreos/etcd/pkg/logutil/BUILD.bazel | 10 - .../coreos/etcd/pkg/monotime/BUILD.bazel | 13 -- .../coreos/etcd/pkg/netutil/BUILD.bazel | 25 --- .../coreos/etcd/pkg/pathutil/BUILD.bazel | 9 - .../coreos/etcd/pkg/pbutil/BUILD.bazel | 10 - .../coreos/etcd/pkg/runtime/BUILD.bazel | 12 - .../coreos/etcd/pkg/schedule/BUILD.bazel | 13 -- .../coreos/etcd/pkg/testutil/BUILD.bazel | 14 -- .../coreos/etcd/pkg/tlsutil/BUILD.bazel | 12 - .../coreos/etcd/pkg/transport/BUILD.bazel | 25 --- .../coreos/etcd/pkg/types/BUILD.bazel | 16 -- .../coreos/etcd/pkg/wait/BUILD.bazel 
| 12 - .../coreos/etcd/proxy/grpcproxy/BUILD.bazel | 38 ---- .../etcd/proxy/grpcproxy/cache/BUILD.bazel | 15 -- .../github.com/coreos/etcd/raft/BUILD.bazel | 26 --- .../coreos/etcd/raft/raftpb/BUILD.bazel | 10 - .../coreos/etcd/rafthttp/BUILD.bazel | 44 ---- .../github.com/coreos/etcd/snap/BUILD.bazel | 24 -- .../coreos/etcd/snap/snappb/BUILD.bazel | 10 - .../github.com/coreos/etcd/store/BUILD.bazel | 28 --- .../coreos/etcd/version/BUILD.bazel | 10 - vendor/github.com/coreos/etcd/wal/BUILD.bazel | 31 --- .../coreos/etcd/wal/walpb/BUILD.bazel | 13 -- .../github.com/coreos/go-semver/BUILD.bazel | 16 -- .../coreos/go-semver/semver/BUILD.bazel | 12 - .../coreos/go-systemd/daemon/BUILD.bazel | 12 - .../coreos/go-systemd/dbus/BUILD.bazel | 17 -- .../coreos/go-systemd/journal/BUILD.bazel | 9 - .../coreos/go-systemd/unit/BUILD.bazel | 14 -- .../ignition/config/shared/errors/BUILD.bazel | 9 - .../config/shared/validations/BUILD.bazel | 14 -- .../coreos/ignition/config/util/BUILD.bazel | 19 -- .../coreos/ignition/config/v1/BUILD.bazel | 20 -- .../ignition/config/v1/types/BUILD.bazel | 28 --- .../coreos/ignition/config/v2_0/BUILD.bazel | 25 --- .../ignition/config/v2_0/types/BUILD.bazel | 37 ---- .../coreos/ignition/config/v2_1/BUILD.bazel | 25 --- .../ignition/config/v2_1/types/BUILD.bazel | 35 --- .../coreos/ignition/config/v2_2/BUILD.bazel | 25 --- .../ignition/config/v2_2/types/BUILD.bazel | 36 --- .../v2_3_experimental/types/BUILD.bazel | 36 --- .../ignition/config/validate/BUILD.bazel | 15 -- .../config/validate/astjson/BUILD.bazel | 14 -- .../config/validate/astnode/BUILD.bazel | 9 - .../config/validate/report/BUILD.bazel | 9 - .../coreos/pkg/capnslog/BUILD.bazel | 52 ----- .../github.com/coreos/pkg/health/BUILD.bazel | 10 - .../coreos/pkg/httputil/BUILD.bazel | 12 - .../coreos/pkg/timeutil/BUILD.bazel | 9 - .../config/kube-addon/BUILD.bazel | 10 - .../config/kube-core/BUILD.bazel | 10 - .../config/tectonic-network/BUILD.bazel | 10 - .../config/tectonic-utility/BUILD.bazel | 10 - .../davecgh/go-spew/spew/BUILD.bazel | 17 -- .../elazarl/go-bindata-assetfs/BUILD.bazel | 12 - .../emicklei/go-restful-swagger12/BUILD.bazel | 24 -- .../emicklei/go-restful/BUILD.bazel | 37 ---- .../emicklei/go-restful/log/BUILD.bazel | 9 - .../github.com/evanphx/json-patch/BUILD.bazel | 12 - vendor/github.com/ghodss/yaml/BUILD.bazel | 13 -- vendor/github.com/go-ini/ini/BUILD.bazel | 16 -- .../go-openapi/jsonpointer/BUILD.bazel | 10 - .../go-openapi/jsonreference/BUILD.bazel | 13 -- vendor/github.com/go-openapi/spec/BUILD.bazel | 36 --- vendor/github.com/go-openapi/swag/BUILD.bazel | 23 -- .../gogo/protobuf/proto/BUILD.bazel | 33 --- .../gogo/protobuf/sortkeys/BUILD.bazel | 9 - vendor/github.com/golang/glog/BUILD.bazel | 12 - .../golang/protobuf/jsonpb/BUILD.bazel | 13 -- .../golang/protobuf/proto/BUILD.bazel | 21 -- .../golang/protobuf/ptypes/BUILD.bazel | 20 -- .../golang/protobuf/ptypes/any/BUILD.bazel | 10 - .../protobuf/ptypes/duration/BUILD.bazel | 10 - .../golang/protobuf/ptypes/struct/BUILD.bazel | 10 - .../protobuf/ptypes/timestamp/BUILD.bazel | 10 - vendor/github.com/google/btree/BUILD.bazel | 9 - vendor/github.com/google/gofuzz/BUILD.bazel | 12 - .../github.com/googleapis/gnostic/BUILD.bazel | 24 -- .../googleapis/gnostic/OpenAPIv2/BUILD.bazel | 18 -- .../googleapis/gnostic/compiler/BUILD.bazel | 22 -- .../googleapis/gnostic/extensions/BUILD.bazel | 17 -- .../gregjones/httpcache/BUILD.bazel | 9 - .../gregjones/httpcache/diskcache/BUILD.bazel | 10 - .../go-grpc-prometheus/BUILD.bazel | 21 -- 
.../grpc-gateway/runtime/BUILD.bazel | 34 --- .../grpc-gateway/runtime/internal/BUILD.bazel | 10 - .../grpc-gateway/utilities/BUILD.bazel | 13 -- .../hashicorp/golang-lru/BUILD.bazel | 14 -- .../golang-lru/simplelru/BUILD.bazel | 9 - vendor/github.com/howeyc/gopass/BUILD.bazel | 49 ----- vendor/github.com/imdario/mergo/BUILD.bazel | 14 -- .../inconshreveable/mousetrap/BUILD.bazel | 13 -- .../jmespath/go-jmespath/BUILD.bazel | 18 -- .../github.com/json-iterator/go/BUILD.bazel | 48 ---- vendor/github.com/juju/ratelimit/BUILD.bazel | 12 - .../kballard/go-shellquote/BUILD.bazel | 13 -- .../pkg/builders/BUILD.bazel | 39 ---- .../github.com/libvirt/libvirt-go/BUILD.bazel | 88 -------- vendor/github.com/mailru/easyjson/BUILD.bazel | 16 -- .../mailru/easyjson/buffer/BUILD.bazel | 9 - .../mailru/easyjson/jlexer/BUILD.bazel | 13 -- .../mailru/easyjson/jwriter/BUILD.bazel | 10 - .../github.com/mattn/go-colorable/BUILD.bazel | 49 ----- vendor/github.com/mattn/go-isatty/BUILD.bazel | 28 --- .../pbutil/BUILD.bazel | 14 -- vendor/github.com/mgutz/ansi/BUILD.bazel | 14 -- .../mxk/go-flowrate/flowrate/BUILD.bazel | 13 -- .../pkg/aws_tag_deprovision/BUILD.bazel | 24 -- vendor/github.com/pborman/uuid/BUILD.bazel | 20 -- .../github.com/peterbourgon/diskv/BUILD.bazel | 14 -- .../pmezard/go-difflib/difflib/BUILD.bazel | 9 - .../client_golang/prometheus/BUILD.bazel | 37 ---- .../prometheus/client_model/go/BUILD.bazel | 10 - .../prometheus/common/expfmt/BUILD.bazel | 22 -- .../bitbucket.org/ww/goautoneg/BUILD.bazel | 9 - .../prometheus/common/model/BUILD.bazel | 21 -- .../github.com/prometheus/procfs/BUILD.bazel | 23 -- .../prometheus/procfs/xfs/BUILD.bazel | 12 - vendor/github.com/shurcooL/vfsgen/BUILD.bazel | 16 -- vendor/github.com/sirupsen/logrus/BUILD.bazel | 31 --- vendor/github.com/spf13/cobra/BUILD.bazel | 25 --- vendor/github.com/spf13/pflag/BUILD.bazel | 36 --- .../github.com/stretchr/testify/BUILD.bazel | 14 -- .../stretchr/testify/assert/BUILD.bazel | 21 -- vendor/github.com/ugorji/go/codec/BUILD.bazel | 34 --- .../vincent-petithory/dataurl/BUILD.bazel | 14 -- vendor/go4.org/errorutil/BUILD.bazel | 9 - .../x/crypto/ssh/terminal/BUILD.bazel | 44 ---- vendor/golang.org/x/net/context/BUILD.bazel | 15 -- vendor/golang.org/x/net/html/BUILD.bazel | 21 -- vendor/golang.org/x/net/html/atom/BUILD.bazel | 12 - vendor/golang.org/x/net/http2/BUILD.bazel | 41 ---- .../golang.org/x/net/http2/hpack/BUILD.bazel | 14 -- vendor/golang.org/x/net/idna/BUILD.bazel | 19 -- .../x/net/internal/timeseries/BUILD.bazel | 9 - .../golang.org/x/net/lex/httplex/BUILD.bazel | 10 - vendor/golang.org/x/net/trace/BUILD.bazel | 19 -- vendor/golang.org/x/net/websocket/BUILD.bazel | 15 -- vendor/golang.org/x/sys/unix/BUILD.bazel | 208 ------------------ vendor/golang.org/x/sys/windows/BUILD.bazel | 29 --- vendor/golang.org/x/text/BUILD.bazel | 9 - vendor/golang.org/x/text/cases/BUILD.bazel | 24 -- vendor/golang.org/x/text/internal/BUILD.bazel | 14 -- .../x/text/internal/tag/BUILD.bazel | 9 - vendor/golang.org/x/text/language/BUILD.bazel | 22 -- vendor/golang.org/x/text/runes/BUILD.bazel | 13 -- .../x/text/secure/bidirule/BUILD.bazel | 13 -- .../x/text/secure/precis/BUILD.bazel | 29 --- .../golang.org/x/text/transform/BUILD.bazel | 9 - .../x/text/unicode/bidi/BUILD.bazel | 16 -- .../x/text/unicode/norm/BUILD.bazel | 20 -- vendor/golang.org/x/text/width/BUILD.bazel | 16 -- .../googleapis/rpc/status/BUILD.bazel | 13 -- vendor/google.golang.org/grpc/BUILD.bazel | 44 ---- .../google.golang.org/grpc/codes/BUILD.bazel | 12 - 
.../grpc/credentials/BUILD.bazel | 15 -- .../grpc/grpclb/grpc_lb_v1/BUILD.bazel | 10 - .../grpc/grpclog/BUILD.bazel | 9 - .../grpc/internal/BUILD.bazel | 9 - .../grpc/keepalive/BUILD.bazel | 9 - .../grpc/metadata/BUILD.bazel | 10 - .../google.golang.org/grpc/naming/BUILD.bazel | 9 - .../google.golang.org/grpc/peer/BUILD.bazel | 13 -- .../google.golang.org/grpc/stats/BUILD.bazel | 13 -- .../google.golang.org/grpc/status/BUILD.bazel | 14 -- vendor/google.golang.org/grpc/tap/BUILD.bazel | 10 - .../grpc/transport/BUILD.bazel | 34 --- .../AlecAivazis/survey.v1/BUILD.bazel | 24 -- .../AlecAivazis/survey.v1/core/BUILD.bazel | 17 -- .../survey.v1/terminal/BUILD.bazel | 34 --- .../alecthomas/kingpin.v2/BUILD.bazel | 32 --- vendor/gopkg.in/inf.v0/BUILD.bazel | 12 - vendor/gopkg.in/yaml.v2/BUILD.bazel | 23 -- .../k8s.io/api/admission/v1beta1/BUILD.bazel | 24 -- .../v1alpha1/BUILD.bazel | 22 -- .../admissionregistration/v1beta1/BUILD.bazel | 22 -- vendor/k8s.io/api/apps/v1/BUILD.bazel | 24 -- vendor/k8s.io/api/apps/v1beta1/BUILD.bazel | 25 --- vendor/k8s.io/api/apps/v1beta2/BUILD.bazel | 25 --- .../k8s.io/api/authentication/v1/BUILD.bazel | 23 -- .../api/authentication/v1beta1/BUILD.bazel | 23 -- .../k8s.io/api/authorization/v1/BUILD.bazel | 23 -- .../api/authorization/v1beta1/BUILD.bazel | 23 -- vendor/k8s.io/api/autoscaling/v1/BUILD.bazel | 24 -- .../api/autoscaling/v2beta1/BUILD.bazel | 24 -- vendor/k8s.io/api/batch/v1/BUILD.bazel | 23 -- vendor/k8s.io/api/batch/v1beta1/BUILD.bazel | 24 -- vendor/k8s.io/api/batch/v2alpha1/BUILD.bazel | 24 -- .../api/certificates/v1beta1/BUILD.bazel | 23 -- vendor/k8s.io/api/core/v1/BUILD.bazel | 32 --- vendor/k8s.io/api/events/v1beta1/BUILD.bazel | 23 -- .../k8s.io/api/extensions/v1beta1/BUILD.bazel | 27 --- vendor/k8s.io/api/networking/v1/BUILD.bazel | 24 -- vendor/k8s.io/api/policy/v1beta1/BUILD.bazel | 24 -- vendor/k8s.io/api/rbac/v1/BUILD.bazel | 22 -- vendor/k8s.io/api/rbac/v1alpha1/BUILD.bazel | 22 -- vendor/k8s.io/api/rbac/v1beta1/BUILD.bazel | 22 -- .../api/scheduling/v1alpha1/BUILD.bazel | 22 -- .../k8s.io/api/settings/v1alpha1/BUILD.bazel | 23 -- vendor/k8s.io/api/storage/v1/BUILD.bazel | 24 -- .../k8s.io/api/storage/v1alpha1/BUILD.bazel | 23 -- vendor/k8s.io/api/storage/v1beta1/BUILD.bazel | 24 -- .../apimachinery/pkg/api/equality/BUILD.bazel | 16 -- .../apimachinery/pkg/api/errors/BUILD.bazel | 18 -- .../apimachinery/pkg/api/meta/BUILD.bazel | 32 --- .../apimachinery/pkg/api/resource/BUILD.bazel | 25 --- .../pkg/api/validation/BUILD.bazel | 23 -- .../pkg/api/validation/path/BUILD.bazel | 9 - .../apimachinery/pkg/apimachinery/BUILD.bazel | 17 -- .../pkg/apimachinery/announced/BUILD.bazel | 21 -- .../pkg/apimachinery/registered/BUILD.bazel | 16 -- .../pkg/apis/meta/internalversion/BUILD.bazel | 27 --- .../apimachinery/pkg/apis/meta/v1/BUILD.bazel | 46 ---- .../pkg/apis/meta/v1/unstructured/BUILD.bazel | 22 -- .../pkg/apis/meta/v1/validation/BUILD.bazel | 14 -- .../pkg/apis/meta/v1alpha1/BUILD.bazel | 26 --- .../apimachinery/pkg/conversion/BUILD.bazel | 15 -- .../pkg/conversion/queryparams/BUILD.bazel | 12 - .../apimachinery/pkg/fields/BUILD.bazel | 15 -- .../apimachinery/pkg/labels/BUILD.bazel | 20 -- .../apimachinery/pkg/runtime/BUILD.bazel | 38 ---- .../pkg/runtime/schema/BUILD.bazel | 14 -- .../pkg/runtime/serializer/BUILD.bazel | 21 -- .../pkg/runtime/serializer/json/BUILD.bazel | 21 -- .../runtime/serializer/protobuf/BUILD.bazel | 19 -- .../runtime/serializer/recognizer/BUILD.bazel | 13 -- .../runtime/serializer/streaming/BUILD.bazel | 13 -- 
.../runtime/serializer/versioning/BUILD.bazel | 13 -- .../apimachinery/pkg/selection/BUILD.bazel | 9 - .../k8s.io/apimachinery/pkg/types/BUILD.bazel | 15 -- .../apimachinery/pkg/util/cache/BUILD.bazel | 13 -- .../apimachinery/pkg/util/clock/BUILD.bazel | 9 - .../apimachinery/pkg/util/diff/BUILD.bazel | 13 -- .../apimachinery/pkg/util/errors/BUILD.bazel | 12 - .../apimachinery/pkg/util/framer/BUILD.bazel | 9 - .../pkg/util/httpstream/BUILD.bazel | 12 - .../apimachinery/pkg/util/intstr/BUILD.bazel | 19 -- .../apimachinery/pkg/util/json/BUILD.bazel | 9 - .../pkg/util/mergepatch/BUILD.bazel | 16 -- .../apimachinery/pkg/util/net/BUILD.bazel | 20 -- .../apimachinery/pkg/util/proxy/BUILD.bazel | 26 --- .../apimachinery/pkg/util/rand/BUILD.bazel | 9 - .../apimachinery/pkg/util/runtime/BUILD.bazel | 10 - .../apimachinery/pkg/util/sets/BUILD.bazel | 16 -- .../pkg/util/strategicpatch/BUILD.bazel | 21 -- .../apimachinery/pkg/util/uuid/BUILD.bazel | 13 -- .../pkg/util/validation/BUILD.bazel | 10 - .../pkg/util/validation/field/BUILD.bazel | 16 -- .../apimachinery/pkg/util/wait/BUILD.bazel | 13 -- .../pkg/util/waitgroup/BUILD.bazel | 12 - .../apimachinery/pkg/util/yaml/BUILD.bazel | 13 -- .../apimachinery/pkg/version/BUILD.bazel | 12 - .../k8s.io/apimachinery/pkg/watch/BUILD.bazel | 25 --- .../forked/golang/json/BUILD.bazel | 9 - .../forked/golang/netutil/BUILD.bazel | 9 - .../forked/golang/reflect/BUILD.bazel | 9 - .../apiserver/pkg/admission/BUILD.bazel | 31 --- .../pkg/admission/configuration/BUILD.bazel | 23 -- .../pkg/admission/initializer/BUILD.bazel | 19 -- .../pkg/admission/metrics/BUILD.bazel | 13 -- .../plugin/initialization/BUILD.bazel | 27 --- .../plugin/namespace/lifecycle/BUILD.bazel | 24 -- .../plugin/webhook/config/BUILD.bazel | 28 --- .../config/apis/webhookadmission/BUILD.bazel | 19 -- .../webhookadmission/v1alpha1/BUILD.bazel | 23 -- .../plugin/webhook/errors/BUILD.bazel | 17 -- .../plugin/webhook/mutating/BUILD.bazel | 37 ---- .../plugin/webhook/namespace/BUILD.bazel | 23 -- .../plugin/webhook/request/BUILD.bazel | 20 -- .../plugin/webhook/rules/BUILD.bazel | 13 -- .../plugin/webhook/validating/BUILD.bazel | 37 ---- .../plugin/webhook/versioned/BUILD.bazel | 18 -- .../apiserver/pkg/apis/apiserver/BUILD.bazel | 19 -- .../pkg/apis/apiserver/install/BUILD.bazel | 17 -- .../pkg/apis/apiserver/v1alpha1/BUILD.bazel | 24 -- .../apiserver/pkg/apis/audit/BUILD.bazel | 21 -- .../pkg/apis/audit/v1alpha1/BUILD.bazel | 28 --- .../pkg/apis/audit/v1beta1/BUILD.bazel | 28 --- .../pkg/apis/audit/validation/BUILD.bazel | 14 -- vendor/k8s.io/apiserver/pkg/audit/BUILD.bazel | 33 --- .../apiserver/pkg/audit/policy/BUILD.bazel | 22 -- .../authentication/authenticator/BUILD.bazel | 10 - .../authenticatorfactory/BUILD.bazel | 29 --- .../pkg/authentication/group/BUILD.bazel | 17 -- .../request/anonymous/BUILD.bazel | 13 -- .../request/bearertoken/BUILD.bazel | 13 -- .../request/headerrequest/BUILD.bazel | 16 -- .../authentication/request/union/BUILD.bazel | 14 -- .../request/websocket/BUILD.bazel | 14 -- .../authentication/request/x509/BUILD.bazel | 20 -- .../authentication/serviceaccount/BUILD.bazel | 10 - .../token/tokenfile/BUILD.bazel | 13 -- .../pkg/authentication/user/BUILD.bazel | 12 - .../pkg/authorization/authorizer/BUILD.bazel | 13 -- .../authorizerfactory/BUILD.bazel | 18 -- .../pkg/authorization/union/BUILD.bazel | 14 -- .../apiserver/pkg/endpoints/BUILD.bazel | 32 --- .../pkg/endpoints/discovery/BUILD.bazel | 26 --- .../pkg/endpoints/filters/BUILD.bazel | 41 ---- 
.../pkg/endpoints/handlers/BUILD.bazel | 54 ----- .../handlers/negotiation/BUILD.bazel | 19 -- .../handlers/responsewriters/BUILD.bazel | 30 --- .../pkg/endpoints/metrics/BUILD.bazel | 15 -- .../pkg/endpoints/openapi/BUILD.bazel | 17 -- .../pkg/endpoints/request/BUILD.bazel | 22 -- .../k8s.io/apiserver/pkg/features/BUILD.bazel | 10 - .../pkg/registry/generic/BUILD.bazel | 24 -- .../pkg/registry/generic/registry/BUILD.bazel | 40 ---- .../apiserver/pkg/registry/rest/BUILD.bazel | 39 ---- .../k8s.io/apiserver/pkg/server/BUILD.bazel | 76 ------- .../apiserver/pkg/server/filters/BUILD.bazel | 30 --- .../apiserver/pkg/server/healthz/BUILD.bazel | 13 -- .../apiserver/pkg/server/httplog/BUILD.bazel | 13 -- .../apiserver/pkg/server/mux/BUILD.bazel | 17 -- .../apiserver/pkg/server/routes/BUILD.bazel | 35 --- .../server/routes/data/swagger/BUILD.bazel | 9 - .../k8s.io/apiserver/pkg/storage/BUILD.bazel | 40 ---- .../apiserver/pkg/storage/errors/BUILD.bazel | 17 -- .../apiserver/pkg/storage/etcd/BUILD.bazel | 30 --- .../pkg/storage/etcd/metrics/BUILD.bazel | 10 - .../pkg/storage/etcd/util/BUILD.bazel | 13 -- .../apiserver/pkg/storage/etcd3/BUILD.bazel | 32 --- .../apiserver/pkg/storage/names/BUILD.bazel | 10 - .../pkg/storage/storagebackend/BUILD.bazel | 13 -- .../storagebackend/factory/BUILD.bazel | 25 --- .../apiserver/pkg/storage/value/BUILD.bazel | 9 - .../apiserver/pkg/util/feature/BUILD.bazel | 13 -- .../pkg/util/flushwriter/BUILD.bazel | 12 - .../apiserver/pkg/util/trace/BUILD.bazel | 10 - .../apiserver/pkg/util/webhook/BUILD.bazel | 20 -- .../apiserver/pkg/util/wsstream/BUILD.bazel | 18 -- .../authenticator/token/webhook/BUILD.bazel | 20 -- .../plugin/pkg/authorizer/webhook/BUILD.bazel | 21 -- vendor/k8s.io/client-go/discovery/BUILD.bazel | 29 --- vendor/k8s.io/client-go/informers/BUILD.bazel | 58 ----- .../admissionregistration/BUILD.bazel | 14 -- .../v1alpha1/BUILD.bazel | 22 -- .../admissionregistration/v1beta1/BUILD.bazel | 23 -- .../client-go/informers/apps/BUILD.bazel | 15 -- .../client-go/informers/apps/v1/BUILD.bazel | 26 --- .../informers/apps/v1beta1/BUILD.bazel | 24 -- .../informers/apps/v1beta2/BUILD.bazel | 26 --- .../informers/autoscaling/BUILD.bazel | 14 -- .../informers/autoscaling/v1/BUILD.bazel | 22 -- .../informers/autoscaling/v2beta1/BUILD.bazel | 22 -- .../client-go/informers/batch/BUILD.bazel | 15 -- .../client-go/informers/batch/v1/BUILD.bazel | 22 -- .../informers/batch/v1beta1/BUILD.bazel | 22 -- .../informers/batch/v2alpha1/BUILD.bazel | 22 -- .../informers/certificates/BUILD.bazel | 13 -- .../certificates/v1beta1/BUILD.bazel | 22 -- .../client-go/informers/core/BUILD.bazel | 13 -- .../client-go/informers/core/v1/BUILD.bazel | 37 ---- .../client-go/informers/events/BUILD.bazel | 13 -- .../informers/events/v1beta1/BUILD.bazel | 22 -- .../informers/extensions/BUILD.bazel | 13 -- .../informers/extensions/v1beta1/BUILD.bazel | 26 --- .../informers/internalinterfaces/BUILD.bazel | 15 -- .../informers/networking/BUILD.bazel | 13 -- .../informers/networking/v1/BUILD.bazel | 22 -- .../client-go/informers/policy/BUILD.bazel | 13 -- .../informers/policy/v1beta1/BUILD.bazel | 22 -- .../client-go/informers/rbac/BUILD.bazel | 15 -- .../client-go/informers/rbac/v1/BUILD.bazel | 25 --- .../informers/rbac/v1alpha1/BUILD.bazel | 25 --- .../informers/rbac/v1beta1/BUILD.bazel | 25 --- .../informers/scheduling/BUILD.bazel | 13 -- .../informers/scheduling/v1alpha1/BUILD.bazel | 22 -- .../client-go/informers/settings/BUILD.bazel | 13 -- .../informers/settings/v1alpha1/BUILD.bazel | 22 -- 
.../client-go/informers/storage/BUILD.bazel | 15 -- .../informers/storage/v1/BUILD.bazel | 22 -- .../informers/storage/v1alpha1/BUILD.bazel | 22 -- .../informers/storage/v1beta1/BUILD.bazel | 22 -- .../k8s.io/client-go/kubernetes/BUILD.bazel | 47 ---- .../client-go/kubernetes/scheme/BUILD.bazel | 46 ---- .../v1alpha1/BUILD.bazel | 23 -- .../admissionregistration/v1beta1/BUILD.bazel | 24 -- .../kubernetes/typed/apps/v1/BUILD.bazel | 27 --- .../kubernetes/typed/apps/v1beta1/BUILD.bazel | 26 --- .../kubernetes/typed/apps/v1beta2/BUILD.bazel | 28 --- .../typed/authentication/v1/BUILD.bazel | 21 -- .../typed/authentication/v1beta1/BUILD.bazel | 21 -- .../typed/authorization/v1/BUILD.bazel | 27 --- .../typed/authorization/v1beta1/BUILD.bazel | 27 --- .../typed/autoscaling/v1/BUILD.bazel | 23 -- .../typed/autoscaling/v2beta1/BUILD.bazel | 23 -- .../kubernetes/typed/batch/v1/BUILD.bazel | 23 -- .../typed/batch/v1beta1/BUILD.bazel | 23 -- .../typed/batch/v2alpha1/BUILD.bazel | 23 -- .../typed/certificates/v1beta1/BUILD.bazel | 24 -- .../kubernetes/typed/core/v1/BUILD.bazel | 49 ----- .../typed/events/v1beta1/BUILD.bazel | 23 -- .../typed/extensions/v1beta1/BUILD.bazel | 32 --- .../typed/networking/v1/BUILD.bazel | 23 -- .../typed/policy/v1beta1/BUILD.bazel | 25 --- .../kubernetes/typed/rbac/v1/BUILD.bazel | 26 --- .../typed/rbac/v1alpha1/BUILD.bazel | 26 --- .../kubernetes/typed/rbac/v1beta1/BUILD.bazel | 26 --- .../typed/scheduling/v1alpha1/BUILD.bazel | 23 -- .../typed/settings/v1alpha1/BUILD.bazel | 23 -- .../kubernetes/typed/storage/v1/BUILD.bazel | 23 -- .../typed/storage/v1alpha1/BUILD.bazel | 23 -- .../typed/storage/v1beta1/BUILD.bazel | 23 -- .../v1alpha1/BUILD.bazel | 18 -- .../admissionregistration/v1beta1/BUILD.bazel | 19 -- .../client-go/listers/apps/v1/BUILD.bazel | 28 --- .../listers/apps/v1beta1/BUILD.bazel | 24 -- .../listers/apps/v1beta2/BUILD.bazel | 29 --- .../listers/autoscaling/v1/BUILD.bazel | 18 -- .../listers/autoscaling/v2beta1/BUILD.bazel | 18 -- .../client-go/listers/batch/v1/BUILD.bazel | 21 -- .../listers/batch/v1beta1/BUILD.bazel | 18 -- .../listers/batch/v2alpha1/BUILD.bazel | 18 -- .../listers/certificates/v1beta1/BUILD.bazel | 18 -- .../client-go/listers/core/v1/BUILD.bazel | 36 --- .../listers/events/v1beta1/BUILD.bazel | 18 -- .../listers/extensions/v1beta1/BUILD.bazel | 29 --- .../listers/networking/v1/BUILD.bazel | 18 -- .../listers/policy/v1beta1/BUILD.bazel | 23 -- .../client-go/listers/rbac/v1/BUILD.bazel | 21 -- .../listers/rbac/v1alpha1/BUILD.bazel | 21 -- .../listers/rbac/v1beta1/BUILD.bazel | 21 -- .../listers/scheduling/v1alpha1/BUILD.bazel | 18 -- .../listers/settings/v1alpha1/BUILD.bazel | 18 -- .../client-go/listers/storage/v1/BUILD.bazel | 18 -- .../listers/storage/v1alpha1/BUILD.bazel | 18 -- .../listers/storage/v1beta1/BUILD.bazel | 18 -- .../k8s.io/client-go/pkg/version/BUILD.bazel | 14 -- vendor/k8s.io/client-go/rest/BUILD.bazel | 40 ---- .../k8s.io/client-go/rest/watch/BUILD.bazel | 18 -- .../k8s.io/client-go/tools/auth/BUILD.bazel | 10 - .../k8s.io/client-go/tools/cache/BUILD.bazel | 50 ----- .../client-go/tools/clientcmd/BUILD.bazel | 36 --- .../client-go/tools/clientcmd/api/BUILD.bazel | 19 -- .../tools/clientcmd/api/latest/BUILD.bazel | 17 -- .../tools/clientcmd/api/v1/BUILD.bazel | 21 -- .../client-go/tools/metrics/BUILD.bazel | 9 - .../k8s.io/client-go/tools/pager/BUILD.bazel | 17 -- .../client-go/tools/reference/BUILD.bazel | 15 -- vendor/k8s.io/client-go/transport/BUILD.bazel | 21 -- .../k8s.io/client-go/util/buffer/BUILD.bazel | 9 - 
vendor/k8s.io/client-go/util/cert/BUILD.bazel | 14 -- .../client-go/util/flowcontrol/BUILD.bazel | 17 -- .../k8s.io/client-go/util/homedir/BUILD.bazel | 9 - .../k8s.io/client-go/util/integer/BUILD.bazel | 9 - .../kube-openapi/pkg/builder/BUILD.bazel | 19 -- .../kube-openapi/pkg/common/BUILD.bazel | 16 -- .../kube-openapi/pkg/handler/BUILD.bazel | 20 -- .../k8s.io/kube-openapi/pkg/util/BUILD.bazel | 12 - .../kube-openapi/pkg/util/proto/BUILD.bazel | 17 -- .../cluster-api/pkg/apis/cluster/BUILD.bazel | 26 --- .../pkg/apis/cluster/common/BUILD.bazel | 9 - .../pkg/apis/cluster/v1alpha1/BUILD.bazel | 36 --- .../clientset_generated/clientset/BUILD.bazel | 19 -- .../clientset/scheme/BUILD.bazel | 19 -- .../typed/cluster/v1alpha1/BUILD.bazel | 26 --- 1019 files changed, 21594 deletions(-) delete mode 100644 BUILD.bazel delete mode 100644 WORKSPACE delete mode 100644 bazel-rules/BUILD.bazel delete mode 100644 bazel-rules/gen_test.bzl delete mode 100755 buildvars.sh delete mode 100644 cmd/openshift-install/BUILD.bazel delete mode 100644 data/BUILD.bazel delete mode 100644 examples/BUILD.bazel delete mode 100644 installer/BUILD.bazel delete mode 100644 installer/cmd/tectonic/BUILD.bazel delete mode 100644 installer/pkg/config-generator/BUILD.bazel delete mode 100644 installer/pkg/copy/BUILD.bazel delete mode 100644 installer/pkg/validate/BUILD.bazel delete mode 100644 installer/pkg/workflow/BUILD.bazel delete mode 100644 pkg/asset/BUILD.bazel delete mode 100644 pkg/asset/cluster/BUILD.bazel delete mode 100644 pkg/asset/ignition/BUILD.bazel delete mode 100644 pkg/asset/ignition/content/BUILD.bazel delete mode 100644 pkg/asset/installconfig/BUILD.bazel delete mode 100644 pkg/asset/kubeconfig/BUILD.bazel delete mode 100644 pkg/asset/manifests/BUILD.bazel delete mode 100644 pkg/asset/manifests/content/bootkube/BUILD.bazel delete mode 100644 pkg/asset/manifests/content/tectonic/ingress/BUILD.bazel delete mode 100644 pkg/asset/manifests/content/tectonic/rbac/BUILD.bazel delete mode 100644 pkg/asset/manifests/content/tectonic/secrets/BUILD.bazel delete mode 100644 pkg/asset/manifests/content/tectonic/security/BUILD.bazel delete mode 100644 pkg/asset/manifests/content/tectonic/updater/BUILD.bazel delete mode 100644 pkg/asset/manifests/content/tectonic/updater/appversions/BUILD.bazel delete mode 100644 pkg/asset/manifests/content/tectonic/updater/operators/BUILD.bazel delete mode 100644 pkg/asset/metadata/BUILD.bazel delete mode 100644 pkg/asset/stock/BUILD.bazel delete mode 100644 pkg/asset/tls/BUILD.bazel delete mode 100644 pkg/destroy/BUILD.bazel delete mode 100644 pkg/destroy/libvirt/BUILD.bazel delete mode 100644 pkg/ipnet/BUILD.bazel delete mode 100644 pkg/rhcos/BUILD.bazel delete mode 100644 pkg/terraform/BUILD.bazel delete mode 100644 pkg/types/BUILD.bazel delete mode 100644 pkg/types/config/BUILD.bazel delete mode 100644 pkg/types/config/aws/BUILD.bazel delete mode 100644 pkg/types/config/libvirt/BUILD.bazel delete mode 100644 tests/smoke/BUILD.bazel delete mode 100644 tests/smoke/vendor/cloud.google.com/go/BUILD.bazel delete mode 100644 tests/smoke/vendor/cloud.google.com/go/compute/metadata/BUILD.bazel delete mode 100644 tests/smoke/vendor/cloud.google.com/go/internal/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/Azure/go-autorest/autorest/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/Azure/go-autorest/autorest/date/BUILD.bazel delete mode 100644 
tests/smoke/vendor/github.com/PuerkitoBio/purell/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/PuerkitoBio/urlesc/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/beorn7/perks/quantile/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/coreos/ktestutil/testworkload/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/davecgh/go-spew/spew/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/dgrijalva/jwt-go/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/distribution/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/distribution/digest/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/distribution/reference/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/client/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/client/transport/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/client/transport/cancellable/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/types/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/types/blkiodev/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/types/container/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/types/filters/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/types/network/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/types/reference/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/types/registry/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/types/strslice/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/types/time/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/engine-api/types/versions/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/go-connections/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/go-connections/nat/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/go-connections/sockets/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/go-connections/tlsconfig/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/docker/go-units/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/emicklei/go-restful-swagger12/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/emicklei/go-restful/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/emicklei/go-restful/log/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/evanphx/json-patch/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/exponent-io/jsonpath/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/fatih/camelcase/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/ghodss/yaml/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/go-openapi/analysis/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/go-openapi/jsonpointer/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/go-openapi/jsonreference/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/go-openapi/loads/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/go-openapi/spec/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/go-openapi/swag/BUILD.bazel delete mode 100644 
tests/smoke/vendor/github.com/gogo/protobuf/proto/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/gogo/protobuf/sortkeys/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/golang/glog/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/golang/groupcache/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/golang/groupcache/lru/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/golang/protobuf/proto/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/google/gofuzz/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/hashicorp/golang-lru/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/hashicorp/golang-lru/simplelru/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/howeyc/gopass/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/imdario/mergo/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/juju/ratelimit/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/mailru/easyjson/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/mailru/easyjson/buffer/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/mailru/easyjson/jlexer/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/mailru/easyjson/jwriter/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/pborman/uuid/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/prometheus/client_model/go/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/prometheus/common/expfmt/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/prometheus/common/model/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/prometheus/procfs/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/prometheus/procfs/xfs/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/spf13/cobra/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/spf13/cobra/doc/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/spf13/pflag/BUILD.bazel delete mode 100644 tests/smoke/vendor/github.com/ugorji/go/codec/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/crypto/ssh/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/crypto/ssh/agent/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/net/context/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/net/http2/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/net/http2/hpack/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/net/idna/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/net/lex/httplex/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/oauth2/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/oauth2/google/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/oauth2/internal/BUILD.bazel delete mode 100644 
tests/smoke/vendor/golang.org/x/oauth2/jws/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/oauth2/jwt/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/sys/unix/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/cases/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/encoding/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/encoding/internal/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/encoding/internal/identifier/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/encoding/unicode/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/internal/tag/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/internal/utf8internal/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/language/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/runes/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/secure/bidirule/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/secure/precis/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/transform/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/unicode/norm/BUILD.bazel delete mode 100644 tests/smoke/vendor/golang.org/x/text/width/BUILD.bazel delete mode 100644 tests/smoke/vendor/google.golang.org/appengine/BUILD.bazel delete mode 100644 tests/smoke/vendor/google.golang.org/appengine/internal/BUILD.bazel delete mode 100644 tests/smoke/vendor/google.golang.org/appengine/internal/app_identity/BUILD.bazel delete mode 100644 tests/smoke/vendor/google.golang.org/appengine/internal/base/BUILD.bazel delete mode 100644 tests/smoke/vendor/google.golang.org/appengine/internal/datastore/BUILD.bazel delete mode 100644 tests/smoke/vendor/google.golang.org/appengine/internal/log/BUILD.bazel delete mode 100644 tests/smoke/vendor/google.golang.org/appengine/internal/modules/BUILD.bazel delete mode 100644 tests/smoke/vendor/google.golang.org/appengine/internal/remote_api/BUILD.bazel delete mode 100644 tests/smoke/vendor/google.golang.org/appengine/internal/urlfetch/BUILD.bazel delete mode 100644 tests/smoke/vendor/google.golang.org/appengine/urlfetch/BUILD.bazel delete mode 100644 tests/smoke/vendor/gopkg.in/inf.v0/BUILD.bazel delete mode 100644 tests/smoke/vendor/gopkg.in/yaml.v2/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD.bazel delete mode 
100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/fields/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/openapi/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/selection/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/types/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/version/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD.bazel 
delete mode 100644 tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apiserver/pkg/features/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/apiserver/pkg/util/flag/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/discovery/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/dynamic/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/api/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/ref/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/BUILD.bazel delete mode 100644 
tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/util/parsers/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/pkg/version/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/rest/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/rest/watch/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/tools/auth/BUILD.bazel delete mode 100644 
tests/smoke/vendor/k8s.io/client-go/tools/cache/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/tools/metrics/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/tools/record/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/transport/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/util/cert/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/util/flowcontrol/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/util/homedir/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/util/integer/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/util/jsonpath/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/client-go/util/workqueue/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/autoscaling/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/batch/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/events/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper/qos/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/service/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD.bazel delete mode 
100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/pod/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/ref/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD.bazel delete mode 
100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/BUILD.bazel delete mode 100644 
tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1/BUILD.bazel delete mode 100644 
tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/apps/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/core/v1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/retry/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/features/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/util/exec/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/util/metrics/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/util/node/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD.bazel delete mode 
100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/version/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel delete mode 100644 tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/BUILD.bazel delete mode 100644 tests/smoke/vendor/vbom.ml/util/BUILD.bazel delete mode 100644 tests/smoke/vendor/vbom.ml/util/sortorder/BUILD.bazel delete mode 100644 vendor/bitbucket.org/ww/goautoneg/BUILD.bazel delete mode 100644 vendor/github.com/AlecAivazis/survey/BUILD.bazel delete mode 100644 vendor/github.com/NYTimes/gziphandler/BUILD.bazel delete mode 100644 vendor/github.com/PuerkitoBio/purell/BUILD.bazel delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/BUILD.bazel delete mode 100644 vendor/github.com/ajeddeloh/go-json/BUILD.bazel delete mode 100644 vendor/github.com/alecthomas/template/BUILD.bazel delete mode 100644 vendor/github.com/alecthomas/template/parse/BUILD.bazel delete mode 100644 vendor/github.com/alecthomas/units/BUILD.bazel delete mode 100644 vendor/github.com/apparentlymart/go-cidr/cidr/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/BUILD.bazel delete mode 
100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/autoscaling/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/ec2/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/elb/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/iam/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/route53/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/BUILD.bazel delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/BUILD.bazel delete mode 100644 vendor/github.com/beorn7/perks/quantile/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/alarm/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/auth/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/auth/authpb/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/client/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/clientv3/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/compactor/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/discovery/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/error/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/auth/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/membership/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/stats/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/integration/BUILD.bazel delete mode 100644 
vendor/github.com/coreos/etcd/lease/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/lease/leasehttp/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/lease/leasepb/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/mvcc/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/mvcc/mvccpb/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/adt/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/contention/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/cpuutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/crc/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/httputil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/idutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/ioutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/logutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/monotime/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/netutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/pathutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/pbutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/runtime/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/schedule/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/testutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/tlsutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/pkg/wait/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/raft/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/raft/raftpb/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/snap/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/snap/snappb/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/store/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/version/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/wal/BUILD.bazel delete mode 100644 vendor/github.com/coreos/etcd/wal/walpb/BUILD.bazel delete mode 100644 vendor/github.com/coreos/go-semver/BUILD.bazel delete mode 100644 vendor/github.com/coreos/go-semver/semver/BUILD.bazel delete mode 100644 vendor/github.com/coreos/go-systemd/daemon/BUILD.bazel delete mode 100644 vendor/github.com/coreos/go-systemd/dbus/BUILD.bazel delete mode 100644 vendor/github.com/coreos/go-systemd/journal/BUILD.bazel delete mode 100644 vendor/github.com/coreos/go-systemd/unit/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/shared/errors/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/shared/validations/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/util/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/v1/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/BUILD.bazel delete mode 100644 
vendor/github.com/coreos/ignition/config/v2_0/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/v2_3_experimental/types/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/validate/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/validate/astjson/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/validate/astnode/BUILD.bazel delete mode 100644 vendor/github.com/coreos/ignition/config/validate/report/BUILD.bazel delete mode 100644 vendor/github.com/coreos/pkg/capnslog/BUILD.bazel delete mode 100644 vendor/github.com/coreos/pkg/health/BUILD.bazel delete mode 100644 vendor/github.com/coreos/pkg/httputil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/pkg/timeutil/BUILD.bazel delete mode 100644 vendor/github.com/coreos/tectonic-config/config/kube-addon/BUILD.bazel delete mode 100644 vendor/github.com/coreos/tectonic-config/config/kube-core/BUILD.bazel delete mode 100644 vendor/github.com/coreos/tectonic-config/config/tectonic-network/BUILD.bazel delete mode 100644 vendor/github.com/coreos/tectonic-config/config/tectonic-utility/BUILD.bazel delete mode 100644 vendor/github.com/davecgh/go-spew/spew/BUILD.bazel delete mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/BUILD.bazel delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/BUILD.bazel delete mode 100644 vendor/github.com/emicklei/go-restful/BUILD.bazel delete mode 100644 vendor/github.com/emicklei/go-restful/log/BUILD.bazel delete mode 100644 vendor/github.com/evanphx/json-patch/BUILD.bazel delete mode 100644 vendor/github.com/ghodss/yaml/BUILD.bazel delete mode 100644 vendor/github.com/go-ini/ini/BUILD.bazel delete mode 100644 vendor/github.com/go-openapi/jsonpointer/BUILD.bazel delete mode 100644 vendor/github.com/go-openapi/jsonreference/BUILD.bazel delete mode 100644 vendor/github.com/go-openapi/spec/BUILD.bazel delete mode 100644 vendor/github.com/go-openapi/swag/BUILD.bazel delete mode 100644 vendor/github.com/gogo/protobuf/proto/BUILD.bazel delete mode 100644 vendor/github.com/gogo/protobuf/sortkeys/BUILD.bazel delete mode 100644 vendor/github.com/golang/glog/BUILD.bazel delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/BUILD.bazel delete mode 100644 vendor/github.com/golang/protobuf/proto/BUILD.bazel delete mode 100644 vendor/github.com/golang/protobuf/ptypes/BUILD.bazel delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/BUILD.bazel delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/BUILD.bazel delete mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/BUILD.bazel delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/BUILD.bazel delete mode 100644 vendor/github.com/google/btree/BUILD.bazel delete mode 100644 vendor/github.com/google/gofuzz/BUILD.bazel delete mode 100644 vendor/github.com/googleapis/gnostic/BUILD.bazel delete mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/BUILD.bazel delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/BUILD.bazel delete mode 100644 vendor/github.com/googleapis/gnostic/extensions/BUILD.bazel delete mode 
100644 vendor/github.com/gregjones/httpcache/BUILD.bazel delete mode 100644 vendor/github.com/gregjones/httpcache/diskcache/BUILD.bazel delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/BUILD.bazel delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/BUILD.bazel delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel delete mode 100644 vendor/github.com/hashicorp/golang-lru/BUILD.bazel delete mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/BUILD.bazel delete mode 100644 vendor/github.com/howeyc/gopass/BUILD.bazel delete mode 100644 vendor/github.com/imdario/mergo/BUILD.bazel delete mode 100644 vendor/github.com/inconshreveable/mousetrap/BUILD.bazel delete mode 100644 vendor/github.com/jmespath/go-jmespath/BUILD.bazel delete mode 100644 vendor/github.com/json-iterator/go/BUILD.bazel delete mode 100644 vendor/github.com/juju/ratelimit/BUILD.bazel delete mode 100644 vendor/github.com/kballard/go-shellquote/BUILD.bazel delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/BUILD.bazel delete mode 100644 vendor/github.com/libvirt/libvirt-go/BUILD.bazel delete mode 100644 vendor/github.com/mailru/easyjson/BUILD.bazel delete mode 100644 vendor/github.com/mailru/easyjson/buffer/BUILD.bazel delete mode 100644 vendor/github.com/mailru/easyjson/jlexer/BUILD.bazel delete mode 100644 vendor/github.com/mailru/easyjson/jwriter/BUILD.bazel delete mode 100644 vendor/github.com/mattn/go-colorable/BUILD.bazel delete mode 100644 vendor/github.com/mattn/go-isatty/BUILD.bazel delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel delete mode 100644 vendor/github.com/mgutz/ansi/BUILD.bazel delete mode 100644 vendor/github.com/mxk/go-flowrate/flowrate/BUILD.bazel delete mode 100644 vendor/github.com/openshift/hive/contrib/pkg/aws_tag_deprovision/BUILD.bazel delete mode 100644 vendor/github.com/pborman/uuid/BUILD.bazel delete mode 100644 vendor/github.com/peterbourgon/diskv/BUILD.bazel delete mode 100644 vendor/github.com/pmezard/go-difflib/difflib/BUILD.bazel delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel delete mode 100644 vendor/github.com/prometheus/client_model/go/BUILD.bazel delete mode 100644 vendor/github.com/prometheus/common/expfmt/BUILD.bazel delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/BUILD.bazel delete mode 100644 vendor/github.com/prometheus/common/model/BUILD.bazel delete mode 100644 vendor/github.com/prometheus/procfs/BUILD.bazel delete mode 100644 vendor/github.com/prometheus/procfs/xfs/BUILD.bazel delete mode 100644 vendor/github.com/shurcooL/vfsgen/BUILD.bazel delete mode 100644 vendor/github.com/sirupsen/logrus/BUILD.bazel delete mode 100644 vendor/github.com/spf13/cobra/BUILD.bazel delete mode 100644 vendor/github.com/spf13/pflag/BUILD.bazel delete mode 100644 vendor/github.com/stretchr/testify/BUILD.bazel delete mode 100644 vendor/github.com/stretchr/testify/assert/BUILD.bazel delete mode 100644 vendor/github.com/ugorji/go/codec/BUILD.bazel delete mode 100644 vendor/github.com/vincent-petithory/dataurl/BUILD.bazel delete mode 100644 vendor/go4.org/errorutil/BUILD.bazel delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel delete mode 100644 vendor/golang.org/x/net/context/BUILD.bazel delete mode 100644 vendor/golang.org/x/net/html/BUILD.bazel 
delete mode 100644 vendor/golang.org/x/net/html/atom/BUILD.bazel delete mode 100644 vendor/golang.org/x/net/http2/BUILD.bazel delete mode 100644 vendor/golang.org/x/net/http2/hpack/BUILD.bazel delete mode 100644 vendor/golang.org/x/net/idna/BUILD.bazel delete mode 100644 vendor/golang.org/x/net/internal/timeseries/BUILD.bazel delete mode 100644 vendor/golang.org/x/net/lex/httplex/BUILD.bazel delete mode 100644 vendor/golang.org/x/net/trace/BUILD.bazel delete mode 100644 vendor/golang.org/x/net/websocket/BUILD.bazel delete mode 100644 vendor/golang.org/x/sys/unix/BUILD.bazel delete mode 100644 vendor/golang.org/x/sys/windows/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/cases/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/internal/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/internal/tag/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/language/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/runes/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/secure/bidirule/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/secure/precis/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/transform/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/unicode/bidi/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/unicode/norm/BUILD.bazel delete mode 100644 vendor/golang.org/x/text/width/BUILD.bazel delete mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/codes/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/credentials/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/grpclog/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/internal/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/keepalive/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/metadata/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/naming/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/peer/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/stats/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/status/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/tap/BUILD.bazel delete mode 100644 vendor/google.golang.org/grpc/transport/BUILD.bazel delete mode 100644 vendor/gopkg.in/AlecAivazis/survey.v1/BUILD.bazel delete mode 100644 vendor/gopkg.in/AlecAivazis/survey.v1/core/BUILD.bazel delete mode 100644 vendor/gopkg.in/AlecAivazis/survey.v1/terminal/BUILD.bazel delete mode 100644 vendor/gopkg.in/alecthomas/kingpin.v2/BUILD.bazel delete mode 100644 vendor/gopkg.in/inf.v0/BUILD.bazel delete mode 100644 vendor/gopkg.in/yaml.v2/BUILD.bazel delete mode 100644 vendor/k8s.io/api/admission/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/apps/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/apps/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/apps/v1beta2/BUILD.bazel delete mode 100644 vendor/k8s.io/api/authentication/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/authorization/v1/BUILD.bazel delete mode 100644 
vendor/k8s.io/api/authorization/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/autoscaling/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/batch/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/batch/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/core/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/events/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/networking/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/policy/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/rbac/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/storage/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/api/storage/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/equality/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/path/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/fields/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/selection/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/types/BUILD.bazel delete mode 100644 
vendor/k8s.io/apimachinery/pkg/util/cache/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/clock/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/diff/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/errors/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/framer/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/json/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/rand/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/wait/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/waitgroup/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/version/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD.bazel delete mode 100644 
vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/validation/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/audit/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizer/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/authorization/union/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/openapi/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/features/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/server/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/server/httplog/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/server/mux/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/data/swagger/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/BUILD.bazel delete mode 100644 
vendor/k8s.io/apiserver/pkg/storage/errors/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/names/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/util/flushwriter/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/util/trace/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD.bazel delete mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/discovery/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/apps/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/autoscaling/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v2beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/batch/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/batch/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/batch/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/batch/v2alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/certificates/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/certificates/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/core/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/events/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/events/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/extensions/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/internalinterfaces/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/networking/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/networking/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/policy/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/policy/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/rbac/BUILD.bazel 
delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/scheduling/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/settings/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/settings/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/storage/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD.bazel delete mode 100644 
vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v2beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/batch/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/batch/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/batch/v2alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/certificates/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/events/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/networking/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/settings/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1alpha1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/pkg/version/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/rest/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/rest/watch/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/tools/auth/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/tools/cache/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/tools/metrics/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/tools/pager/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/tools/reference/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/transport/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/util/buffer/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/util/cert/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/util/flowcontrol/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/util/homedir/BUILD.bazel delete mode 100644 vendor/k8s.io/client-go/util/integer/BUILD.bazel delete mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/BUILD.bazel delete mode 100644 vendor/k8s.io/kube-openapi/pkg/common/BUILD.bazel delete mode 100644 vendor/k8s.io/kube-openapi/pkg/handler/BUILD.bazel delete mode 100644 vendor/k8s.io/kube-openapi/pkg/util/BUILD.bazel delete mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/BUILD.bazel delete mode 
100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel diff --git a/BUILD.bazel b/BUILD.bazel deleted file mode 100644 index dbeed755793..00000000000 --- a/BUILD.bazel +++ /dev/null @@ -1,140 +0,0 @@ -load("//bazel-rules:gen_test.bzl", "gen_test") -load("//:version.bzl", "TECTONIC_VERSION") - -package( - default_visibility = ["//visibility:public"], -) - -config_setting( - name = "darwin", - values = {"cpu": "darwin"}, - visibility = ["//visibility:public"], -) - -config_setting( - name = "linux", - values = {"cpu": "k8"}, # don't ask... - visibility = ["//visibility:public"], -) - -genrule( - name = "terraform_runtime_fmt", - srcs = select({ - "//:linux": ["@terraform_runtime_linux//:terraform"], - "//:darwin": ["@terraform_runtime_darwin//:terraform"], - }), - outs = ["bin/terraform"], - cmd = "cp $(<) $(@)", - executable = True, - output_to_bindir = 1, -) - -load("@io_bazel_rules_go//go:def.bzl", "go_prefix") - -go_prefix("github.com/openshift/installer") - -load("@bazel_gazelle//:def.bzl", "gazelle") - -gazelle( - name = "gazelle", - command = "fix", - external = "vendored", - prefix = "github.com/openshift/installer", -) - -alias( - name = "smoke_tests", - actual = "//tests/smoke:go_default_test", -) - -alias( - name = "cli", - actual = "//installer/cmd/tectonic", -) - -template_files = glob([ - "modules/**/*", - "steps/**/*", - "config.tf", -]) - -exports_files(template_files) - -filegroup( - name = "template_resources", - srcs = template_files, -) - -load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar") - -alias( - name = "tarball", - actual = ":tectonic-%s" % TECTONIC_VERSION, -) - -pkg_tar( - name = "tectonic-%s" % TECTONIC_VERSION, - srcs = [ - "//:template_resources", - "//examples:config_examples", - ], - extension = "tar.gz", - mode = "0666", - package_dir = "tectonic-%s" % TECTONIC_VERSION, - strip_prefix = ".", - deps = [ - ":cli_bin", - ":old_cli_bin", - ":tf_bin", - ], -) - -pkg_tar( - name = "old_cli_bin", - srcs = ["//installer/cmd/tectonic"], - mode = "0555", - package_dir = "installer", -) - -pkg_tar( - name = "cli_bin", - srcs = ["//cmd/openshift-install"], - mode = "0555", - package_dir = "installer", -) - -filegroup( - name = "terraform_runtime", - srcs = select({ - "//:linux": ["@terraform_runtime_linux//:terraform"], - "//:darwin": ["@terraform_runtime_darwin//:terraform"], - }), -) - -pkg_tar( - name = "tf_bin", - srcs = [":terraform_runtime"], - mode = "0777", - package_dir = "installer", -) - -filegroup( - name = "terraform_files", - srcs = glob( - ["modules/**/*.tf"], - exclude_directories = 1, - ) + glob( - ["steps/**/*.tf"], - exclude_directories = 1, - ) + ["config.tf"], -) - -gen_test( - name = "terraform_fmt", - size = "small", - command = "./bin/terraform fmt -list -check -write=false", - deps = [ - ":terraform_files", - ":terraform_runtime_fmt", - ], -) diff --git a/WORKSPACE b/WORKSPACE deleted file mode 100644 index 00b82d47552..00000000000 --- a/WORKSPACE +++ /dev/null @@ -1,40 +0,0 @@ -workspace(name = "installer") - -terrafom_version = "0.11.8" - -supported_platforms = [ - "linux", - "darwin", -] - 
-http_archive( - name = "io_bazel_rules_go", - url = "https://github.com/bazelbuild/rules_go/releases/download/0.12.1/rules_go-0.12.1.tar.gz", - sha256 = "8b68d0630d63d95dacc0016c3bb4b76154fe34fca93efd65d1c366de3fcb4294", -) - -http_archive( - name = "bazel_gazelle", - url = "https://github.com/bazelbuild/bazel-gazelle/releases/download/0.12.0/bazel-gazelle-0.12.0.tar.gz", - sha256 = "ddedc7aaeb61f2654d7d7d4fd7940052ea992ccdb031b8f9797ed143ac7e8d43", -) - -load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains") - -go_rules_dependencies() - -go_register_toolchains() - -load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") - -gazelle_dependencies() - -# Runtime binary dependencies follow. -# These will be fetched and included in the build output verbatim. -# -[new_http_archive( - name = "terraform_runtime_%s" % platform, - build_file_content = """exports_files(["terraform"], visibility = ["//visibility:public"])""", - type = "zip", - url = "https://releases.hashicorp.com/terraform/%s/terraform_%s_%s_amd64.zip" % (terrafom_version, terrafom_version, platform), -) for platform in supported_platforms] diff --git a/bazel-rules/BUILD.bazel b/bazel-rules/BUILD.bazel deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/bazel-rules/gen_test.bzl b/bazel-rules/gen_test.bzl deleted file mode 100644 index fba807abc06..00000000000 --- a/bazel-rules/gen_test.bzl +++ /dev/null @@ -1,28 +0,0 @@ -def generate_script(command): - return """ -set -ex -{command} -""".format(command=command) - -def _impl(ctx): - script = generate_script(ctx.attr.command) - - # Write the file, it is executed by 'bazel test'. - ctx.actions.write( - output=ctx.outputs.executable, - content=script - ) - - # To ensure the files needed by the script are available, we put them in - # the runfiles. - runfiles = ctx.runfiles(files=ctx.files.deps) - return [DefaultInfo(runfiles=runfiles)] - -gen_test = rule( - implementation=_impl, - attrs={ - "command": attr.string(), - "deps": attr.label_list(allow_files=True), - }, - test=True, -) diff --git a/buildvars.sh b/buildvars.sh deleted file mode 100755 index cfea530ad05..00000000000 --- a/buildvars.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Vars exported to the build info -echo TECTONIC_VERSION "${TECTONIC_VERSION}" -echo BUILD_TIME "$(date -u '+%Y-%m-%dT%H:%M:%S%z')" diff --git a/cmd/openshift-install/BUILD.bazel b/cmd/openshift-install/BUILD.bazel deleted file mode 100644 index 94ddec3025f..00000000000 --- a/cmd/openshift-install/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") - -go_library( - name = "go_default_library", - srcs = ["main.go"], - importpath = "github.com/openshift/installer/cmd/openshift-install", - visibility = ["//visibility:private"], - deps = [ - "//pkg/asset:go_default_library", - "//pkg/asset/stock:go_default_library", - "//pkg/destroy:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - "//vendor/gopkg.in/alecthomas/kingpin.v2:go_default_library", - ], -) - -go_binary( - name = "openshift-install", - out = "openshift-install", - embed = [":go_default_library"], - # Use pure to build a pure-go binary. - # This has the nice side effect of making the binary statically linked. 
- pure = "on", - visibility = ["//visibility:public"], -) diff --git a/data/BUILD.bazel b/data/BUILD.bazel deleted file mode 100644 index 975c83922b4..00000000000 --- a/data/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "assets.go", - "unpack.go", - ], - importpath = "github.com/openshift/installer/data", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["unpack_test.go"], - embed = [":go_default_library"], -) diff --git a/examples/BUILD.bazel b/examples/BUILD.bazel deleted file mode 100644 index 37baf66af6a..00000000000 --- a/examples/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -config_examples = glob(["*.yaml"]) - -filegroup( - name = "config_examples", - srcs = config_examples, - visibility = ["//visibility:public"], -) - -exports_files(config_examples) diff --git a/installer/BUILD.bazel b/installer/BUILD.bazel deleted file mode 100644 index 5d185a6b736..00000000000 --- a/installer/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -test_suite( - name = "cli_units", - tests = [ - "//installer/pkg/config:go_default_test", - "//installer/pkg/config-generator:go_default_test", - "//installer/pkg/tls:go_default_test", - "//installer/pkg/validate:go_default_test", - "//installer/pkg/workflow:go_default_test", - ], -) diff --git a/installer/cmd/tectonic/BUILD.bazel b/installer/cmd/tectonic/BUILD.bazel deleted file mode 100644 index 46edb574ed0..00000000000 --- a/installer/cmd/tectonic/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") - -go_library( - name = "go_default_library", - srcs = ["main.go"], - importpath = "github.com/openshift/installer/installer/cmd/tectonic", - visibility = ["//visibility:private"], - deps = [ - "//installer/pkg/workflow:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - "//vendor/gopkg.in/alecthomas/kingpin.v2:go_default_library", - ], -) - -go_binary( - name = "tectonic", - out = "tectonic", - data = ["//:template_resources"], - embed = [":go_default_library"], - # Use pure to build a pure-go binary. - # This has the nice side effect of making the binary statically linked. 
- pure = "on", - visibility = ["//visibility:public"], -) diff --git a/installer/pkg/config-generator/BUILD.bazel b/installer/pkg/config-generator/BUILD.bazel deleted file mode 100644 index 222440da820..00000000000 --- a/installer/pkg/config-generator/BUILD.bazel +++ /dev/null @@ -1,43 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "generator.go", - "ignition.go", - "tls.go", - ], - importpath = "github.com/openshift/installer/installer/pkg/config-generator", - visibility = ["//visibility:public"], - deps = [ - "//installer/pkg/copy:go_default_library", - "//pkg/asset/tls:go_default_library", - "//pkg/ipnet:go_default_library", - "//pkg/rhcos:go_default_library", - "//pkg/types:go_default_library", - "//pkg/types/config:go_default_library", - "//vendor/github.com/apparentlymart/go-cidr/cidr:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_2:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_2/types:go_default_library", - "//vendor/github.com/coreos/tectonic-config/config/kube-addon:go_default_library", - "//vendor/github.com/coreos/tectonic-config/config/kube-core:go_default_library", - "//vendor/github.com/coreos/tectonic-config/config/tectonic-network:go_default_library", - "//vendor/github.com/coreos/tectonic-config/config/tectonic-utility:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/vincent-petithory/dataurl:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = ["generator_test.go"], - data = glob(["fixtures/**"]), - embed = [":go_default_library"], - deps = [ - "//pkg/asset/tls:go_default_library", - "//pkg/types/config:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - ], -) diff --git a/installer/pkg/copy/BUILD.bazel b/installer/pkg/copy/BUILD.bazel deleted file mode 100644 index 44e66984d1a..00000000000 --- a/installer/pkg/copy/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["copy.go"], - importpath = "github.com/openshift/installer/installer/pkg/copy", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = ["copy_test.go"], - embed = [":go_default_library"], -) diff --git a/installer/pkg/validate/BUILD.bazel b/installer/pkg/validate/BUILD.bazel deleted file mode 100644 index d03c33ca7cb..00000000000 --- a/installer/pkg/validate/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "last_ip_test.go", - "validate_test.go", - ], - embed = [":go_default_library"], - deps = ["//pkg/asset/tls:go_default_library"], -) - -go_library( - name = "go_default_library", - srcs = ["validate.go"], - data = glob(["fixtures/**"]), - importpath = "github.com/openshift/installer/installer/pkg/validate", - visibility = ["//visibility:public"], -) diff --git a/installer/pkg/workflow/BUILD.bazel b/installer/pkg/workflow/BUILD.bazel deleted file mode 100644 index d4a2f06ec38..00000000000 --- a/installer/pkg/workflow/BUILD.bazel +++ /dev/null @@ -1,39 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - 
"convert.go", - "destroy.go", - "init.go", - "install.go", - "utils.go", - "workflow.go", - ], - importpath = "github.com/openshift/installer/installer/pkg/workflow", - visibility = ["//visibility:public"], - deps = [ - "//installer/pkg/config-generator:go_default_library", - "//pkg/terraform:go_default_library", - "//pkg/types/config:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", - "//vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "init_test.go", - "workflow_test.go", - ], - data = glob(["fixtures/**"]), - embed = [":go_default_library"], - deps = ["//pkg/types/config:go_default_library"], -) diff --git a/pkg/asset/BUILD.bazel b/pkg/asset/BUILD.bazel deleted file mode 100644 index 6efbeb1c2ed..00000000000 --- a/pkg/asset/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "asset.go", - "doc.go", - "state.go", - "store.go", - "userprovided.go", - ], - importpath = "github.com/openshift/installer/pkg/asset", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/AlecAivazis/survey:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "state_test.go", - "store_test.go", - ], - embed = [":go_default_library"], - deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], -) diff --git a/pkg/asset/cluster/BUILD.bazel b/pkg/asset/cluster/BUILD.bazel deleted file mode 100644 index cf4b674de42..00000000000 --- a/pkg/asset/cluster/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "doc.go", - "stock.go", - "tfvars.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/cluster", - visibility = ["//visibility:public"], - deps = [ - "//data:go_default_library", - "//pkg/asset:go_default_library", - "//pkg/asset/ignition:go_default_library", - "//pkg/asset/installconfig:go_default_library", - "//pkg/asset/kubeconfig:go_default_library", - "//pkg/terraform:go_default_library", - "//pkg/types/config:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - ], -) diff --git a/pkg/asset/ignition/BUILD.bazel b/pkg/asset/ignition/BUILD.bazel deleted file mode 100644 index 8b9ebf728e6..00000000000 --- a/pkg/asset/ignition/BUILD.bazel +++ /dev/null @@ -1,43 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "bootstrap.go", - "doc.go", - "master.go", - "node.go", - "stock.go", - "worker.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/ignition", - visibility = ["//visibility:public"], - deps = [ - "//pkg/asset:go_default_library", - "//pkg/asset/ignition/content:go_default_library", - "//pkg/asset/installconfig:go_default_library", - "//pkg/asset/kubeconfig:go_default_library", - "//pkg/asset/manifests:go_default_library", - 
"//pkg/asset/tls:go_default_library", - "//pkg/types:go_default_library", - "//vendor/github.com/coreos/ignition/config/util:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_2/types:go_default_library", - "//vendor/github.com/vincent-petithory/dataurl:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "master_test.go", - "testasset_test.go", - "testutils_test.go", - "worker_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//pkg/asset:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - "//vendor/github.com/vincent-petithory/dataurl:go_default_library", - ], -) diff --git a/pkg/asset/ignition/content/BUILD.bazel b/pkg/asset/ignition/content/BUILD.bazel deleted file mode 100644 index 7fd8c130e9f..00000000000 --- a/pkg/asset/ignition/content/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bootkube.go", - "doc.go", - "kubelet.go", - "tectonic.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/ignition/content", - visibility = ["//visibility:public"], -) diff --git a/pkg/asset/installconfig/BUILD.bazel b/pkg/asset/installconfig/BUILD.bazel deleted file mode 100644 index 32df4405225..00000000000 --- a/pkg/asset/installconfig/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "clusterid.go", - "doc.go", - "installconfig.go", - "platform.go", - "ssh.go", - "stock.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/installconfig", - visibility = ["//visibility:public"], - deps = [ - "//installer/pkg/validate:go_default_library", - "//pkg/asset:go_default_library", - "//pkg/ipnet:go_default_library", - "//pkg/types:go_default_library", - "//vendor/github.com/AlecAivazis/survey:go_default_library", - "//vendor/github.com/apparentlymart/go-cidr/cidr:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/pborman/uuid:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["installconfig_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/asset:go_default_library", - "//pkg/ipnet:go_default_library", - "//pkg/types:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - ], -) diff --git a/pkg/asset/kubeconfig/BUILD.bazel b/pkg/asset/kubeconfig/BUILD.bazel deleted file mode 100644 index eb2de04d738..00000000000 --- a/pkg/asset/kubeconfig/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "kubeconfig.go", - "stock.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/kubeconfig", - visibility = ["//visibility:public"], - deps = [ - "//pkg/asset:go_default_library", - "//pkg/asset/installconfig:go_default_library", - "//pkg/asset/tls:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["kubeconfig_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/asset:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - ], -) diff --git 
a/pkg/asset/manifests/BUILD.bazel b/pkg/asset/manifests/BUILD.bazel deleted file mode 100644 index bdcdf4c9776..00000000000 --- a/pkg/asset/manifests/BUILD.bazel +++ /dev/null @@ -1,41 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "kube-addon-operator.go", - "kube-core-operator.go", - "machine-api-operator.go", - "network-operator.go", - "operators.go", - "stock.go", - "tectonic.go", - "template.go", - "utils.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/manifests", - visibility = ["//visibility:public"], - deps = [ - "//pkg/asset:go_default_library", - "//pkg/asset/installconfig:go_default_library", - "//pkg/asset/kubeconfig:go_default_library", - "//pkg/asset/manifests/content/bootkube:go_default_library", - "//pkg/asset/manifests/content/tectonic/ingress:go_default_library", - "//pkg/asset/manifests/content/tectonic/rbac:go_default_library", - "//pkg/asset/manifests/content/tectonic/secrets:go_default_library", - "//pkg/asset/manifests/content/tectonic/security:go_default_library", - "//pkg/asset/manifests/content/tectonic/updater:go_default_library", - "//pkg/asset/manifests/content/tectonic/updater/appversions:go_default_library", - "//pkg/asset/manifests/content/tectonic/updater/operators:go_default_library", - "//pkg/asset/tls:go_default_library", - "//pkg/rhcos:go_default_library", - "//pkg/types:go_default_library", - "//vendor/github.com/apparentlymart/go-cidr/cidr:go_default_library", - "//vendor/github.com/coreos/tectonic-config/config/kube-addon:go_default_library", - "//vendor/github.com/coreos/tectonic-config/config/kube-core:go_default_library", - "//vendor/github.com/coreos/tectonic-config/config/tectonic-network:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", - ], -) diff --git a/pkg/asset/manifests/content/bootkube/BUILD.bazel b/pkg/asset/manifests/content/bootkube/BUILD.bazel deleted file mode 100644 index a117993d240..00000000000 --- a/pkg/asset/manifests/content/bootkube/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "01-tectonic-namespace.go", - "02-ingress-namespace.go", - "03-openshift-web-console-namespace.go", - "04-openshift-machine-config-operator.go", - "05-openshift-cluster-api-namespace.go", - "app-version-kind.go", - "app-version-mao.go", - "app-version-tectonic-network.go", - "cluster-apiserver-certs.go", - "ign-config.go", - "kube-apiserver-secret.go", - "kube-cloud-config.go", - "kube-controller-manager-secret.go", - "machine-api-operator.go", - "machine-config-operator-00-config-crd.go", - "machine-config-operator-01-images-configmap.go", - "machine-config-operator-02-rbac.go", - "machine-config-operator-03-deployment.go", - "machine-config-server-tls-secret.go", - "openshift-apiserver-secret.go", - "operatorstatus-crd.go", - "pull.go", - "tectonic-network-operator.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/manifests/content/bootkube", - visibility = ["//visibility:public"], -) diff --git a/pkg/asset/manifests/content/tectonic/ingress/BUILD.bazel b/pkg/asset/manifests/content/tectonic/ingress/BUILD.bazel deleted file mode 100644 index 5a2407d61a4..00000000000 --- a/pkg/asset/manifests/content/tectonic/ingress/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster-config.go", - "pull.go", - "svc-account.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/manifests/content/tectonic/ingress", - visibility = ["//visibility:public"], -) diff --git a/pkg/asset/manifests/content/tectonic/rbac/BUILD.bazel b/pkg/asset/manifests/content/tectonic/rbac/BUILD.bazel deleted file mode 100644 index 126e4729c48..00000000000 --- a/pkg/asset/manifests/content/tectonic/rbac/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "binding-admin.go", - "binding-discovery.go", - "role-admin.go", - "role-user.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/manifests/content/tectonic/rbac", - visibility = ["//visibility:public"], -) diff --git a/pkg/asset/manifests/content/tectonic/secrets/BUILD.bazel b/pkg/asset/manifests/content/tectonic/secrets/BUILD.bazel deleted file mode 100644 index e6d51f8e086..00000000000 --- a/pkg/asset/manifests/content/tectonic/secrets/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "ca-cert.go", - "ingress-tls.go", - "pull.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/manifests/content/tectonic/secrets", - visibility = ["//visibility:public"], -) diff --git a/pkg/asset/manifests/content/tectonic/security/BUILD.bazel b/pkg/asset/manifests/content/tectonic/security/BUILD.bazel deleted file mode 100644 index 97bb9433685..00000000000 --- a/pkg/asset/manifests/content/tectonic/security/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["priviledged-scc-tectonic.go"], - importpath = "github.com/openshift/installer/pkg/asset/manifests/content/tectonic/security", - visibility = ["//visibility:public"], -) diff --git a/pkg/asset/manifests/content/tectonic/updater/BUILD.bazel b/pkg/asset/manifests/content/tectonic/updater/BUILD.bazel deleted file mode 100644 index 5c56adde9bd..00000000000 --- a/pkg/asset/manifests/content/tectonic/updater/BUILD.bazel +++ /dev/null @@ -1,11 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "app-version-kind.go", - "migration-status-kind.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/manifests/content/tectonic/updater", - visibility = ["//visibility:public"], -) diff --git a/pkg/asset/manifests/content/tectonic/updater/appversions/BUILD.bazel b/pkg/asset/manifests/content/tectonic/updater/appversions/BUILD.bazel deleted file mode 100644 index a5c4c8af60c..00000000000 --- a/pkg/asset/manifests/content/tectonic/updater/appversions/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "app-version-kube-addon.go", - "app-version-kube-core.go", - "app-version-tectonic-cluster.go", - "app-version-tectonic-ingress.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/manifests/content/tectonic/updater/appversions", - visibility = ["//visibility:public"], -) diff --git a/pkg/asset/manifests/content/tectonic/updater/operators/BUILD.bazel b/pkg/asset/manifests/content/tectonic/updater/operators/BUILD.bazel deleted file mode 100644 index 
ad5eeeedd75..00000000000 --- a/pkg/asset/manifests/content/tectonic/updater/operators/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "kube-addon-operator.go", - "kube-core-operator.go", - "tectonic-ingress-controller-operator.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/manifests/content/tectonic/updater/operators", - visibility = ["//visibility:public"], -) diff --git a/pkg/asset/metadata/BUILD.bazel b/pkg/asset/metadata/BUILD.bazel deleted file mode 100644 index 0e9e4cbc3c8..00000000000 --- a/pkg/asset/metadata/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "metadata.go", - "stock.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/metadata", - visibility = ["//visibility:public"], - deps = [ - "//pkg/asset:go_default_library", - "//pkg/asset/cluster:go_default_library", - "//pkg/asset/installconfig:go_default_library", - "//pkg/types:go_default_library", - ], -) diff --git a/pkg/asset/stock/BUILD.bazel b/pkg/asset/stock/BUILD.bazel deleted file mode 100644 index de805fe0bc8..00000000000 --- a/pkg/asset/stock/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "stock.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/stock", - visibility = ["//visibility:public"], - deps = [ - "//pkg/asset/cluster:go_default_library", - "//pkg/asset/ignition:go_default_library", - "//pkg/asset/installconfig:go_default_library", - "//pkg/asset/kubeconfig:go_default_library", - "//pkg/asset/manifests:go_default_library", - "//pkg/asset/metadata:go_default_library", - "//pkg/asset/tls:go_default_library", - ], -) diff --git a/pkg/asset/tls/BUILD.bazel b/pkg/asset/tls/BUILD.bazel deleted file mode 100644 index 6c1c64a3005..00000000000 --- a/pkg/asset/tls/BUILD.bazel +++ /dev/null @@ -1,39 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "certkey_test.go", - "tls_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//pkg/asset:go_default_library", - "//pkg/types:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = [ - "certkey.go", - "doc.go", - "helper.go", - "keypair.go", - "root.go", - "stock.go", - "tls.go", - "utils.go", - ], - importpath = "github.com/openshift/installer/pkg/asset/tls", - visibility = ["//visibility:public"], - deps = [ - "//pkg/asset:go_default_library", - "//pkg/asset/installconfig:go_default_library", - "//pkg/types:go_default_library", - "//vendor/github.com/apparentlymart/go-cidr/cidr:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - ], -) diff --git a/pkg/destroy/BUILD.bazel b/pkg/destroy/BUILD.bazel deleted file mode 100644 index 88ead3d58b7..00000000000 --- a/pkg/destroy/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "destroyer.go", - "doc.go", - ], - importpath = "github.com/openshift/installer/pkg/destroy", - visibility = ["//visibility:public"], - deps = [ - "//pkg/asset/metadata:go_default_library", - "//pkg/types:go_default_library", - 
"//vendor/github.com/openshift/hive/contrib/pkg/aws_tag_deprovision:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - ], -) diff --git a/pkg/destroy/libvirt/BUILD.bazel b/pkg/destroy/libvirt/BUILD.bazel deleted file mode 100644 index a77a1b74374..00000000000 --- a/pkg/destroy/libvirt/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["libvirt_prefix_deprovision.go"], - importpath = "github.com/openshift/installer/pkg/destroy/libvirt", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/libvirt/libvirt-go:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - ], -) diff --git a/pkg/ipnet/BUILD.bazel b/pkg/ipnet/BUILD.bazel deleted file mode 100644 index 8b0557e8054..00000000000 --- a/pkg/ipnet/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = ["ipnet.go"], - importpath = "github.com/openshift/installer/pkg/ipnet", - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["ipnet_test.go"], - embed = [":go_default_library"], -) diff --git a/pkg/rhcos/BUILD.bazel b/pkg/rhcos/BUILD.bazel deleted file mode 100644 index 2fbd3b81412..00000000000 --- a/pkg/rhcos/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "ami.go", - "doc.go", - ], - importpath = "github.com/openshift/installer/pkg/rhcos", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library", - ], -) diff --git a/pkg/terraform/BUILD.bazel b/pkg/terraform/BUILD.bazel deleted file mode 100644 index 516a290f965..00000000000 --- a/pkg/terraform/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "executor.go", - "terraform.go", - ], - importpath = "github.com/openshift/installer/pkg/terraform", - visibility = ["//visibility:public"], - deps = ["//pkg/types/config:go_default_library"], -) diff --git a/pkg/types/BUILD.bazel b/pkg/types/BUILD.bazel deleted file mode 100644 index 4a6b7aaf5bb..00000000000 --- a/pkg/types/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clustermetadata.go", - "doc.go", - "installconfig.go", - "machinepools.go", - ], - importpath = "github.com/openshift/installer/pkg/types", - visibility = ["//visibility:public"], - deps = [ - "//pkg/ipnet:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - ], -) diff --git a/pkg/types/config/BUILD.bazel b/pkg/types/config/BUILD.bazel deleted file mode 100644 index 7e02f51a847..00000000000 --- a/pkg/types/config/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "parser.go", - "types.go", - "validate.go", - ], - importpath = "github.com/openshift/installer/pkg/types/config", - visibility = 
["//visibility:public"], - deps = [ - "//installer/pkg/validate:go_default_library", - "//pkg/rhcos:go_default_library", - "//pkg/types:go_default_library", - "//pkg/types/config/aws:go_default_library", - "//pkg/types/config/libvirt:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_2:go_default_library", - "//vendor/github.com/coreos/tectonic-config/config/tectonic-network:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = ["validate_test.go"], - data = glob(["fixtures/**"]), - embed = [":go_default_library"], - deps = [ - "//pkg/types/config/aws:go_default_library", - "//pkg/types/config/libvirt:go_default_library", - ], -) diff --git a/pkg/types/config/aws/BUILD.bazel b/pkg/types/config/aws/BUILD.bazel deleted file mode 100644 index 4d4f29acd57..00000000000 --- a/pkg/types/config/aws/BUILD.bazel +++ /dev/null @@ -1,8 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["aws.go"], - importpath = "github.com/openshift/installer/pkg/types/config/aws", - visibility = ["//visibility:public"], -) diff --git a/pkg/types/config/libvirt/BUILD.bazel b/pkg/types/config/libvirt/BUILD.bazel deleted file mode 100644 index a3c2d4a20aa..00000000000 --- a/pkg/types/config/libvirt/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cache.go", - "libvirt.go", - ], - importpath = "github.com/openshift/installer/pkg/types/config/libvirt", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/apparentlymart/go-cidr/cidr:go_default_library", - "//vendor/github.com/gregjones/httpcache:go_default_library", - "//vendor/github.com/gregjones/httpcache/diskcache:go_default_library", - ], -) diff --git a/tests/smoke/BUILD.bazel b/tests/smoke/BUILD.bazel deleted file mode 100644 index 428e9080886..00000000000 --- a/tests/smoke/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_test") - -go_test( - name = "go_default_test", - srcs = [ - "cluster_test.go", - "common_test.go", - "smoke_test.go", - ], - pure = "on", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/clientcmd:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/resource:go_default_library", - ], -) diff --git a/tests/smoke/vendor/cloud.google.com/go/BUILD.bazel b/tests/smoke/vendor/cloud.google.com/go/BUILD.bazel deleted file mode 100644 index 04890807baa..00000000000 --- a/tests/smoke/vendor/cloud.google.com/go/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["cloud.go"], - importmap = "installer/tests/smoke/vendor/cloud.google.com/go", - importpath = "cloud.google.com/go", - visibility = ["//visibility:public"], 
-) diff --git a/tests/smoke/vendor/cloud.google.com/go/compute/metadata/BUILD.bazel b/tests/smoke/vendor/cloud.google.com/go/compute/metadata/BUILD.bazel deleted file mode 100644 index 7d1d18f06b0..00000000000 --- a/tests/smoke/vendor/cloud.google.com/go/compute/metadata/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["metadata.go"], - importmap = "installer/tests/smoke/vendor/cloud.google.com/go/compute/metadata", - importpath = "cloud.google.com/go/compute/metadata", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/cloud.google.com/go/internal:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/context/ctxhttp:go_default_library", - ], -) diff --git a/tests/smoke/vendor/cloud.google.com/go/internal/BUILD.bazel b/tests/smoke/vendor/cloud.google.com/go/internal/BUILD.bazel deleted file mode 100644 index 046f47b596a..00000000000 --- a/tests/smoke/vendor/cloud.google.com/go/internal/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["cloud.go"], - importmap = "installer/tests/smoke/vendor/cloud.google.com/go/internal", - importpath = "cloud.google.com/go/internal", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/BUILD.bazel b/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/BUILD.bazel deleted file mode 100644 index c471ed5cafd..00000000000 --- a/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "autorest.go", - "client.go", - "error.go", - "preparer.go", - "responder.go", - "sender.go", - "utility.go", - "version.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/Azure/go-autorest/autorest", - importpath = "github.com/Azure/go-autorest/autorest", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD.bazel b/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD.bazel deleted file mode 100644 index df8b4c10128..00000000000 --- a/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/azure/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "async.go", - "azure.go", - "config.go", - "devicetoken.go", - "environments.go", - "persist.go", - "token.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/azure", - importpath = "github.com/Azure/go-autorest/autorest/azure", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/Azure/go-autorest/autorest:go_default_library", - "//tests/smoke/vendor/github.com/Azure/go-autorest/autorest/date:go_default_library", - "//tests/smoke/vendor/github.com/dgrijalva/jwt-go:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/date/BUILD.bazel b/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/date/BUILD.bazel deleted file mode 100644 index becc363c25b..00000000000 --- a/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/date/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "date.go", - "time.go", - "timerfc1123.go", - "utility.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/Azure/go-autorest/autorest/date", - importpath = "github.com/Azure/go-autorest/autorest/date", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/PuerkitoBio/purell/BUILD.bazel b/tests/smoke/vendor/github.com/PuerkitoBio/purell/BUILD.bazel deleted file mode 100644 index 9f999df4d05..00000000000 --- a/tests/smoke/vendor/github.com/PuerkitoBio/purell/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["purell.go"], - importmap = "installer/tests/smoke/vendor/github.com/PuerkitoBio/purell", - importpath = "github.com/PuerkitoBio/purell", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/PuerkitoBio/urlesc:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/idna:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/secure/precis:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/unicode/norm:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/PuerkitoBio/urlesc/BUILD.bazel b/tests/smoke/vendor/github.com/PuerkitoBio/urlesc/BUILD.bazel deleted file mode 100644 index 52e61c14bef..00000000000 --- a/tests/smoke/vendor/github.com/PuerkitoBio/urlesc/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["urlesc.go"], - importmap = "installer/tests/smoke/vendor/github.com/PuerkitoBio/urlesc", - importpath = "github.com/PuerkitoBio/urlesc", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/beorn7/perks/quantile/BUILD.bazel b/tests/smoke/vendor/github.com/beorn7/perks/quantile/BUILD.bazel deleted file mode 100644 index b2f14557d72..00000000000 --- a/tests/smoke/vendor/github.com/beorn7/perks/quantile/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["stream.go"], - importmap = "installer/tests/smoke/vendor/github.com/beorn7/perks/quantile", - importpath = "github.com/beorn7/perks/quantile", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/coreos/ktestutil/testworkload/BUILD.bazel b/tests/smoke/vendor/github.com/coreos/ktestutil/testworkload/BUILD.bazel deleted file mode 100644 index 62a9a5f5f1f..00000000000 --- a/tests/smoke/vendor/github.com/coreos/ktestutil/testworkload/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["nginx_workload.go"], - importmap = "installer/tests/smoke/vendor/github.com/coreos/ktestutil/testworkload", - importpath = "github.com/coreos/ktestutil/testworkload", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/davecgh/go-spew/spew/BUILD.bazel b/tests/smoke/vendor/github.com/davecgh/go-spew/spew/BUILD.bazel deleted file mode 100644 index 69080277914..00000000000 --- a/tests/smoke/vendor/github.com/davecgh/go-spew/spew/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bypass.go", - "common.go", - "config.go", - "doc.go", - "dump.go", - "format.go", - "spew.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/davecgh/go-spew/spew", - importpath = "github.com/davecgh/go-spew/spew", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/dgrijalva/jwt-go/BUILD.bazel b/tests/smoke/vendor/github.com/dgrijalva/jwt-go/BUILD.bazel deleted file mode 100644 index 986d76cb39a..00000000000 --- a/tests/smoke/vendor/github.com/dgrijalva/jwt-go/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "claims.go", - "doc.go", - "ecdsa.go", - "ecdsa_utils.go", - "errors.go", - "hmac.go", - "map_claims.go", - "none.go", - "parser.go", - "rsa.go", - "rsa_pss.go", - "rsa_utils.go", - "signing_method.go", - "token.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/dgrijalva/jwt-go", - importpath = "github.com/dgrijalva/jwt-go", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/docker/distribution/BUILD.bazel b/tests/smoke/vendor/github.com/docker/distribution/BUILD.bazel deleted file mode 100644 index 5c9144705c4..00000000000 --- a/tests/smoke/vendor/github.com/docker/distribution/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "blobs.go", - "doc.go", - "errors.go", - "manifests.go", - "registry.go", - "tags.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/distribution", - importpath = "github.com/docker/distribution", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/docker/distribution/digest:go_default_library", - "//tests/smoke/vendor/github.com/docker/distribution/reference:go_default_library", - "//vendor/github.com/docker/distribution/context:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/docker/distribution/digest/BUILD.bazel b/tests/smoke/vendor/github.com/docker/distribution/digest/BUILD.bazel deleted file mode 100644 index 152152ee65f..00000000000 --- a/tests/smoke/vendor/github.com/docker/distribution/digest/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "digest.go", - "digester.go", - "doc.go", - "set.go", - "verifiers.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/distribution/digest", - importpath = "github.com/docker/distribution/digest", - visibility = ["//visibility:public"], -) diff --git 
a/tests/smoke/vendor/github.com/docker/distribution/reference/BUILD.bazel b/tests/smoke/vendor/github.com/docker/distribution/reference/BUILD.bazel deleted file mode 100644 index b1f9bb03755..00000000000 --- a/tests/smoke/vendor/github.com/docker/distribution/reference/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "reference.go", - "regexp.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/distribution/reference", - importpath = "github.com/docker/distribution/reference", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/docker/distribution/digest:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/BUILD.bazel deleted file mode 100644 index ab26cbe4645..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api", - importpath = "github.com/docker/engine-api", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/client/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/client/BUILD.bazel deleted file mode 100644 index b04f08ea053..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/client/BUILD.bazel +++ /dev/null @@ -1,83 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "client_darwin.go", - "client_unix.go", - "client_windows.go", - "container_attach.go", - "container_commit.go", - "container_copy.go", - "container_create.go", - "container_diff.go", - "container_exec.go", - "container_export.go", - "container_inspect.go", - "container_kill.go", - "container_list.go", - "container_logs.go", - "container_pause.go", - "container_remove.go", - "container_rename.go", - "container_resize.go", - "container_restart.go", - "container_start.go", - "container_stats.go", - "container_stop.go", - "container_top.go", - "container_unpause.go", - "container_update.go", - "container_wait.go", - "errors.go", - "events.go", - "hijack.go", - "image_build.go", - "image_create.go", - "image_history.go", - "image_import.go", - "image_inspect.go", - "image_list.go", - "image_load.go", - "image_pull.go", - "image_push.go", - "image_remove.go", - "image_save.go", - "image_search.go", - "image_tag.go", - "info.go", - "interface.go", - "login.go", - "network_connect.go", - "network_create.go", - "network_disconnect.go", - "network_inspect.go", - "network_list.go", - "network_remove.go", - "request.go", - "version.go", - "volume_create.go", - "volume_inspect.go", - "volume_list.go", - "volume_remove.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/client", - importpath = "github.com/docker/engine-api/client", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/docker/distribution/reference:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/client/transport:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/client/transport/cancellable:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/types:go_default_library", - 
"//tests/smoke/vendor/github.com/docker/engine-api/types/container:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/types/filters:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/types/network:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/types/reference:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/types/registry:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/types/time:go_default_library", - "//tests/smoke/vendor/github.com/docker/go-connections/sockets:go_default_library", - "//tests/smoke/vendor/github.com/docker/go-connections/tlsconfig:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/client/transport/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/client/transport/BUILD.bazel deleted file mode 100644 index 6ea8b77a9c7..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/client/transport/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "transport.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/client/transport", - importpath = "github.com/docker/engine-api/client/transport", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/docker/go-connections/sockets:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/client/transport/cancellable/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/client/transport/cancellable/BUILD.bazel deleted file mode 100644 index 27821fd18c3..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/client/transport/cancellable/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "canceler.go", - "canceler_go14.go", - "cancellable.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/client/transport/cancellable", - importpath = "github.com/docker/engine-api/client/transport/cancellable", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/docker/engine-api/client/transport:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/types/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/types/BUILD.bazel deleted file mode 100644 index 1402082debf..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/types/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "auth.go", - "client.go", - "configs.go", - "seccomp.go", - "stats.go", - "types.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/types", - importpath = "github.com/docker/engine-api/types", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/docker/engine-api/types/container:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/types/filters:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/types/network:go_default_library", - 
"//tests/smoke/vendor/github.com/docker/engine-api/types/registry:go_default_library", - "//tests/smoke/vendor/github.com/docker/go-connections/nat:go_default_library", - "//tests/smoke/vendor/github.com/docker/go-units:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/types/blkiodev/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/types/blkiodev/BUILD.bazel deleted file mode 100644 index 4b2603c1585..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/types/blkiodev/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["blkio.go"], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/types/blkiodev", - importpath = "github.com/docker/engine-api/types/blkiodev", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/types/container/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/types/container/BUILD.bazel deleted file mode 100644 index d1c6678ba52..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/types/container/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "host_config.go", - "hostconfig_unix.go", - "hostconfig_windows.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/types/container", - importpath = "github.com/docker/engine-api/types/container", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/docker/engine-api/types/blkiodev:go_default_library", - "//tests/smoke/vendor/github.com/docker/engine-api/types/strslice:go_default_library", - "//tests/smoke/vendor/github.com/docker/go-connections/nat:go_default_library", - "//tests/smoke/vendor/github.com/docker/go-units:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/types/filters/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/types/filters/BUILD.bazel deleted file mode 100644 index 3c326d55916..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/types/filters/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["parse.go"], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/types/filters", - importpath = "github.com/docker/engine-api/types/filters", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/docker/engine-api/types/versions:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/types/network/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/types/network/BUILD.bazel deleted file mode 100644 index 853de37249c..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/types/network/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["network.go"], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/types/network", - importpath = "github.com/docker/engine-api/types/network", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/types/reference/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/types/reference/BUILD.bazel deleted file 
mode 100644 index e54d41a9306..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/types/reference/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["image_reference.go"], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/types/reference", - importpath = "github.com/docker/engine-api/types/reference", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/docker/distribution/reference:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/types/registry/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/types/registry/BUILD.bazel deleted file mode 100644 index 25549182bc2..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/types/registry/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["registry.go"], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/types/registry", - importpath = "github.com/docker/engine-api/types/registry", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/types/strslice/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/types/strslice/BUILD.bazel deleted file mode 100644 index c078a90ca8d..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/types/strslice/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["strslice.go"], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/types/strslice", - importpath = "github.com/docker/engine-api/types/strslice", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/types/time/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/types/time/BUILD.bazel deleted file mode 100644 index a2e5949739e..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/types/time/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["timestamp.go"], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/types/time", - importpath = "github.com/docker/engine-api/types/time", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/docker/engine-api/types/versions/BUILD.bazel b/tests/smoke/vendor/github.com/docker/engine-api/types/versions/BUILD.bazel deleted file mode 100644 index 210e82488b5..00000000000 --- a/tests/smoke/vendor/github.com/docker/engine-api/types/versions/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["compare.go"], - importmap = "installer/tests/smoke/vendor/github.com/docker/engine-api/types/versions", - importpath = "github.com/docker/engine-api/types/versions", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/docker/go-connections/BUILD.bazel b/tests/smoke/vendor/github.com/docker/go-connections/BUILD.bazel deleted file mode 100644 index b8e3c581b71..00000000000 --- a/tests/smoke/vendor/github.com/docker/go-connections/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = 
"go_default_library", - srcs = ["doc.go"], - importmap = "installer/tests/smoke/vendor/github.com/docker/go-connections", - importpath = "github.com/docker/go-connections", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/docker/go-connections/nat/BUILD.bazel b/tests/smoke/vendor/github.com/docker/go-connections/nat/BUILD.bazel deleted file mode 100644 index a7dd2173741..00000000000 --- a/tests/smoke/vendor/github.com/docker/go-connections/nat/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "nat.go", - "parse.go", - "sort.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/go-connections/nat", - importpath = "github.com/docker/go-connections/nat", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/docker/go-connections/sockets/BUILD.bazel b/tests/smoke/vendor/github.com/docker/go-connections/sockets/BUILD.bazel deleted file mode 100644 index dcd7906e0c2..00000000000 --- a/tests/smoke/vendor/github.com/docker/go-connections/sockets/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "inmem_socket.go", - "proxy.go", - "sockets.go", - "sockets_unix.go", - "sockets_windows.go", - "tcp_socket.go", - "unix_socket.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/go-connections/sockets", - importpath = "github.com/docker/go-connections/sockets", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/proxy:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/Sirupsen/logrus:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/Sirupsen/logrus:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/github.com/Sirupsen/logrus:go_default_library", - "//vendor/github.com/opencontainers/runc/libcontainer/user:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/Microsoft/go-winio:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/tests/smoke/vendor/github.com/docker/go-connections/tlsconfig/BUILD.bazel b/tests/smoke/vendor/github.com/docker/go-connections/tlsconfig/BUILD.bazel deleted file mode 100644 index ecfe2ecf5c9..00000000000 --- a/tests/smoke/vendor/github.com/docker/go-connections/tlsconfig/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "config_client_ciphers.go", - "config_legacy_client_ciphers.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/go-connections/tlsconfig", - importpath = "github.com/docker/go-connections/tlsconfig", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/Sirupsen/logrus:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/docker/go-units/BUILD.bazel b/tests/smoke/vendor/github.com/docker/go-units/BUILD.bazel deleted file mode 100644 index da2e017d312..00000000000 --- a/tests/smoke/vendor/github.com/docker/go-units/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", 
"go_library") - -go_library( - name = "go_default_library", - srcs = [ - "duration.go", - "size.go", - "ulimit.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/docker/go-units", - importpath = "github.com/docker/go-units", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/emicklei/go-restful-swagger12/BUILD.bazel b/tests/smoke/vendor/github.com/emicklei/go-restful-swagger12/BUILD.bazel deleted file mode 100644 index 122dcd3ca5e..00000000000 --- a/tests/smoke/vendor/github.com/emicklei/go-restful-swagger12/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api_declaration_list.go", - "config.go", - "model_builder.go", - "model_list.go", - "model_property_ext.go", - "model_property_list.go", - "ordered_route_map.go", - "swagger.go", - "swagger_builder.go", - "swagger_webservice.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/emicklei/go-restful-swagger12", - importpath = "github.com/emicklei/go-restful-swagger12", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/emicklei/go-restful:go_default_library", - "//tests/smoke/vendor/github.com/emicklei/go-restful/log:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/emicklei/go-restful/BUILD.bazel b/tests/smoke/vendor/github.com/emicklei/go-restful/BUILD.bazel deleted file mode 100644 index cf7cd2a5079..00000000000 --- a/tests/smoke/vendor/github.com/emicklei/go-restful/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "compress.go", - "compressor_cache.go", - "compressor_pools.go", - "compressors.go", - "constants.go", - "container.go", - "cors_filter.go", - "curly.go", - "curly_route.go", - "doc.go", - "entity_accessors.go", - "filter.go", - "jsr311.go", - "logger.go", - "mime.go", - "options_filter.go", - "parameter.go", - "path_expression.go", - "request.go", - "response.go", - "route.go", - "route_builder.go", - "router.go", - "service_error.go", - "web_service.go", - "web_service_container.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/emicklei/go-restful", - importpath = "github.com/emicklei/go-restful", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/emicklei/go-restful/log:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/emicklei/go-restful/log/BUILD.bazel b/tests/smoke/vendor/github.com/emicklei/go-restful/log/BUILD.bazel deleted file mode 100644 index 93d30992716..00000000000 --- a/tests/smoke/vendor/github.com/emicklei/go-restful/log/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["log.go"], - importmap = "installer/tests/smoke/vendor/github.com/emicklei/go-restful/log", - importpath = "github.com/emicklei/go-restful/log", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/evanphx/json-patch/BUILD.bazel b/tests/smoke/vendor/github.com/evanphx/json-patch/BUILD.bazel deleted file mode 100644 index 571468d0740..00000000000 --- a/tests/smoke/vendor/github.com/evanphx/json-patch/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "merge.go", - "patch.go", - ], - importmap = 
"installer/tests/smoke/vendor/github.com/evanphx/json-patch", - importpath = "github.com/evanphx/json-patch", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/exponent-io/jsonpath/BUILD.bazel b/tests/smoke/vendor/github.com/exponent-io/jsonpath/BUILD.bazel deleted file mode 100644 index 0e14f07d701..00000000000 --- a/tests/smoke/vendor/github.com/exponent-io/jsonpath/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "decoder.go", - "path.go", - "pathaction.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/exponent-io/jsonpath", - importpath = "github.com/exponent-io/jsonpath", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/fatih/camelcase/BUILD.bazel b/tests/smoke/vendor/github.com/fatih/camelcase/BUILD.bazel deleted file mode 100644 index 55610157b80..00000000000 --- a/tests/smoke/vendor/github.com/fatih/camelcase/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["camelcase.go"], - importmap = "installer/tests/smoke/vendor/github.com/fatih/camelcase", - importpath = "github.com/fatih/camelcase", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/ghodss/yaml/BUILD.bazel b/tests/smoke/vendor/github.com/ghodss/yaml/BUILD.bazel deleted file mode 100644 index 2f6283ebe6c..00000000000 --- a/tests/smoke/vendor/github.com/ghodss/yaml/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "fields.go", - "yaml.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/ghodss/yaml", - importpath = "github.com/ghodss/yaml", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/gopkg.in/yaml.v2:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/go-openapi/analysis/BUILD.bazel b/tests/smoke/vendor/github.com/go-openapi/analysis/BUILD.bazel deleted file mode 100644 index 2ae58a08302..00000000000 --- a/tests/smoke/vendor/github.com/go-openapi/analysis/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["analyzer.go"], - importmap = "installer/tests/smoke/vendor/github.com/go-openapi/analysis", - importpath = "github.com/go-openapi/analysis", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/go-openapi/jsonpointer:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/spec:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/swag:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/go-openapi/jsonpointer/BUILD.bazel b/tests/smoke/vendor/github.com/go-openapi/jsonpointer/BUILD.bazel deleted file mode 100644 index 6c3bad756ed..00000000000 --- a/tests/smoke/vendor/github.com/go-openapi/jsonpointer/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["pointer.go"], - importmap = "installer/tests/smoke/vendor/github.com/go-openapi/jsonpointer", - importpath = "github.com/go-openapi/jsonpointer", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/go-openapi/swag:go_default_library"], -) diff --git 
a/tests/smoke/vendor/github.com/go-openapi/jsonreference/BUILD.bazel b/tests/smoke/vendor/github.com/go-openapi/jsonreference/BUILD.bazel deleted file mode 100644 index e71165a7535..00000000000 --- a/tests/smoke/vendor/github.com/go-openapi/jsonreference/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["reference.go"], - importmap = "installer/tests/smoke/vendor/github.com/go-openapi/jsonreference", - importpath = "github.com/go-openapi/jsonreference", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/PuerkitoBio/purell:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/jsonpointer:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/go-openapi/loads/BUILD.bazel b/tests/smoke/vendor/github.com/go-openapi/loads/BUILD.bazel deleted file mode 100644 index b0d1f78e7ca..00000000000 --- a/tests/smoke/vendor/github.com/go-openapi/loads/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["spec.go"], - importmap = "installer/tests/smoke/vendor/github.com/go-openapi/loads", - importpath = "github.com/go-openapi/loads", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/go-openapi/analysis:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/spec:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/swag:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/go-openapi/spec/BUILD.bazel b/tests/smoke/vendor/github.com/go-openapi/spec/BUILD.bazel deleted file mode 100644 index f746fdaafd8..00000000000 --- a/tests/smoke/vendor/github.com/go-openapi/spec/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bindata.go", - "contact_info.go", - "expander.go", - "external_docs.go", - "header.go", - "info.go", - "items.go", - "license.go", - "operation.go", - "parameter.go", - "path_item.go", - "paths.go", - "ref.go", - "response.go", - "responses.go", - "schema.go", - "security_scheme.go", - "spec.go", - "swagger.go", - "tag.go", - "xml_object.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/go-openapi/spec", - importpath = "github.com/go-openapi/spec", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/go-openapi/jsonpointer:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/jsonreference:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/swag:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/go-openapi/swag/BUILD.bazel b/tests/smoke/vendor/github.com/go-openapi/swag/BUILD.bazel deleted file mode 100644 index 829bfbc0e9f..00000000000 --- a/tests/smoke/vendor/github.com/go-openapi/swag/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "convert.go", - "convert_types.go", - "json.go", - "loading.go", - "net.go", - "path.go", - "util.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/go-openapi/swag", - importpath = "github.com/go-openapi/swag", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/mailru/easyjson/jlexer:go_default_library", - 
"//tests/smoke/vendor/github.com/mailru/easyjson/jwriter:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/gogo/protobuf/proto/BUILD.bazel b/tests/smoke/vendor/github.com/gogo/protobuf/proto/BUILD.bazel deleted file mode 100644 index af7383f63ee..00000000000 --- a/tests/smoke/vendor/github.com/gogo/protobuf/proto/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clone.go", - "decode.go", - "decode_gogo.go", - "duration.go", - "duration_gogo.go", - "encode.go", - "encode_gogo.go", - "equal.go", - "extensions.go", - "extensions_gogo.go", - "lib.go", - "lib_gogo.go", - "message_set.go", - "pointer_unsafe.go", - "pointer_unsafe_gogo.go", - "properties.go", - "properties_gogo.go", - "skip_gogo.go", - "text.go", - "text_gogo.go", - "text_parser.go", - "timestamp.go", - "timestamp_gogo.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/gogo/protobuf/proto", - importpath = "github.com/gogo/protobuf/proto", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/gogo/protobuf/sortkeys/BUILD.bazel b/tests/smoke/vendor/github.com/gogo/protobuf/sortkeys/BUILD.bazel deleted file mode 100644 index 7959fe99c4b..00000000000 --- a/tests/smoke/vendor/github.com/gogo/protobuf/sortkeys/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["sortkeys.go"], - importmap = "installer/tests/smoke/vendor/github.com/gogo/protobuf/sortkeys", - importpath = "github.com/gogo/protobuf/sortkeys", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/golang/glog/BUILD.bazel b/tests/smoke/vendor/github.com/golang/glog/BUILD.bazel deleted file mode 100644 index a92b3d9c76d..00000000000 --- a/tests/smoke/vendor/github.com/golang/glog/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "glog.go", - "glog_file.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/golang/glog", - importpath = "github.com/golang/glog", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/golang/groupcache/BUILD.bazel b/tests/smoke/vendor/github.com/golang/groupcache/BUILD.bazel deleted file mode 100644 index 84ab5dba828..00000000000 --- a/tests/smoke/vendor/github.com/golang/groupcache/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "byteview.go", - "groupcache.go", - "http.go", - "peers.go", - "sinks.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/golang/groupcache", - importpath = "github.com/golang/groupcache", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/groupcache/lru:go_default_library", - "//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/golang/groupcache/consistenthash:go_default_library", - "//vendor/github.com/golang/groupcache/groupcachepb:go_default_library", - "//vendor/github.com/golang/groupcache/singleflight:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/golang/groupcache/lru/BUILD.bazel b/tests/smoke/vendor/github.com/golang/groupcache/lru/BUILD.bazel deleted file mode 100644 index d5f0bcf8ee8..00000000000 --- 
a/tests/smoke/vendor/github.com/golang/groupcache/lru/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["lru.go"], - importmap = "installer/tests/smoke/vendor/github.com/golang/groupcache/lru", - importpath = "github.com/golang/groupcache/lru", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/golang/protobuf/proto/BUILD.bazel b/tests/smoke/vendor/github.com/golang/protobuf/proto/BUILD.bazel deleted file mode 100644 index 2f13d552834..00000000000 --- a/tests/smoke/vendor/github.com/golang/protobuf/proto/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clone.go", - "decode.go", - "encode.go", - "equal.go", - "extensions.go", - "lib.go", - "message_set.go", - "pointer_unsafe.go", - "properties.go", - "text.go", - "text_parser.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/golang/protobuf/proto", - importpath = "github.com/golang/protobuf/proto", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/google/gofuzz/BUILD.bazel b/tests/smoke/vendor/github.com/google/gofuzz/BUILD.bazel deleted file mode 100644 index af3d1105794..00000000000 --- a/tests/smoke/vendor/github.com/google/gofuzz/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fuzz.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/google/gofuzz", - importpath = "github.com/google/gofuzz", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/hashicorp/golang-lru/BUILD.bazel b/tests/smoke/vendor/github.com/hashicorp/golang-lru/BUILD.bazel deleted file mode 100644 index b3c7a4fbf0c..00000000000 --- a/tests/smoke/vendor/github.com/hashicorp/golang-lru/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "2q.go", - "arc.go", - "lru.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/hashicorp/golang-lru", - importpath = "github.com/hashicorp/golang-lru", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/hashicorp/golang-lru/simplelru:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/hashicorp/golang-lru/simplelru/BUILD.bazel b/tests/smoke/vendor/github.com/hashicorp/golang-lru/simplelru/BUILD.bazel deleted file mode 100644 index f442908b35c..00000000000 --- a/tests/smoke/vendor/github.com/hashicorp/golang-lru/simplelru/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["lru.go"], - importmap = "installer/tests/smoke/vendor/github.com/hashicorp/golang-lru/simplelru", - importpath = "github.com/hashicorp/golang-lru/simplelru", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/howeyc/gopass/BUILD.bazel b/tests/smoke/vendor/github.com/howeyc/gopass/BUILD.bazel deleted file mode 100644 index f13d69cc153..00000000000 --- a/tests/smoke/vendor/github.com/howeyc/gopass/BUILD.bazel +++ /dev/null @@ -1,49 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "pass.go", - "terminal.go", - "terminal_solaris.go", - ], - importmap = 
"installer/tests/smoke/vendor/github.com/howeyc/gopass", - importpath = "github.com/howeyc/gopass", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//tests/smoke/vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/tests/smoke/vendor/github.com/imdario/mergo/BUILD.bazel b/tests/smoke/vendor/github.com/imdario/mergo/BUILD.bazel deleted file mode 100644 index 2a3cc30d068..00000000000 --- a/tests/smoke/vendor/github.com/imdario/mergo/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "map.go", - "merge.go", - "mergo.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/imdario/mergo", - importpath = "github.com/imdario/mergo", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel b/tests/smoke/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel deleted file mode 100644 index 8937030d4ce..00000000000 --- a/tests/smoke/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "trap_others.go", - "trap_windows.go", - "trap_windows_1.4.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/inconshreveable/mousetrap", - importpath = "github.com/inconshreveable/mousetrap", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/juju/ratelimit/BUILD.bazel b/tests/smoke/vendor/github.com/juju/ratelimit/BUILD.bazel deleted file mode 100644 index 219d36e5ed5..00000000000 --- a/tests/smoke/vendor/github.com/juju/ratelimit/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "ratelimit.go", - "reader.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/juju/ratelimit", - importpath = "github.com/juju/ratelimit", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/mailru/easyjson/BUILD.bazel 
b/tests/smoke/vendor/github.com/mailru/easyjson/BUILD.bazel deleted file mode 100644 index c2f0892249c..00000000000 --- a/tests/smoke/vendor/github.com/mailru/easyjson/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "helpers.go", - "raw.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/mailru/easyjson", - importpath = "github.com/mailru/easyjson", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/mailru/easyjson/jlexer:go_default_library", - "//tests/smoke/vendor/github.com/mailru/easyjson/jwriter:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/mailru/easyjson/buffer/BUILD.bazel b/tests/smoke/vendor/github.com/mailru/easyjson/buffer/BUILD.bazel deleted file mode 100644 index 8931ead23c9..00000000000 --- a/tests/smoke/vendor/github.com/mailru/easyjson/buffer/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["pool.go"], - importmap = "installer/tests/smoke/vendor/github.com/mailru/easyjson/buffer", - importpath = "github.com/mailru/easyjson/buffer", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/mailru/easyjson/jlexer/BUILD.bazel b/tests/smoke/vendor/github.com/mailru/easyjson/jlexer/BUILD.bazel deleted file mode 100644 index 296c67e8ad8..00000000000 --- a/tests/smoke/vendor/github.com/mailru/easyjson/jlexer/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "error.go", - "lexer.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/mailru/easyjson/jlexer", - importpath = "github.com/mailru/easyjson/jlexer", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/mailru/easyjson/jwriter/BUILD.bazel b/tests/smoke/vendor/github.com/mailru/easyjson/jwriter/BUILD.bazel deleted file mode 100644 index ef52f474a36..00000000000 --- a/tests/smoke/vendor/github.com/mailru/easyjson/jwriter/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["writer.go"], - importmap = "installer/tests/smoke/vendor/github.com/mailru/easyjson/jwriter", - importpath = "github.com/mailru/easyjson/jwriter", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/mailru/easyjson/buffer:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel b/tests/smoke/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel deleted file mode 100644 index 322b2ca1162..00000000000 --- a/tests/smoke/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "decode.go", - "doc.go", - "encode.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil", - importpath = "github.com/matttproud/golang_protobuf_extensions/pbutil", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/pborman/uuid/BUILD.bazel 
b/tests/smoke/vendor/github.com/pborman/uuid/BUILD.bazel deleted file mode 100644 index 1bb2a55b61a..00000000000 --- a/tests/smoke/vendor/github.com/pborman/uuid/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "dce.go", - "doc.go", - "hash.go", - "json.go", - "node.go", - "time.go", - "util.go", - "uuid.go", - "version1.go", - "version4.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/pborman/uuid", - importpath = "github.com/pborman/uuid", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel b/tests/smoke/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel deleted file mode 100644 index 8725e276089..00000000000 --- a/tests/smoke/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "collector.go", - "counter.go", - "desc.go", - "doc.go", - "expvar_collector.go", - "fnv.go", - "gauge.go", - "go_collector.go", - "histogram.go", - "http.go", - "metric.go", - "observer.go", - "process_collector.go", - "registry.go", - "summary.go", - "timer.go", - "untyped.go", - "value.go", - "vec.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/prometheus/client_golang/prometheus", - importpath = "github.com/prometheus/client_golang/prometheus", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/beorn7/perks/quantile:go_default_library", - "//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/prometheus/client_model/go:go_default_library", - "//tests/smoke/vendor/github.com/prometheus/common/expfmt:go_default_library", - "//tests/smoke/vendor/github.com/prometheus/common/model:go_default_library", - "//tests/smoke/vendor/github.com/prometheus/procfs:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD.bazel b/tests/smoke/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD.bazel deleted file mode 100644 index fbab43471fd..00000000000 --- a/tests/smoke/vendor/github.com/prometheus/client_golang/prometheus/promhttp/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "delegator_1_7.go", - "delegator_1_8.go", - "http.go", - "instrument_client.go", - "instrument_client_1_8.go", - "instrument_server.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/prometheus/client_golang/prometheus/promhttp", - importpath = "github.com/prometheus/client_golang/prometheus/promhttp", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//tests/smoke/vendor/github.com/prometheus/client_model/go:go_default_library", - "//tests/smoke/vendor/github.com/prometheus/common/expfmt:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/prometheus/client_model/go/BUILD.bazel b/tests/smoke/vendor/github.com/prometheus/client_model/go/BUILD.bazel deleted file mode 100644 index 4a024a1fb12..00000000000 --- a/tests/smoke/vendor/github.com/prometheus/client_model/go/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") 
- -go_library( - name = "go_default_library", - srcs = ["metrics.pb.go"], - importmap = "installer/tests/smoke/vendor/github.com/prometheus/client_model/go", - importpath = "github.com/prometheus/client_model/go", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/prometheus/common/expfmt/BUILD.bazel b/tests/smoke/vendor/github.com/prometheus/common/expfmt/BUILD.bazel deleted file mode 100644 index 836a4d1e8d5..00000000000 --- a/tests/smoke/vendor/github.com/prometheus/common/expfmt/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "decode.go", - "encode.go", - "expfmt.go", - "text_create.go", - "text_parse.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/prometheus/common/expfmt", - importpath = "github.com/prometheus/common/expfmt", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil:go_default_library", - "//tests/smoke/vendor/github.com/prometheus/client_model/go:go_default_library", - "//tests/smoke/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg:go_default_library", - "//tests/smoke/vendor/github.com/prometheus/common/model:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/BUILD.bazel b/tests/smoke/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/BUILD.bazel deleted file mode 100644 index 9fe3ab2d6f6..00000000000 --- a/tests/smoke/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["autoneg.go"], - importmap = "installer/tests/smoke/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - importpath = "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - visibility = ["//tests/smoke/vendor/github.com/prometheus/common:__subpackages__"], -) diff --git a/tests/smoke/vendor/github.com/prometheus/common/model/BUILD.bazel b/tests/smoke/vendor/github.com/prometheus/common/model/BUILD.bazel deleted file mode 100644 index 6c92548bfbf..00000000000 --- a/tests/smoke/vendor/github.com/prometheus/common/model/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "alert.go", - "fingerprinting.go", - "fnv.go", - "labels.go", - "labelset.go", - "metric.go", - "model.go", - "signature.go", - "silence.go", - "time.go", - "value.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/prometheus/common/model", - importpath = "github.com/prometheus/common/model", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/prometheus/procfs/BUILD.bazel b/tests/smoke/vendor/github.com/prometheus/procfs/BUILD.bazel deleted file mode 100644 index cce4d27cb4a..00000000000 --- a/tests/smoke/vendor/github.com/prometheus/procfs/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "buddyinfo.go", - "doc.go", - "fs.go", - "ipvs.go", - "mdstat.go", - 
"mountstats.go", - "proc.go", - "proc_io.go", - "proc_limits.go", - "proc_stat.go", - "stat.go", - "xfrm.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/prometheus/procfs", - importpath = "github.com/prometheus/procfs", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/prometheus/procfs/xfs:go_default_library"], -) diff --git a/tests/smoke/vendor/github.com/prometheus/procfs/xfs/BUILD.bazel b/tests/smoke/vendor/github.com/prometheus/procfs/xfs/BUILD.bazel deleted file mode 100644 index 3251b7a2fc8..00000000000 --- a/tests/smoke/vendor/github.com/prometheus/procfs/xfs/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "parse.go", - "xfs.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/prometheus/procfs/xfs", - importpath = "github.com/prometheus/procfs/xfs", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/spf13/cobra/BUILD.bazel b/tests/smoke/vendor/github.com/spf13/cobra/BUILD.bazel deleted file mode 100644 index 9fe76d4ba71..00000000000 --- a/tests/smoke/vendor/github.com/spf13/cobra/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bash_completions.go", - "cobra.go", - "command.go", - "command_notwin.go", - "command_win.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/spf13/cobra", - importpath = "github.com/spf13/cobra", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/spf13/pflag:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//tests/smoke/vendor/github.com/inconshreveable/mousetrap:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/tests/smoke/vendor/github.com/spf13/cobra/doc/BUILD.bazel b/tests/smoke/vendor/github.com/spf13/cobra/doc/BUILD.bazel deleted file mode 100644 index 774fcf190aa..00000000000 --- a/tests/smoke/vendor/github.com/spf13/cobra/doc/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "man_docs.go", - "md_docs.go", - "util.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/spf13/cobra/doc", - importpath = "github.com/spf13/cobra/doc", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/spf13/cobra:go_default_library", - "//tests/smoke/vendor/github.com/spf13/pflag:go_default_library", - "//vendor/github.com/cpuguy83/go-md2man/md2man:go_default_library", - ], -) diff --git a/tests/smoke/vendor/github.com/spf13/pflag/BUILD.bazel b/tests/smoke/vendor/github.com/spf13/pflag/BUILD.bazel deleted file mode 100644 index 8c90ca4997e..00000000000 --- a/tests/smoke/vendor/github.com/spf13/pflag/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bool.go", - "bool_slice.go", - "count.go", - "duration.go", - "flag.go", - "float32.go", - "float64.go", - "golangflag.go", - "int.go", - "int32.go", - "int64.go", - "int8.go", - "int_slice.go", - "ip.go", - "ip_slice.go", - "ipmask.go", - "ipnet.go", - "string.go", - "string_array.go", - "string_slice.go", - "uint.go", - "uint16.go", - "uint32.go", - "uint64.go", - "uint8.go", - "uint_slice.go", - ], - importmap = 
"installer/tests/smoke/vendor/github.com/spf13/pflag", - importpath = "github.com/spf13/pflag", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/github.com/ugorji/go/codec/BUILD.bazel b/tests/smoke/vendor/github.com/ugorji/go/codec/BUILD.bazel deleted file mode 100644 index 7ccfa349ed4..00000000000 --- a/tests/smoke/vendor/github.com/ugorji/go/codec/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "0doc.go", - "binc.go", - "cbor.go", - "decode.go", - "decode_go.go", - "decode_go14.go", - "encode.go", - "fast-path.generated.go", - "gen.generated.go", - "gen.go", - "gen-helper.generated.go", - "gen_15.go", - "gen_16.go", - "gen_17.go", - "helper.go", - "helper_internal.go", - "helper_not_unsafe.go", - "json.go", - "msgpack.go", - "noop.go", - "prebuild.go", - "rpc.go", - "simple.go", - "time.go", - ], - importmap = "installer/tests/smoke/vendor/github.com/ugorji/go/codec", - importpath = "github.com/ugorji/go/codec", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/golang.org/x/crypto/ssh/BUILD.bazel b/tests/smoke/vendor/golang.org/x/crypto/ssh/BUILD.bazel deleted file mode 100644 index b38952f0d0c..00000000000 --- a/tests/smoke/vendor/golang.org/x/crypto/ssh/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "buffer.go", - "certs.go", - "channel.go", - "cipher.go", - "client.go", - "client_auth.go", - "common.go", - "connection.go", - "doc.go", - "handshake.go", - "kex.go", - "keys.go", - "mac.go", - "messages.go", - "mux.go", - "server.go", - "session.go", - "streamlocal.go", - "tcpip.go", - "transport.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/crypto/ssh", - importpath = "golang.org/x/crypto/ssh", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/crypto/curve25519:go_default_library", - "//vendor/golang.org/x/crypto/ed25519:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/crypto/ssh/agent/BUILD.bazel b/tests/smoke/vendor/golang.org/x/crypto/ssh/agent/BUILD.bazel deleted file mode 100644 index 49164ea6f2f..00000000000 --- a/tests/smoke/vendor/golang.org/x/crypto/ssh/agent/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "forward.go", - "keyring.go", - "server.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/crypto/ssh/agent", - importpath = "golang.org/x/crypto/ssh/agent", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/golang.org/x/crypto/ssh:go_default_library", - "//vendor/golang.org/x/crypto/ed25519:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel b/tests/smoke/vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel deleted file mode 100644 index d9cdc728ed9..00000000000 --- a/tests/smoke/vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "terminal.go", - "util.go", - "util_bsd.go", - "util_linux.go", - "util_plan9.go", - "util_solaris.go", - "util_windows.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/crypto/ssh/terminal", - importpath = "golang.org/x/crypto/ssh/terminal", - 
visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:solaris": [ - "//tests/smoke/vendor/golang.org/x/sys/unix:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/tests/smoke/vendor/golang.org/x/net/context/BUILD.bazel b/tests/smoke/vendor/golang.org/x/net/context/BUILD.bazel deleted file mode 100644 index d59ffabece9..00000000000 --- a/tests/smoke/vendor/golang.org/x/net/context/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "go17.go", - "pre_go17.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/net/context", - importpath = "golang.org/x/net/context", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel b/tests/smoke/vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel deleted file mode 100644 index f125e5619d9..00000000000 --- a/tests/smoke/vendor/golang.org/x/net/context/ctxhttp/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "ctxhttp.go", - "ctxhttp_pre17.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/net/context/ctxhttp", - importpath = "golang.org/x/net/context/ctxhttp", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/golang.org/x/net/context:go_default_library"], -) diff --git a/tests/smoke/vendor/golang.org/x/net/http2/BUILD.bazel b/tests/smoke/vendor/golang.org/x/net/http2/BUILD.bazel deleted file mode 100644 index 0f77379f41a..00000000000 --- a/tests/smoke/vendor/golang.org/x/net/http2/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client_conn_pool.go", - "configure_transport.go", - "errors.go", - "fixed_buffer.go", - "flow.go", - "frame.go", - "go16.go", - "go17.go", - "go17_not18.go", - "go18.go", - "gotrack.go", - "headermap.go", - "http2.go", - "not_go16.go", - "not_go17.go", - "not_go18.go", - "pipe.go", - "server.go", - "transport.go", - "write.go", - "writesched.go", - "writesched_priority.go", - "writesched_random.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/net/http2", - importpath = "golang.org/x/net/http2", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/golang.org/x/net/http2/hpack:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/idna:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/lex/httplex:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/net/http2/hpack/BUILD.bazel b/tests/smoke/vendor/golang.org/x/net/http2/hpack/BUILD.bazel deleted file mode 100644 index aa393d1aa6b..00000000000 --- a/tests/smoke/vendor/golang.org/x/net/http2/hpack/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "encode.go", - "hpack.go", - "huffman.go", - "tables.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/net/http2/hpack", - importpath = "golang.org/x/net/http2/hpack", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/golang.org/x/net/idna/BUILD.bazel b/tests/smoke/vendor/golang.org/x/net/idna/BUILD.bazel deleted file mode 100644 index 7261fcca202..00000000000 --- 
a/tests/smoke/vendor/golang.org/x/net/idna/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "idna.go", - "punycode.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/net/idna", - importpath = "golang.org/x/net/idna", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/golang.org/x/net/lex/httplex/BUILD.bazel b/tests/smoke/vendor/golang.org/x/net/lex/httplex/BUILD.bazel deleted file mode 100644 index 0c1716559a2..00000000000 --- a/tests/smoke/vendor/golang.org/x/net/lex/httplex/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["httplex.go"], - importmap = "installer/tests/smoke/vendor/golang.org/x/net/lex/httplex", - importpath = "golang.org/x/net/lex/httplex", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/golang.org/x/net/idna:go_default_library"], -) diff --git a/tests/smoke/vendor/golang.org/x/oauth2/BUILD.bazel b/tests/smoke/vendor/golang.org/x/oauth2/BUILD.bazel deleted file mode 100644 index 8fba679fa0a..00000000000 --- a/tests/smoke/vendor/golang.org/x/oauth2/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "oauth2.go", - "token.go", - "transport.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/oauth2", - importpath = "golang.org/x/oauth2", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2/internal:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/oauth2/google/BUILD.bazel b/tests/smoke/vendor/golang.org/x/oauth2/google/BUILD.bazel deleted file mode 100644 index 38ccd54b4de..00000000000 --- a/tests/smoke/vendor/golang.org/x/oauth2/google/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "appengine.go", - "default.go", - "google.go", - "jwt.go", - "sdk.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/oauth2/google", - importpath = "golang.org/x/oauth2/google", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/cloud.google.com/go/compute/metadata:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2/internal:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2/jws:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2/jwt:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/oauth2/internal/BUILD.bazel b/tests/smoke/vendor/golang.org/x/oauth2/internal/BUILD.bazel deleted file mode 100644 index 0fae45d4d9c..00000000000 --- a/tests/smoke/vendor/golang.org/x/oauth2/internal/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "oauth2.go", - "token.go", - "transport.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/oauth2/internal", - importpath = "golang.org/x/oauth2/internal", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/golang.org/x/net/context:go_default_library"], -) diff --git 
a/tests/smoke/vendor/golang.org/x/oauth2/jws/BUILD.bazel b/tests/smoke/vendor/golang.org/x/oauth2/jws/BUILD.bazel deleted file mode 100644 index e3b06e80346..00000000000 --- a/tests/smoke/vendor/golang.org/x/oauth2/jws/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["jws.go"], - importmap = "installer/tests/smoke/vendor/golang.org/x/oauth2/jws", - importpath = "golang.org/x/oauth2/jws", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/golang.org/x/oauth2/jwt/BUILD.bazel b/tests/smoke/vendor/golang.org/x/oauth2/jwt/BUILD.bazel deleted file mode 100644 index 80a2feccf6e..00000000000 --- a/tests/smoke/vendor/golang.org/x/oauth2/jwt/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["jwt.go"], - importmap = "installer/tests/smoke/vendor/golang.org/x/oauth2/jwt", - importpath = "golang.org/x/oauth2/jwt", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2/internal:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2/jws:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/sys/unix/BUILD.bazel b/tests/smoke/vendor/golang.org/x/sys/unix/BUILD.bazel deleted file mode 100644 index 61e0d7734f0..00000000000 --- a/tests/smoke/vendor/golang.org/x/sys/unix/BUILD.bazel +++ /dev/null @@ -1,168 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "asm.s", - "asm_darwin_386.s", - "asm_darwin_amd64.s", - "asm_darwin_arm.s", - "asm_darwin_arm64.s", - "asm_dragonfly_amd64.s", - "asm_freebsd_386.s", - "asm_freebsd_amd64.s", - "asm_freebsd_arm.s", - "asm_linux_386.s", - "asm_linux_amd64.s", - "asm_linux_arm.s", - "asm_linux_arm64.s", - "asm_linux_mips64x.s", - "asm_linux_ppc64x.s", - "asm_linux_s390x.s", - "asm_netbsd_386.s", - "asm_netbsd_amd64.s", - "asm_netbsd_arm.s", - "asm_openbsd_386.s", - "asm_openbsd_amd64.s", - "asm_solaris_amd64.s", - "bluetooth_linux.go", - "constants.go", - "env_unix.go", - "env_unset.go", - "flock.go", - "flock_linux_32bit.go", - "race0.go", - "sockcmsg_linux.go", - "sockcmsg_unix.go", - "str.go", - "syscall.go", - "syscall_bsd.go", - "syscall_darwin.go", - "syscall_darwin_386.go", - "syscall_darwin_amd64.go", - "syscall_darwin_arm.go", - "syscall_darwin_arm64.go", - "syscall_dragonfly.go", - "syscall_dragonfly_amd64.go", - "syscall_freebsd.go", - "syscall_freebsd_386.go", - "syscall_freebsd_amd64.go", - "syscall_freebsd_arm.go", - "syscall_linux.go", - "syscall_linux_386.go", - "syscall_linux_amd64.go", - "syscall_linux_arm.go", - "syscall_linux_arm64.go", - "syscall_linux_mips64x.go", - "syscall_linux_ppc64x.go", - "syscall_linux_s390x.go", - "syscall_netbsd.go", - "syscall_netbsd_386.go", - "syscall_netbsd_amd64.go", - "syscall_netbsd_arm.go", - "syscall_no_getwd.go", - "syscall_openbsd.go", - "syscall_openbsd_386.go", - "syscall_openbsd_amd64.go", - "syscall_solaris.go", - "syscall_solaris_amd64.go", - "syscall_unix.go", - "zerrors_darwin_386.go", - "zerrors_darwin_amd64.go", - "zerrors_darwin_arm.go", - "zerrors_darwin_arm64.go", - "zerrors_dragonfly_amd64.go", - "zerrors_freebsd_386.go", - "zerrors_freebsd_amd64.go", - "zerrors_freebsd_arm.go", - "zerrors_linux_386.go", - 
"zerrors_linux_amd64.go", - "zerrors_linux_arm.go", - "zerrors_linux_arm64.go", - "zerrors_linux_mips64.go", - "zerrors_linux_mips64le.go", - "zerrors_linux_ppc64.go", - "zerrors_linux_ppc64le.go", - "zerrors_linux_s390x.go", - "zerrors_netbsd_386.go", - "zerrors_netbsd_amd64.go", - "zerrors_netbsd_arm.go", - "zerrors_openbsd_386.go", - "zerrors_openbsd_amd64.go", - "zerrors_solaris_amd64.go", - "zsyscall_darwin_386.go", - "zsyscall_darwin_amd64.go", - "zsyscall_darwin_arm.go", - "zsyscall_darwin_arm64.go", - "zsyscall_dragonfly_amd64.go", - "zsyscall_freebsd_386.go", - "zsyscall_freebsd_amd64.go", - "zsyscall_freebsd_arm.go", - "zsyscall_linux_386.go", - "zsyscall_linux_amd64.go", - "zsyscall_linux_arm.go", - "zsyscall_linux_arm64.go", - "zsyscall_linux_mips64.go", - "zsyscall_linux_mips64le.go", - "zsyscall_linux_ppc64.go", - "zsyscall_linux_ppc64le.go", - "zsyscall_linux_s390x.go", - "zsyscall_netbsd_386.go", - "zsyscall_netbsd_amd64.go", - "zsyscall_netbsd_arm.go", - "zsyscall_openbsd_386.go", - "zsyscall_openbsd_amd64.go", - "zsyscall_solaris_amd64.go", - "zsysctl_openbsd.go", - "zsysnum_darwin_386.go", - "zsysnum_darwin_amd64.go", - "zsysnum_darwin_arm.go", - "zsysnum_darwin_arm64.go", - "zsysnum_dragonfly_amd64.go", - "zsysnum_freebsd_386.go", - "zsysnum_freebsd_amd64.go", - "zsysnum_freebsd_arm.go", - "zsysnum_linux_386.go", - "zsysnum_linux_amd64.go", - "zsysnum_linux_arm.go", - "zsysnum_linux_arm64.go", - "zsysnum_linux_mips64.go", - "zsysnum_linux_mips64le.go", - "zsysnum_linux_ppc64.go", - "zsysnum_linux_ppc64le.go", - "zsysnum_linux_s390x.go", - "zsysnum_netbsd_386.go", - "zsysnum_netbsd_amd64.go", - "zsysnum_netbsd_arm.go", - "zsysnum_openbsd_386.go", - "zsysnum_openbsd_amd64.go", - "zsysnum_solaris_amd64.go", - "ztypes_darwin_386.go", - "ztypes_darwin_amd64.go", - "ztypes_darwin_arm.go", - "ztypes_darwin_arm64.go", - "ztypes_dragonfly_amd64.go", - "ztypes_freebsd_386.go", - "ztypes_freebsd_amd64.go", - "ztypes_freebsd_arm.go", - "ztypes_linux_386.go", - "ztypes_linux_amd64.go", - "ztypes_linux_arm.go", - "ztypes_linux_arm64.go", - "ztypes_linux_mips64.go", - "ztypes_linux_mips64le.go", - "ztypes_linux_ppc64.go", - "ztypes_linux_ppc64le.go", - "ztypes_linux_s390x.go", - "ztypes_netbsd_386.go", - "ztypes_netbsd_amd64.go", - "ztypes_netbsd_arm.go", - "ztypes_openbsd_386.go", - "ztypes_openbsd_amd64.go", - "ztypes_solaris_amd64.go", - ], - cgo = True, - importmap = "installer/tests/smoke/vendor/golang.org/x/sys/unix", - importpath = "golang.org/x/sys/unix", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/golang.org/x/text/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/BUILD.bazel deleted file mode 100644 index 1e4758f94bd..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - importmap = "installer/tests/smoke/vendor/golang.org/x/text", - importpath = "golang.org/x/text", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/golang.org/x/text/cases/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/cases/BUILD.bazel deleted file mode 100644 index d1fe59cc216..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/cases/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cases.go", - "context.go", - "fold.go", - "info.go", - "map.go", - "tables.go", - 
"trieval.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/cases", - importpath = "golang.org/x/text/cases", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/golang.org/x/text/language:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/transform:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/unicode/norm:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/text/encoding/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/encoding/BUILD.bazel deleted file mode 100644 index 1666ec0a0b0..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/encoding/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["encoding.go"], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/encoding", - importpath = "golang.org/x/text/encoding", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/golang.org/x/text/encoding/internal/identifier:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/transform:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/text/encoding/internal/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/encoding/internal/BUILD.bazel deleted file mode 100644 index 95a7f04beae..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/encoding/internal/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["internal.go"], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/encoding/internal", - importpath = "golang.org/x/text/encoding/internal", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/golang.org/x/text/encoding:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/encoding/internal/identifier:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/transform:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/text/encoding/internal/identifier/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/encoding/internal/identifier/BUILD.bazel deleted file mode 100644 index be6a91f00fd..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/encoding/internal/identifier/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "identifier.go", - "mib.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/encoding/internal/identifier", - importpath = "golang.org/x/text/encoding/internal/identifier", - visibility = ["//tests/smoke/vendor/golang.org/x/text/encoding:__subpackages__"], -) diff --git a/tests/smoke/vendor/golang.org/x/text/encoding/unicode/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/encoding/unicode/BUILD.bazel deleted file mode 100644 index 19d1bd4ca77..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/encoding/unicode/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "override.go", - "unicode.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/encoding/unicode", - importpath = "golang.org/x/text/encoding/unicode", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/golang.org/x/text/encoding:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/encoding/internal:go_default_library", 
- "//tests/smoke/vendor/golang.org/x/text/encoding/internal/identifier:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/internal/utf8internal:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/runes:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/transform:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/text/internal/tag/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/internal/tag/BUILD.bazel deleted file mode 100644 index 801457ac803..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/internal/tag/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["tag.go"], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/internal/tag", - importpath = "golang.org/x/text/internal/tag", - visibility = ["//tests/smoke/vendor/golang.org/x/text:__subpackages__"], -) diff --git a/tests/smoke/vendor/golang.org/x/text/internal/utf8internal/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/internal/utf8internal/BUILD.bazel deleted file mode 100644 index ba5b78ee159..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/internal/utf8internal/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["utf8internal.go"], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/internal/utf8internal", - importpath = "golang.org/x/text/internal/utf8internal", - visibility = ["//tests/smoke/vendor/golang.org/x/text:__subpackages__"], -) diff --git a/tests/smoke/vendor/golang.org/x/text/language/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/language/BUILD.bazel deleted file mode 100644 index 7fb840667d3..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/language/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "common.go", - "coverage.go", - "go1_1.go", - "go1_2.go", - "index.go", - "language.go", - "lookup.go", - "match.go", - "parse.go", - "tables.go", - "tags.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/language", - importpath = "golang.org/x/text/language", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/golang.org/x/text/internal/tag:go_default_library"], -) diff --git a/tests/smoke/vendor/golang.org/x/text/runes/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/runes/BUILD.bazel deleted file mode 100644 index c3ccb84d59a..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/runes/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cond.go", - "runes.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/runes", - importpath = "golang.org/x/text/runes", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/golang.org/x/text/transform:go_default_library"], -) diff --git a/tests/smoke/vendor/golang.org/x/text/secure/bidirule/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/secure/bidirule/BUILD.bazel deleted file mode 100644 index d82104613fe..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/secure/bidirule/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["bidirule.go"], - importmap = 
"installer/tests/smoke/vendor/golang.org/x/text/secure/bidirule", - importpath = "golang.org/x/text/secure/bidirule", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/golang.org/x/text/transform:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/unicode/bidi:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/text/secure/precis/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/secure/precis/BUILD.bazel deleted file mode 100644 index 147afe90dd5..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/secure/precis/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "class.go", - "context.go", - "doc.go", - "nickname.go", - "options.go", - "profile.go", - "profiles.go", - "tables.go", - "transformer.go", - "trieval.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/secure/precis", - importpath = "golang.org/x/text/secure/precis", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/golang.org/x/text/cases:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/runes:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/secure/bidirule:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/transform:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/unicode/norm:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/width:go_default_library", - ], -) diff --git a/tests/smoke/vendor/golang.org/x/text/transform/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/transform/BUILD.bazel deleted file mode 100644 index a1a8ddb3bda..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/transform/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["transform.go"], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/transform", - importpath = "golang.org/x/text/transform", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel deleted file mode 100644 index f13029d903c..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bidi.go", - "bracket.go", - "core.go", - "prop.go", - "tables.go", - "trieval.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/unicode/bidi", - importpath = "golang.org/x/text/unicode/bidi", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/golang.org/x/text/unicode/norm/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/unicode/norm/BUILD.bazel deleted file mode 100644 index 25fa8a769b6..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/unicode/norm/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "composition.go", - "forminfo.go", - "input.go", - "iter.go", - "normalize.go", - "readwriter.go", - "tables.go", - "transform.go", - "trie.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/unicode/norm", - importpath = "golang.org/x/text/unicode/norm", - visibility = ["//visibility:public"], - deps = 
["//tests/smoke/vendor/golang.org/x/text/transform:go_default_library"], -) diff --git a/tests/smoke/vendor/golang.org/x/text/width/BUILD.bazel b/tests/smoke/vendor/golang.org/x/text/width/BUILD.bazel deleted file mode 100644 index 8ee23eea33c..00000000000 --- a/tests/smoke/vendor/golang.org/x/text/width/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "kind_string.go", - "tables.go", - "transform.go", - "trieval.go", - "width.go", - ], - importmap = "installer/tests/smoke/vendor/golang.org/x/text/width", - importpath = "golang.org/x/text/width", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/golang.org/x/text/transform:go_default_library"], -) diff --git a/tests/smoke/vendor/google.golang.org/appengine/BUILD.bazel b/tests/smoke/vendor/google.golang.org/appengine/BUILD.bazel deleted file mode 100644 index 6355f6d56d0..00000000000 --- a/tests/smoke/vendor/google.golang.org/appengine/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "appengine.go", - "appengine_vm.go", - "errors.go", - "identity.go", - "namespace.go", - "timeout.go", - ], - importmap = "installer/tests/smoke/vendor/google.golang.org/appengine", - importpath = "google.golang.org/appengine", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - "//tests/smoke/vendor/google.golang.org/appengine/internal:go_default_library", - "//tests/smoke/vendor/google.golang.org/appengine/internal/app_identity:go_default_library", - "//tests/smoke/vendor/google.golang.org/appengine/internal/modules:go_default_library", - ], -) diff --git a/tests/smoke/vendor/google.golang.org/appengine/internal/BUILD.bazel b/tests/smoke/vendor/google.golang.org/appengine/internal/BUILD.bazel deleted file mode 100644 index 8cdc4705da4..00000000000 --- a/tests/smoke/vendor/google.golang.org/appengine/internal/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "api_common.go", - "app_id.go", - "identity.go", - "identity_vm.go", - "internal.go", - "metadata.go", - "net.go", - "transaction.go", - ], - importmap = "installer/tests/smoke/vendor/google.golang.org/appengine/internal", - importpath = "google.golang.org/appengine/internal", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - "//tests/smoke/vendor/google.golang.org/appengine/internal/base:go_default_library", - "//tests/smoke/vendor/google.golang.org/appengine/internal/datastore:go_default_library", - "//tests/smoke/vendor/google.golang.org/appengine/internal/log:go_default_library", - "//tests/smoke/vendor/google.golang.org/appengine/internal/remote_api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/google.golang.org/appengine/internal/app_identity/BUILD.bazel b/tests/smoke/vendor/google.golang.org/appengine/internal/app_identity/BUILD.bazel deleted file mode 100644 index 365fbe85207..00000000000 --- a/tests/smoke/vendor/google.golang.org/appengine/internal/app_identity/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["app_identity_service.pb.go"], - importmap = "installer/tests/smoke/vendor/google.golang.org/appengine/internal/app_identity", - importpath = "google.golang.org/appengine/internal/app_identity", - visibility = ["//tests/smoke/vendor/google.golang.org/appengine:__subpackages__"], - deps = ["//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/tests/smoke/vendor/google.golang.org/appengine/internal/base/BUILD.bazel b/tests/smoke/vendor/google.golang.org/appengine/internal/base/BUILD.bazel deleted file mode 100644 index f9601c97946..00000000000 --- a/tests/smoke/vendor/google.golang.org/appengine/internal/base/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["api_base.pb.go"], - importmap = "installer/tests/smoke/vendor/google.golang.org/appengine/internal/base", - importpath = "google.golang.org/appengine/internal/base", - visibility = ["//tests/smoke/vendor/google.golang.org/appengine:__subpackages__"], - deps = ["//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/tests/smoke/vendor/google.golang.org/appengine/internal/datastore/BUILD.bazel b/tests/smoke/vendor/google.golang.org/appengine/internal/datastore/BUILD.bazel deleted file mode 100644 index 822d1925c6e..00000000000 --- a/tests/smoke/vendor/google.golang.org/appengine/internal/datastore/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["datastore_v3.pb.go"], - importmap = "installer/tests/smoke/vendor/google.golang.org/appengine/internal/datastore", - importpath = "google.golang.org/appengine/internal/datastore", - visibility = ["//tests/smoke/vendor/google.golang.org/appengine:__subpackages__"], - deps = ["//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/tests/smoke/vendor/google.golang.org/appengine/internal/log/BUILD.bazel b/tests/smoke/vendor/google.golang.org/appengine/internal/log/BUILD.bazel deleted file mode 100644 index 1d0dfcd48f7..00000000000 --- a/tests/smoke/vendor/google.golang.org/appengine/internal/log/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["log_service.pb.go"], - importmap = "installer/tests/smoke/vendor/google.golang.org/appengine/internal/log", - importpath = "google.golang.org/appengine/internal/log", - visibility = ["//tests/smoke/vendor/google.golang.org/appengine:__subpackages__"], - deps = ["//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/tests/smoke/vendor/google.golang.org/appengine/internal/modules/BUILD.bazel b/tests/smoke/vendor/google.golang.org/appengine/internal/modules/BUILD.bazel deleted file mode 100644 index 7c1a4dc8b53..00000000000 --- a/tests/smoke/vendor/google.golang.org/appengine/internal/modules/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["modules_service.pb.go"], - importmap = "installer/tests/smoke/vendor/google.golang.org/appengine/internal/modules", - importpath = "google.golang.org/appengine/internal/modules", - visibility = ["//tests/smoke/vendor/google.golang.org/appengine:__subpackages__"], 
- deps = ["//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/tests/smoke/vendor/google.golang.org/appengine/internal/remote_api/BUILD.bazel b/tests/smoke/vendor/google.golang.org/appengine/internal/remote_api/BUILD.bazel deleted file mode 100644 index 264e82c9af9..00000000000 --- a/tests/smoke/vendor/google.golang.org/appengine/internal/remote_api/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["remote_api.pb.go"], - importmap = "installer/tests/smoke/vendor/google.golang.org/appengine/internal/remote_api", - importpath = "google.golang.org/appengine/internal/remote_api", - visibility = ["//tests/smoke/vendor/google.golang.org/appengine:__subpackages__"], - deps = ["//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/tests/smoke/vendor/google.golang.org/appengine/internal/urlfetch/BUILD.bazel b/tests/smoke/vendor/google.golang.org/appengine/internal/urlfetch/BUILD.bazel deleted file mode 100644 index fec294b263d..00000000000 --- a/tests/smoke/vendor/google.golang.org/appengine/internal/urlfetch/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["urlfetch_service.pb.go"], - importmap = "installer/tests/smoke/vendor/google.golang.org/appengine/internal/urlfetch", - importpath = "google.golang.org/appengine/internal/urlfetch", - visibility = ["//tests/smoke/vendor/google.golang.org/appengine:__subpackages__"], - deps = ["//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/tests/smoke/vendor/google.golang.org/appengine/urlfetch/BUILD.bazel b/tests/smoke/vendor/google.golang.org/appengine/urlfetch/BUILD.bazel deleted file mode 100644 index adc92023b51..00000000000 --- a/tests/smoke/vendor/google.golang.org/appengine/urlfetch/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["urlfetch.go"], - importmap = "installer/tests/smoke/vendor/google.golang.org/appengine/urlfetch", - importpath = "google.golang.org/appengine/urlfetch", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/protobuf/proto:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - "//tests/smoke/vendor/google.golang.org/appengine/internal:go_default_library", - "//tests/smoke/vendor/google.golang.org/appengine/internal/urlfetch:go_default_library", - ], -) diff --git a/tests/smoke/vendor/gopkg.in/inf.v0/BUILD.bazel b/tests/smoke/vendor/gopkg.in/inf.v0/BUILD.bazel deleted file mode 100644 index 5c498781464..00000000000 --- a/tests/smoke/vendor/gopkg.in/inf.v0/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "dec.go", - "rounder.go", - ], - importmap = "installer/tests/smoke/vendor/gopkg.in/inf.v0", - importpath = "gopkg.in/inf.v0", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/gopkg.in/yaml.v2/BUILD.bazel b/tests/smoke/vendor/gopkg.in/yaml.v2/BUILD.bazel deleted file mode 100644 index 596213199ca..00000000000 --- a/tests/smoke/vendor/gopkg.in/yaml.v2/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs 
= [ - "apic.go", - "decode.go", - "emitterc.go", - "encode.go", - "parserc.go", - "readerc.go", - "resolve.go", - "scannerc.go", - "sorter.go", - "writerc.go", - "yaml.go", - "yamlh.go", - "yamlprivateh.go", - ], - importmap = "installer/tests/smoke/vendor/gopkg.in/yaml.v2", - importpath = "gopkg.in/yaml.v2", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD.bazel deleted file mode 100644 index 1a92bd52273..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["semantic.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/equality", - importpath = "k8s.io/apimachinery/pkg/api/equality", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD.bazel deleted file mode 100644 index 793d4f952ce..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors", - importpath = "k8s.io/apimachinery/pkg/api/errors", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel deleted file mode 100644 index daf433119a5..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "default.go", - "doc.go", - "errors.go", - "firsthit_restmapper.go", - "help.go", - "interfaces.go", - "meta.go", - "multirestmapper.go", - "priority.go", - "restmapper.go", - "unstructured.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta", - importpath = "k8s.io/apimachinery/pkg/api/meta", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD.bazel deleted file mode 100644 index 75afa269a39..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "amount.go", - "generated.pb.go", - "math.go", - "quantity.go", - "quantity_proto.go", - "scale_int.go", - "suffix.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource", - importpath = "k8s.io/apimachinery/pkg/api/resource", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/go-openapi/spec:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/spf13/pflag:go_default_library", - "//tests/smoke/vendor/gopkg.in/inf.v0:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/openapi:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD.bazel deleted file mode 100644 index c64bf109d1a..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generic.go", - "objectmeta.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/api/validation", - importpath = "k8s.io/apimachinery/pkg/api/validation", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD.bazel deleted file mode 100644 index 29fbe47c5a5..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "types.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery", - importpath = "k8s.io/apimachinery/pkg/apimachinery", - visibility = 
["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD.bazel deleted file mode 100644 index e711f589705..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "announced.go", - "group_factory.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced", - importpath = "k8s.io/apimachinery/pkg/apimachinery/announced", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD.bazel deleted file mode 100644 index 44f75c32ca6..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["registered.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered", - importpath = "k8s.io/apimachinery/pkg/apimachinery/registered", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD.bazel deleted file mode 100644 index cdf90f86afa..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD.bazel +++ /dev/null @@ -1,45 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "doc.go", - "duration.go", - "generated.pb.go", - "group_version.go", - "helpers.go", - "labels.go", - "meta.go", - "micro_time.go", - "micro_time_proto.go", - "register.go", - "time.go", - "time_proto.go", - "types.go", - "types_swagger_doc_generated.go", - "watch.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1", - importpath = 
"k8s.io/apimachinery/pkg/apis/meta/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/go-openapi/spec:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/google/gofuzz:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/openapi:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/selection:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD.bazel deleted file mode 100644 index afe8033501e..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["unstructured.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/unstructured:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD.bazel deleted file mode 100644 index a42d66e7e77..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["validation.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation", - importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/validation", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) 
diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD.bazel deleted file mode 100644 index ef5258b1bd9..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1", - importpath = "k8s.io/apimachinery/pkg/apis/meta/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/BUILD.bazel deleted file mode 100644 index 1b5cd3b5e78..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cloner.go", - "converter.go", - "deep_equal.go", - "doc.go", - "helper.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion", - importpath = "k8s.io/apimachinery/pkg/conversion", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD.bazel deleted file mode 100644 index 80b0410ca37..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "convert.go", - "doc.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/queryparams", - importpath = "k8s.io/apimachinery/pkg/conversion/queryparams", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/BUILD.bazel deleted file mode 100644 index 9cea868466f..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "converter.go", - "doc.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/unstructured", - importpath = "k8s.io/apimachinery/pkg/conversion/unstructured", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/fields/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/fields/BUILD.bazel deleted file mode 100644 index 911414d87e2..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/fields/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fields.go", - "requirements.go", - "selector.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/fields", - importpath = "k8s.io/apimachinery/pkg/fields", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apimachinery/pkg/selection:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel deleted file mode 100644 index 0f9c1cb3254..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "labels.go", - "selector.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/labels", - importpath = "k8s.io/apimachinery/pkg/labels", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/selection:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/openapi/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/openapi/BUILD.bazel deleted file mode 100644 index 3e0a9d47bd3..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/openapi/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "common.go", - "doc.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/openapi", - importpath = "k8s.io/apimachinery/pkg/openapi", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/emicklei/go-restful:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/spec:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel deleted file mode 100644 index f92e7c847d0..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "codec.go", - "codec_check.go", - "conversion.go", - "doc.go", - "embedded.go", - "error.go", - "extension.go", - "generated.pb.go", - "helper.go", - "interfaces.go", - "register.go", - "scheme.go", - "scheme_builder.go", - "swagger_doc_generator.go", - "types.go", - "types_proto.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime", - importpath = 
"k8s.io/apimachinery/pkg/runtime", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/queryparams:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD.bazel deleted file mode 100644 index 83320e6cea1..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "generated.pb.go", - "group_version.go", - "interfaces.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema", - importpath = "k8s.io/apimachinery/pkg/runtime/schema", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD.bazel deleted file mode 100644 index b874250fdff..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "codec_factory.go", - "negotiated_codec.go", - "protobuf_extension.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel deleted file mode 100644 index 27045211f52..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "json.go", - "meta.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/json", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/ghodss/yaml:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/framer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD.bazel deleted file mode 100644 index f133aa4e58e..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "protobuf.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/framer:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD.bazel deleted file mode 100644 index 76f07259847..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["recognizer.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD.bazel deleted file mode 100644 index f70625e5f20..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["streaming.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/streaming", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD.bazel deleted file mode 100644 index a5800cf0eff..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["versioning.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/versioning", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/selection/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/selection/BUILD.bazel deleted file mode 100644 index d9c68488a71..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/selection/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["operator.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/selection", - importpath = "k8s.io/apimachinery/pkg/selection", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/types/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/types/BUILD.bazel deleted file mode 100644 index 58cc6cac38a..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/types/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "namespacedname.go", - "nodename.go", - "patch.go", - "uid.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/types", - importpath = "k8s.io/apimachinery/pkg/types", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD.bazel deleted file mode 100644 index 1b974fb5a1d..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cache.go", - "lruexpirecache.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/cache", - importpath = "k8s.io/apimachinery/pkg/util/cache", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/hashicorp/golang-lru:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD.bazel deleted file mode 100644 index 6a0112c6c6a..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["clock.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/clock", - importpath = "k8s.io/apimachinery/pkg/util/clock", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD.bazel deleted file mode 100644 index 25515fbe710..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - 
-go_library( - name = "go_default_library", - srcs = ["diff.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/diff", - importpath = "k8s.io/apimachinery/pkg/util/diff", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD.bazel deleted file mode 100644 index 4c0042e296f..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors", - importpath = "k8s.io/apimachinery/pkg/util/errors", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD.bazel deleted file mode 100644 index 97046dee99a..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["framer.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/framer", - importpath = "k8s.io/apimachinery/pkg/util/framer", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel deleted file mode 100644 index 9043a1ac29a..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "generated.pb.go", - "intstr.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr", - importpath = "k8s.io/apimachinery/pkg/util/intstr", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/go-openapi/spec:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/github.com/google/gofuzz:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/openapi:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json/BUILD.bazel deleted file mode 100644 index d6ea8cb15f4..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["json.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json", - importpath = "k8s.io/apimachinery/pkg/util/json", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel deleted file mode 100644 index 4e22e97e187..00000000000 --- 
a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "util.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/mergepatch", - importpath = "k8s.io/apimachinery/pkg/util/mergepatch", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//tests/smoke/vendor/github.com/ghodss/yaml:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel deleted file mode 100644 index a293ca2fa2a..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "http.go", - "interface.go", - "port_range.go", - "port_split.go", - "util.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net", - importpath = "k8s.io/apimachinery/pkg/util/net", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/http2:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD.bazel deleted file mode 100644 index 7e034934fe3..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["rand.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/rand", - importpath = "k8s.io/apimachinery/pkg/util/rand", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel deleted file mode 100644 index 03606647413..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["runtime.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime", - importpath = "k8s.io/apimachinery/pkg/util/runtime", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/golang/glog:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD.bazel deleted file mode 100644 index 5143475a47a..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "byte.go", - "doc.go", - "empty.go", - "int.go", - "int64.go", - "string.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets", - importpath = "k8s.io/apimachinery/pkg/util/sets", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD.bazel 
b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD.bazel deleted file mode 100644 index d54395c9774..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["patch.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/strategicpatch", - importpath = "k8s.io/apimachinery/pkg/util/strategicpatch", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/json:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD.bazel deleted file mode 100644 index fba8cd5addd..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["uuid.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/uuid", - importpath = "k8s.io/apimachinery/pkg/util/uuid", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/pborman/uuid:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD.bazel deleted file mode 100644 index d4b275933d3..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["validation.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation", - importpath = "k8s.io/apimachinery/pkg/util/validation", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD.bazel deleted file mode 100644 index 53eb669e6a9..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "path.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field", - importpath = "k8s.io/apimachinery/pkg/util/validation/field", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD.bazel deleted file mode 100644 index e01dc3b28e5..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "wait.go", - ], - importmap = 
"installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait", - importpath = "k8s.io/apimachinery/pkg/util/wait", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel deleted file mode 100644 index b6693dcf059..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["decoder.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/util/yaml", - importpath = "k8s.io/apimachinery/pkg/util/yaml", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/ghodss/yaml:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/version/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/version/BUILD.bazel deleted file mode 100644 index b70a4673ac7..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/version/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "types.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/version", - importpath = "k8s.io/apimachinery/pkg/version", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel deleted file mode 100644 index 3c1a4cc0761..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "filter.go", - "mux.go", - "streamwatcher.go", - "until.go", - "watch.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/pkg/watch", - importpath = "k8s.io/apimachinery/pkg/watch", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD.bazel b/tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD.bazel deleted file mode 100644 index 29604b89d5c..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["fields.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/json", - importpath = "k8s.io/apimachinery/third_party/forked/golang/json", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD.bazel 
b/tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD.bazel deleted file mode 100644 index d317dfe9e1b..00000000000 --- a/tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "deep_equal.go", - "type.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect", - importpath = "k8s.io/apimachinery/third_party/forked/golang/reflect", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD.bazel b/tests/smoke/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD.bazel deleted file mode 100644 index 695db4ae70a..00000000000 --- a/tests/smoke/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apiserver/pkg/apis/audit", - importpath = "k8s.io/apiserver/pkg/apis/audit", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel b/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel deleted file mode 100644 index 80cb7711136..00000000000 --- a/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interfaces.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/authenticator", - importpath = "k8s.io/apiserver/pkg/authentication/authenticator", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD.bazel b/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD.bazel deleted file mode 100644 index 2569a081180..00000000000 --- a/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount", - importpath = "k8s.io/apiserver/pkg/authentication/serviceaccount", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD.bazel b/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD.bazel deleted file mode 100644 index 1f776608034..00000000000 --- 
a/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "user.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/user", - importpath = "k8s.io/apiserver/pkg/authentication/user", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel b/tests/smoke/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel deleted file mode 100644 index 90e8e8e4e19..00000000000 --- a/tests/smoke/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "doc.go", - "requestcontext.go", - "requestinfo.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/apiserver/pkg/endpoints/request", - importpath = "k8s.io/apiserver/pkg/endpoints/request", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apiserver/pkg/features/BUILD.bazel b/tests/smoke/vendor/k8s.io/apiserver/pkg/features/BUILD.bazel deleted file mode 100644 index feabdac1719..00000000000 --- a/tests/smoke/vendor/k8s.io/apiserver/pkg/features/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["kube_features.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apiserver/pkg/features", - importpath = "k8s.io/apiserver/pkg/features", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel b/tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel deleted file mode 100644 index 5b667ecbc19..00000000000 --- a/tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["feature_gate.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature", - importpath = "k8s.io/apiserver/pkg/util/feature", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/github.com/spf13/pflag:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/apiserver/pkg/util/flag/BUILD.bazel b/tests/smoke/vendor/k8s.io/apiserver/pkg/util/flag/BUILD.bazel deleted file mode 100644 index 0031c6f7b4e..00000000000 --- a/tests/smoke/vendor/k8s.io/apiserver/pkg/util/flag/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "configuration_map.go", - "flags.go", - "namedcertkey_flag.go", - "string_flag.go", - "tristate.go", - ], - 
importmap = "installer/tests/smoke/vendor/k8s.io/apiserver/pkg/util/flag", - importpath = "k8s.io/apiserver/pkg/util/flag", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/github.com/spf13/pflag:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/discovery/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/discovery/BUILD.bazel deleted file mode 100644 index 66a5424f263..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/discovery/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "discovery_client.go", - "helper.go", - "restmapper.go", - "unstructured.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/discovery", - importpath = "k8s.io/client-go/discovery", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/emicklei/go-restful-swagger12:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/loads:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/spec:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/version:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/dynamic/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/dynamic/BUILD.bazel deleted file mode 100644 index 00088cab0fc..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/dynamic/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "client_pool.go", - "dynamic_util.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/dynamic", - importpath = "k8s.io/client-go/dynamic", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion/queryparams:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - 
"//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/BUILD.bazel deleted file mode 100644 index 5a021703908..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes", - importpath = "k8s.io/client-go/kubernetes", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/discovery:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/networking/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel deleted file mode 100644 index cb401f83229..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - 
"register.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme", - importpath = "k8s.io/client-go/kubernetes/scheme", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD.bazel deleted file mode 100644 index dd11fd1e795..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "admissionregistration_client.go", - "doc.go", - "externaladmissionhookconfiguration.go", - "generated_expansion.go", - "initializerconfiguration.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - 
"//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel deleted file mode 100644 index 00ea4902630..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apps_client.go", - "controllerrevision.go", - "deployment.go", - "doc.go", - "generated_expansion.go", - "scale.go", - "statefulset.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/apps/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD.bazel deleted file mode 100644 index a0afdc8b5dc..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authentication_client.go", - "doc.go", - "generated_expansion.go", - "tokenreview.go", - "tokenreview_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1", - importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD.bazel deleted file mode 100644 index 0157772320a..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authentication_client.go", - "doc.go", - "generated_expansion.go", - "tokenreview.go", - "tokenreview_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - 
visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD.bazel deleted file mode 100644 index 7742627955f..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authorization_client.go", - "doc.go", - "generated_expansion.go", - "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", - "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", - "subjectaccessreview.go", - "subjectaccessreview_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1", - importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD.bazel deleted file mode 100644 index 354ee0e6484..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authorization_client.go", - "doc.go", - "generated_expansion.go", - "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", - "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", - "subjectaccessreview.go", - "subjectaccessreview_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD.bazel deleted file mode 100644 index 79926ed0bfb..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - 
"horizontalpodautoscaler.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1", - importpath = "k8s.io/client-go/kubernetes/typed/autoscaling/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/BUILD.bazel deleted file mode 100644 index 4e6b338b61d..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - "horizontalpodautoscaler.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD.bazel deleted file mode 100644 index 5247535de45..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "doc.go", - "generated_expansion.go", - "job.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v1", - importpath = "k8s.io/client-go/kubernetes/typed/batch/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD.bazel 
b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD.bazel deleted file mode 100644 index 2518f85af6a..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "cronjob.go", - "doc.go", - "generated_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD.bazel deleted file mode 100644 index d2fd928dd31..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "certificates_client.go", - "certificatesigningrequest.go", - "certificatesigningrequest_expansion.go", - "doc.go", - "generated_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel deleted file mode 100644 index 7857ff0838a..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel +++ /dev/null @@ -1,48 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "componentstatus.go", - "configmap.go", - "core_client.go", - "doc.go", - "endpoints.go", - "event.go", - "event_expansion.go", - "generated_expansion.go", - "limitrange.go", - "namespace.go", - "namespace_expansion.go", - "node.go", - "node_expansion.go", - "persistentvolume.go", - "persistentvolumeclaim.go", - "pod.go", - "pod_expansion.go", - "podtemplate.go", - "replicationcontroller.go", - "resourcequota.go", - "secret.go", - "service.go", - 
"service_expansion.go", - "serviceaccount.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/core/v1", - importpath = "k8s.io/client-go/kubernetes/typed/core/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/ref:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel deleted file mode 100644 index e3924cc1479..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "deployment_expansion.go", - "doc.go", - "extensions_client.go", - "generated_expansion.go", - "ingress.go", - "podsecuritypolicy.go", - "replicaset.go", - "scale.go", - "scale_expansion.go", - "thirdpartyresource.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD.bazel deleted file mode 100644 index 62501213360..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "networking_client.go", - "networkpolicy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/networking/v1", - importpath = 
"k8s.io/client-go/kubernetes/typed/networking/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD.bazel deleted file mode 100644 index 47dd0cfd5f8..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "eviction.go", - "eviction_expansion.go", - "generated_expansion.go", - "poddisruptionbudget.go", - "policy_client.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/policy/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD.bazel deleted file mode 100644 index 5c1d042d35b..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "doc.go", - "generated_expansion.go", - "rbac_client.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD.bazel 
b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD.bazel deleted file mode 100644 index 6e0a0f474fc..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "doc.go", - "generated_expansion.go", - "rbac_client.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD.bazel deleted file mode 100644 index 254b853ece9..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "podpreset.go", - "settings_client.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel deleted file mode 100644 index 8d917601c9c..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "storage_client.go", - "storageclass.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1", - importpath = "k8s.io/client-go/kubernetes/typed/storage/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD.bazel deleted file mode 100644 index 143e325dff3..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "storage_client.go", - "storageclass.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/storage/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/api/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/api/BUILD.bazel deleted file mode 100644 index 61bc45e1b53..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/api/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotation_key_constants.go", - "doc.go", - "field_constants.go", - "json.go", - "objectreference.go", - "register.go", - "resource.go", - "taint.go", - "toleration.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/api", - importpath = "k8s.io/client-go/pkg/api", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/BUILD.bazel 
b/tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/BUILD.bazel deleted file mode 100644 index a01a65ab558..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/BUILD.bazel +++ /dev/null @@ -1,46 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotation_key_constants.go", - "conversion.go", - "defaults.go", - "doc.go", - "generate.go", - "generated.pb.go", - "meta.go", - "objectreference.go", - "register.go", - "resource.go", - "taint.go", - "toleration.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/api/v1", - importpath = "k8s.io/client-go/pkg/api/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/util:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/util/parsers:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/ref/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/ref/BUILD.bazel deleted file mode 100644 index 48434149223..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/ref/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["ref.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/ref", - importpath = "k8s.io/client-go/pkg/api/v1/ref", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/BUILD.bazel deleted file mode 100644 index 04f81645e5d..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = 
"installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration", - importpath = "k8s.io/client-go/pkg/apis/admissionregistration", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel deleted file mode 100644 index 838af27616b..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration/v1alpha1", - importpath = "k8s.io/client-go/pkg/apis/admissionregistration/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/admissionregistration:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/BUILD.bazel deleted file mode 100644 index 5c51da07e35..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps", - importpath = "k8s.io/client-go/pkg/apis/apps", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/BUILD.bazel deleted file mode 100644 index fa477371ae4..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/BUILD.bazel 
+++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1", - importpath = "k8s.io/client-go/pkg/apis/apps/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/BUILD.bazel deleted file mode 100644 index 0bb2f3a1274..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication", - importpath = "k8s.io/client-go/pkg/apis/authentication", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1/BUILD.bazel deleted file mode 100644 index bc81c7a93e7..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1", - importpath = 
"k8s.io/client-go/pkg/apis/authentication/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/BUILD.bazel deleted file mode 100644 index c45363cf157..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1", - importpath = "k8s.io/client-go/pkg/apis/authentication/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authentication:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/BUILD.bazel deleted file mode 100644 index 31d7fb9c9b6..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization", - importpath = "k8s.io/client-go/pkg/apis/authorization", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1/BUILD.bazel deleted file mode 100644 index 
80a8100d40c..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1", - importpath = "k8s.io/client-go/pkg/apis/authorization/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/BUILD.bazel deleted file mode 100644 index e2d4040722e..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1", - importpath = "k8s.io/client-go/pkg/apis/authorization/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/authorization:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/BUILD.bazel deleted file mode 100644 index 3927a6b4215..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotations.go", - "doc.go", - "register.go", - "types.go", 
- "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling", - importpath = "k8s.io/client-go/pkg/apis/autoscaling", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/BUILD.bazel deleted file mode 100644 index 030e3003df9..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1", - importpath = "k8s.io/client-go/pkg/apis/autoscaling/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/BUILD.bazel deleted file mode 100644 index 44adbd88e82..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1", - importpath = "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/autoscaling:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/BUILD.bazel deleted file mode 100644 index 87b51347657..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch", - importpath = "k8s.io/client-go/pkg/apis/batch", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v1/BUILD.bazel deleted file mode 100644 index 955ed398b4d..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v1/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v1", - importpath = "k8s.io/client-go/pkg/apis/batch/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - 
"//tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/BUILD.bazel deleted file mode 100644 index dfe3add08b3..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1", - importpath = "k8s.io/client-go/pkg/apis/batch/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/batch/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/BUILD.bazel deleted file mode 100644 index 16dc0ecd9c6..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates", - importpath = "k8s.io/client-go/pkg/apis/certificates", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/BUILD.bazel deleted file mode 100644 index 23041d849e9..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - 
"generated.pb.go", - "helpers.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1", - importpath = "k8s.io/client-go/pkg/apis/certificates/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/certificates:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/BUILD.bazel deleted file mode 100644 index da68a075392..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions", - importpath = "k8s.io/client-go/pkg/apis/extensions", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/BUILD.bazel deleted file mode 100644 index bed72b5d010..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1", - importpath = "k8s.io/client-go/pkg/apis/extensions/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", 
- "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/BUILD.bazel deleted file mode 100644 index f011071b0ba..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking", - importpath = "k8s.io/client-go/pkg/apis/networking", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/v1/BUILD.bazel deleted file mode 100644 index 4c0636034b5..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/v1/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking/v1", - importpath = "k8s.io/client-go/pkg/apis/networking/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/networking:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/BUILD.bazel deleted file mode 100644 index b25dd97a707..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy", - importpath = "k8s.io/client-go/pkg/apis/policy", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/BUILD.bazel deleted file mode 100644 index c32ba272a39..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1", - importpath = "k8s.io/client-go/pkg/apis/policy/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/policy:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/BUILD.bazel deleted file mode 100644 index fc5f1f4c94c..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - 
-go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac", - importpath = "k8s.io/client-go/pkg/apis/rbac", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/BUILD.bazel deleted file mode 100644 index 623599d6196..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "helpers.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1", - importpath = "k8s.io/client-go/pkg/apis/rbac/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/BUILD.bazel deleted file mode 100644 index 0a8c24f2b90..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "generated.pb.go", - "helpers.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1", - importpath = "k8s.io/client-go/pkg/apis/rbac/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/rbac:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/BUILD.bazel deleted file mode 100644 index 504777c8618..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings", - importpath = "k8s.io/client-go/pkg/apis/settings", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/BUILD.bazel deleted file mode 100644 index c4697345923..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1", - importpath = "k8s.io/client-go/pkg/apis/settings/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/settings:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/BUILD.bazel deleted file mode 100644 index 979a21fd460..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage", - importpath = "k8s.io/client-go/pkg/apis/storage", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1/BUILD.bazel deleted file mode 100644 index bb9bbd584fc..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1", - importpath = "k8s.io/client-go/pkg/apis/storage/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/BUILD.bazel deleted file mode 100644 index 5c1e7352c2e..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1", - importpath = "k8s.io/client-go/pkg/apis/storage/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/apis/storage:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/util/BUILD.bazel deleted file mode 100644 index b743f870b28..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/util/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = 
"go_default_library", - srcs = [ - "doc.go", - "template.go", - "umask.go", - "umask_windows.go", - "util.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/util", - importpath = "k8s.io/client-go/pkg/util", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/util/parsers/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/util/parsers/BUILD.bazel deleted file mode 100644 index 3f20be41565..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/util/parsers/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["parsers.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/util/parsers", - importpath = "k8s.io/client-go/pkg/util/parsers", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/docker/distribution/reference:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/pkg/version/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/pkg/version/BUILD.bazel deleted file mode 100644 index ef8a3244faf..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/pkg/version/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "base.go", - "doc.go", - "version.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/pkg/version", - importpath = "k8s.io/client-go/pkg/version", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apimachinery/pkg/version:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/BUILD.bazel deleted file mode 100644 index ee63e3d01f8..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["plugins.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth", - importpath = "k8s.io/client-go/plugin/pkg/client/auth", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD.bazel deleted file mode 100644 index 134b3780f41..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["azure.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure", - importpath = "k8s.io/client-go/plugin/pkg/client/auth/azure", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/Azure/go-autorest/autorest:go_default_library", - "//tests/smoke/vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - 
"//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD.bazel deleted file mode 100644 index b8b14a2f40b..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["gcp.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp", - importpath = "k8s.io/client-go/plugin/pkg/client/auth/gcp", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/golang.org/x/net/context:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2/google:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/jsonpath:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD.bazel deleted file mode 100644 index 7ce054e4ec5..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["oidc.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc", - importpath = "k8s.io/client-go/plugin/pkg/client/auth/oidc", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/golang.org/x/oauth2:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/rest/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/rest/BUILD.bazel deleted file mode 100644 index 8077276e226..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/rest/BUILD.bazel +++ /dev/null @@ -1,40 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "config.go", - "plugin.go", - "request.go", - "transport.go", - "url_utils.go", - "urlbackoff.go", - "versions.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/rest", - importpath = "k8s.io/client-go/rest", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/version:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/metrics:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/transport:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/cert:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/rest/watch/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/rest/watch/BUILD.bazel deleted file mode 100644 index 7f7e4f657ee..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/rest/watch/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "decoder.go", - "encoder.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/rest/watch", - importpath = "k8s.io/client-go/rest/watch", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD.bazel deleted file mode 100644 index 96454ecc851..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "exec.go", - "funcs.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/third_party/forked/golang/template", - importpath = "k8s.io/client-go/third_party/forked/golang/template", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/tools/auth/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/tools/auth/BUILD.bazel deleted file mode 100644 index 0b51a7137dc..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/tools/auth/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["clientauth.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/tools/auth", - importpath = "k8s.io/client-go/tools/auth", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/tools/cache/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/tools/cache/BUILD.bazel deleted file mode 100644 index 641dac76b63..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/tools/cache/BUILD.bazel +++ /dev/null @@ -1,46 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - 
"controller.go", - "delta_fifo.go", - "doc.go", - "expiration_cache.go", - "expiration_cache_fakes.go", - "fake_custom_store.go", - "fifo.go", - "index.go", - "listers.go", - "listwatch.go", - "mutation_cache.go", - "mutation_detector.go", - "reflector.go", - "shared_informer.go", - "store.go", - "thread_safe_store.go", - "undelta_store.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/tools/cache", - importpath = "k8s.io/client-go/tools/cache", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel deleted file mode 100644 index baf0f2d0f14..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "auth_loaders.go", - "client_config.go", - "config.go", - "doc.go", - "helpers.go", - "loader.go", - "merged_client_builder.go", - "overrides.go", - "validation.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd", - importpath = "k8s.io/client-go/tools/clientcmd", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/github.com/howeyc/gopass:go_default_library", - "//tests/smoke/vendor/github.com/imdario/mergo:go_default_library", - "//tests/smoke/vendor/github.com/spf13/pflag:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/auth:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", - 
"//tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/latest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/homedir:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD.bazel deleted file mode 100644 index fd652a1aff9..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "helpers.go", - "register.go", - "types.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api", - importpath = "k8s.io/client-go/tools/clientcmd/api", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD.bazel deleted file mode 100644 index 13476909048..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["latest.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/latest", - importpath = "k8s.io/client-go/tools/clientcmd/api/latest", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD.bazel deleted file mode 100644 index c2b3b45b845..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "register.go", - "types.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api/v1", - importpath = "k8s.io/client-go/tools/clientcmd/api/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/tools/metrics/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/tools/metrics/BUILD.bazel deleted file mode 100644 index e39e6e4b935..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/tools/metrics/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - 
-go_library( - name = "go_default_library", - srcs = ["metrics.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/tools/metrics", - importpath = "k8s.io/client-go/tools/metrics", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/tools/record/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/tools/record/BUILD.bazel deleted file mode 100644 index 8fa7c0fc3c1..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/tools/record/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "event.go", - "events_cache.go", - "fake.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/tools/record", - importpath = "k8s.io/client-go/tools/record", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/github.com/golang/groupcache/lru:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1/ref:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/transport/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/transport/BUILD.bazel deleted file mode 100644 index 65696d5e267..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/transport/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cache.go", - "config.go", - "round_trippers.go", - "transport.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/transport", - importpath = "k8s.io/client-go/transport", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/util/cert/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/util/cert/BUILD.bazel deleted file mode 100644 index 2bf861c45f1..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/util/cert/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cert.go", - "csr.go", - "io.go", - "pem.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/util/cert", - importpath = "k8s.io/client-go/util/cert", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/util/flowcontrol/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/util/flowcontrol/BUILD.bazel deleted file mode 100644 index a8bf770a499..00000000000 --- 
a/tests/smoke/vendor/k8s.io/client-go/util/flowcontrol/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "backoff.go", - "throttle.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/util/flowcontrol", - importpath = "k8s.io/client-go/util/flowcontrol", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/juju/ratelimit:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/integer:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/util/homedir/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/util/homedir/BUILD.bazel deleted file mode 100644 index efeb2e79d96..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/util/homedir/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["homedir.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/util/homedir", - importpath = "k8s.io/client-go/util/homedir", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/util/integer/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/util/integer/BUILD.bazel deleted file mode 100644 index b9970819352..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/util/integer/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["integer.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/util/integer", - importpath = "k8s.io/client-go/util/integer", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/util/jsonpath/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/util/jsonpath/BUILD.bazel deleted file mode 100644 index eb1b1a3e9f9..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/util/jsonpath/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "jsonpath.go", - "node.go", - "parser.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/util/jsonpath", - importpath = "k8s.io/client-go/util/jsonpath", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/client-go/third_party/forked/golang/template:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/client-go/util/workqueue/BUILD.bazel b/tests/smoke/vendor/k8s.io/client-go/util/workqueue/BUILD.bazel deleted file mode 100644 index ae2760f8eb4..00000000000 --- a/tests/smoke/vendor/k8s.io/client-go/util/workqueue/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "default_rate_limiters.go", - "delaying_queue.go", - "doc.go", - "metrics.go", - "parallelizer.go", - "queue.go", - "rate_limitting_queue.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/client-go/util/workqueue", - importpath = "k8s.io/client-go/util/workqueue", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/juju/ratelimit:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) diff 
--git a/tests/smoke/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD.bazel deleted file mode 100644 index fb6447bbdbd..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "env.go", - "register.go", - "types.go", - "well_known_labels.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm", - importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/BUILD.bazel deleted file mode 100644 index 4b534a220ec..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotations.go", - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation", - importpath = "k8s.io/kubernetes/federation/apis/federation", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/install/BUILD.bazel deleted file mode 100644 index 13245fd2d31..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/install/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/install", - importpath = "k8s.io/kubernetes/federation/apis/federation/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git 
a/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/BUILD.bazel deleted file mode 100644 index cd5beb047b8..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1", - importpath = "k8s.io/kubernetes/federation/apis/federation/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/BUILD.bazel deleted file mode 100644 index 42e3ff8c213..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset", - importpath = "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/discovery:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/autoscaling/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/batch/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme/BUILD.bazel deleted file mode 100644 index 35c3869f12d..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme", - importpath = "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/install:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/autoscaling/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/autoscaling/internalversion/BUILD.bazel deleted file mode 100644 index 46b469925d6..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/autoscaling/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - "horizontalpodautoscaler.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/autoscaling/internalversion", - importpath = "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/autoscaling/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/batch/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/batch/internalversion/BUILD.bazel deleted file mode 100644 index 1a4c2a3ed7e..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/batch/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "doc.go", - "generated_expansion.go", - "job.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/batch/internalversion", - importpath = "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/batch/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/BUILD.bazel deleted file mode 100644 index b72fbb173ae..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "configmap.go", - "core_client.go", - "doc.go", - "event.go", - "generated_expansion.go", - "namespace.go", - "namespace_expansion.go", - "secret.go", - "service.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion", - importpath = "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git 
a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/BUILD.bazel deleted file mode 100644 index e3bc845b386..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "doc.go", - "extensions_client.go", - "generated_expansion.go", - "ingress.go", - "replicaset.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion", - importpath = "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/extensions/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/BUILD.bazel deleted file mode 100644 index 3125ad0eb1c..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "doc.go", - "federation_client.go", - "generated_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion", - importpath = "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/BUILD.bazel deleted file mode 100644 index 1fc404fd4a7..00000000000 --- 
a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotation_key_constants.go", - "doc.go", - "field_constants.go", - "json.go", - "objectreference.go", - "register.go", - "resource.go", - "taint.go", - "toleration.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api", - importpath = "k8s.io/kubernetes/pkg/api", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/events/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/events/BUILD.bazel deleted file mode 100644 index 9b32b58886c..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/events/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["sorted_event_list.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/events", - importpath = "k8s.io/kubernetes/pkg/api/events", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper/BUILD.bazel deleted file mode 100644 index 1a6b7d13dea..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper", - importpath = "k8s.io/kubernetes/pkg/api/helper", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/selection:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git 
a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper/qos/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper/qos/BUILD.bazel deleted file mode 100644 index 729557f678d..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper/qos/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["qos.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper/qos", - importpath = "k8s.io/kubernetes/pkg/api/helper/qos", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/install/BUILD.bazel deleted file mode 100644 index 003e9e88986..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/install/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/install", - importpath = "k8s.io/kubernetes/pkg/api/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD.bazel deleted file mode 100644 index cb0cca11799..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/pod/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/pod", - importpath = "k8s.io/kubernetes/pkg/api/pod", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD.bazel deleted file mode 100644 index 3cde2bcb3d1..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/ref/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["ref.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/ref", - importpath = "k8s.io/kubernetes/pkg/api/ref", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git 
a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD.bazel deleted file mode 100644 index b13a10d33eb..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/resource/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/resource", - importpath = "k8s.io/kubernetes/pkg/api/resource", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/service/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/service/BUILD.bazel deleted file mode 100644 index 628c218c672..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/service/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/service", - importpath = "k8s.io/kubernetes/pkg/api/service", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/net/sets:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/util/BUILD.bazel deleted file mode 100644 index 4ce740ec201..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/util/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["group_version.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/util", - importpath = "k8s.io/kubernetes/pkg/api/util", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD.bazel deleted file mode 100644 index 8778da19e66..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/BUILD.bazel +++ /dev/null @@ -1,46 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotation_key_constants.go", - "conversion.go", - "defaults.go", - "doc.go", - "generate.go", - "generated.pb.go", - "meta.go", - "objectreference.go", - "register.go", - "resource.go", - "taint.go", - "toleration.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1", - importpath = "k8s.io/kubernetes/pkg/api/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/parsers:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/BUILD.bazel deleted file mode 100644 index e0e33a3a1d2..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper", - importpath = "k8s.io/kubernetes/pkg/api/v1/helper", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/selection:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos/BUILD.bazel deleted file mode 100644 index 591fa68f420..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["qos.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos", - importpath = "k8s.io/kubernetes/pkg/api/v1/helper/qos", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/pod/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/pod/BUILD.bazel deleted file mode 100644 index be16533b1b0..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/pod/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/pod", - importpath = "k8s.io/kubernetes/pkg/api/v1/pod", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/ref/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/ref/BUILD.bazel deleted file mode 100644 index ea338c7e541..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/ref/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["ref.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/ref", - importpath = "k8s.io/kubernetes/pkg/api/v1/ref", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD.bazel deleted file mode 100644 index 7d70efa4c25..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/validation/BUILD.bazel +++ /dev/null @@ -1,44 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "events.go", - "schema.go", - "validation.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/api/validation", - importpath = "k8s.io/kubernetes/pkg/api/validation", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/emicklei/go-restful-swagger12:go_default_library", - "//tests/smoke/vendor/github.com/exponent-io/jsonpath:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/service:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/capabilities:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/features:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/security/apparmor:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD.bazel deleted file mode 100644 index 64f87ea4b09..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration", - importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD.bazel deleted file mode 100644 index 5ae892105de..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install", - importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel deleted file mode 100644 index 86a94c8c9cb..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = 
"installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", - importpath = "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD.bazel deleted file mode 100644 index f19575f1b91..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps", - importpath = "k8s.io/kubernetes/pkg/apis/apps", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD.bazel deleted file mode 100644 index 2df5010dc19..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/install/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/install", - importpath = "k8s.io/kubernetes/pkg/apis/apps/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD.bazel deleted file mode 100644 index 4df5f668ecc..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - 
name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1", - importpath = "k8s.io/kubernetes/pkg/apis/apps/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD.bazel deleted file mode 100644 index 64a68d91287..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication", - importpath = "k8s.io/kubernetes/pkg/apis/authentication", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD.bazel deleted file mode 100644 index 373a7de7974..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/install/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/install", - importpath = "k8s.io/kubernetes/pkg/apis/authentication/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD.bazel deleted file mode 100644 index b4a400b17ae..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1", - importpath = "k8s.io/kubernetes/pkg/apis/authentication/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD.bazel deleted file mode 100644 index 8197099bb73..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - importpath = "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD.bazel deleted file mode 100644 index fc1f6edcf43..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization", - importpath = "k8s.io/kubernetes/pkg/apis/authorization", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD.bazel deleted file mode 100644 index 5dbc0140466..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/install", - importpath = "k8s.io/kubernetes/pkg/apis/authorization/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD.bazel deleted file mode 100644 index 43dda53bd6b..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1", - importpath = "k8s.io/kubernetes/pkg/apis/authorization/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - 
"//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD.bazel deleted file mode 100644 index d4a5d5465fa..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - importpath = "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD.bazel deleted file mode 100644 index 468793c3b93..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotations.go", - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling", - importpath = "k8s.io/kubernetes/pkg/apis/autoscaling", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git 
a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD.bazel deleted file mode 100644 index 7277881a9d1..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install", - importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD.bazel deleted file mode 100644 index 90081303930..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1", - importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/BUILD.bazel deleted file mode 100644 index 50f4aaee8d0..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - 
"types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1", - importpath = "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD.bazel deleted file mode 100644 index 9247d266047..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch", - importpath = "k8s.io/kubernetes/pkg/apis/batch", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD.bazel deleted file mode 100644 index adf42389666..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/install/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/install", - importpath = "k8s.io/kubernetes/pkg/apis/batch/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1:go_default_library", - ], -) diff 
--git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD.bazel deleted file mode 100644 index e1e029d663c..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v1", - importpath = "k8s.io/kubernetes/pkg/apis/batch/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD.bazel deleted file mode 100644 index 087f1d89cf9..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - importpath = "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD.bazel deleted file mode 100644 index 12c5749cb81..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates", - importpath = "k8s.io/kubernetes/pkg/apis/certificates", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD.bazel deleted file mode 100644 index c355212be74..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/install/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/install", - importpath = "k8s.io/kubernetes/pkg/apis/certificates/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD.bazel deleted file mode 100644 index bbee5e5ae91..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "helpers.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1", - importpath = "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - 
"//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD.bazel deleted file mode 100644 index a57f2390f38..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig", - importpath = "k8s.io/kubernetes/pkg/apis/componentconfig", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD.bazel deleted file mode 100644 index 21b107d7592..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install", - importpath = "k8s.io/kubernetes/pkg/apis/componentconfig/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD.bazel deleted file mode 100644 index 023fbe260fa..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", 
"go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "register.go", - "types.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", - importpath = "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/apis:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/qos:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/master/ports:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD.bazel deleted file mode 100644 index e9f6a682c60..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions", - importpath = "k8s.io/kubernetes/pkg/apis/extensions", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD.bazel deleted file mode 100644 index 8510fae72e9..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/install", - importpath = "k8s.io/kubernetes/pkg/apis/extensions/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD.bazel deleted file mode 100644 index 8917ee56c84..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - importpath = "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/BUILD.bazel deleted file mode 100644 index 964bc648a27..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking", - importpath = "k8s.io/kubernetes/pkg/apis/networking", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD.bazel deleted file mode 100644 index d3334c0e1cb..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/install/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/install", - importpath = "k8s.io/kubernetes/pkg/apis/networking/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD.bazel deleted file mode 100644 index 328c4a43917..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/v1/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/v1", - importpath = "k8s.io/kubernetes/pkg/apis/networking/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD.bazel deleted file mode 100644 index 569cd423737..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/BUILD.bazel +++ /dev/null @@ -1,21 
+0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy", - importpath = "k8s.io/kubernetes/pkg/apis/policy", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD.bazel deleted file mode 100644 index df445dd3698..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/install/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/install", - importpath = "k8s.io/kubernetes/pkg/apis/policy/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD.bazel deleted file mode 100644 index a47266cc0ef..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1", - importpath = "k8s.io/kubernetes/pkg/apis/policy/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD.bazel deleted file mode 100644 index b5bcceecb61..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac", - importpath = "k8s.io/kubernetes/pkg/apis/rbac", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD.bazel deleted file mode 100644 index 834c94ddf23..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/install", - importpath = "k8s.io/kubernetes/pkg/apis/rbac/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD.bazel deleted file mode 100644 index 1d94fa7534d..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "defaults.go", - "doc.go", - "generated.pb.go", - "helpers.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - importpath = "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD.bazel deleted file mode 100644 index 562b11d8783..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "doc.go", - "generated.pb.go", - "helpers.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1", - importpath = "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD.bazel deleted file mode 100644 index 8b1376cb939..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings", - importpath = "k8s.io/kubernetes/pkg/apis/settings", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD.bazel deleted file mode 100644 index 48b97881ba3..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/install/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = 
"installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/install", - importpath = "k8s.io/kubernetes/pkg/apis/settings/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD.bazel deleted file mode 100644 index 21cf4a5073c..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1", - importpath = "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD.bazel deleted file mode 100644 index aa927ffde8b..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage", - importpath = "k8s.io/kubernetes/pkg/apis/storage", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD.bazel deleted file mode 100644 index cf5a61d4554..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/install/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/install", - importpath = "k8s.io/kubernetes/pkg/apis/storage/install", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD.bazel deleted file mode 100644 index 08452533aeb..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/util/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["helpers.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/util", - importpath = "k8s.io/kubernetes/pkg/apis/storage/util", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD.bazel deleted file mode 100644 index 7456380c283..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1", - importpath = "k8s.io/kubernetes/pkg/apis/storage/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD.bazel deleted file mode 100644 index 8bb2d8e5f63..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", 
- "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1", - importpath = "k8s.io/kubernetes/pkg/apis/storage/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD.bazel deleted file mode 100644 index c78007d56aa..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/capabilities/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "capabilities.go", - "doc.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/capabilities", - importpath = "k8s.io/kubernetes/pkg/capabilities", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/BUILD.bazel deleted file mode 100644 index dd1c76ddb9b..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/BUILD.bazel +++ /dev/null @@ -1,52 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - "import_known_versions.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/discovery:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/install:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel deleted file mode 100644 index 170d9840d81..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - ], - importmap = 
"installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/BUILD.bazel deleted file mode 100644 index 79243df3876..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "admissionregistration_client.go", - "doc.go", - "externaladmissionhookconfiguration.go", - "generated_expansion.go", - "initializerconfiguration.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/admissionregistration/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/BUILD.bazel deleted file mode 100644 index 8a21c156419..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apps_client.go", - "controllerrevision.go", - "deployment.go", - "doc.go", - "generated_expansion.go", - "scale.go", - "statefulset.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/BUILD.bazel deleted file mode 100644 index 1043c8f6ae6..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authentication_client.go", - "doc.go", - "generated_expansion.go", - "tokenreview.go", - "tokenreview_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/BUILD.bazel 
b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/BUILD.bazel deleted file mode 100644 index 383111ab4db..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authentication_client.go", - "doc.go", - "generated_expansion.go", - "tokenreview.go", - "tokenreview_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/BUILD.bazel deleted file mode 100644 index 13950689e26..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authorization_client.go", - "doc.go", - "generated_expansion.go", - "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", - "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", - "subjectaccessreview.go", - "subjectaccessreview_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/BUILD.bazel deleted file mode 100644 index d025df6d8e7..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authorization_client.go", - "doc.go", - "generated_expansion.go", - "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", - "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", - "subjectaccessreview.go", - 
"subjectaccessreview_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/BUILD.bazel deleted file mode 100644 index e3c14bd48b3..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - "horizontalpodautoscaler.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/BUILD.bazel deleted file mode 100644 index 20806912e79..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - "horizontalpodautoscaler.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/autoscaling/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - 
"//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/BUILD.bazel deleted file mode 100644 index c77e06df1c5..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "doc.go", - "generated_expansion.go", - "job.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/BUILD.bazel deleted file mode 100644 index 7839ad72fb0..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "cronjob.go", - "doc.go", - "generated_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/batch/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/BUILD.bazel deleted file mode 100644 index e749f8d69c9..00000000000 --- 
a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "certificates_client.go", - "certificatesigningrequest.go", - "certificatesigningrequest_expansion.go", - "doc.go", - "generated_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/certificates/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD.bazel deleted file mode 100644 index 0c2e523aa93..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1/BUILD.bazel +++ /dev/null @@ -1,48 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "componentstatus.go", - "configmap.go", - "core_client.go", - "doc.go", - "endpoints.go", - "event.go", - "event_expansion.go", - "generated_expansion.go", - "limitrange.go", - "namespace.go", - "namespace_expansion.go", - "node.go", - "node_expansion.go", - "persistentvolume.go", - "persistentvolumeclaim.go", - "pod.go", - "pod_expansion.go", - "podtemplate.go", - "replicationcontroller.go", - "resourcequota.go", - "secret.go", - "service.go", - "service_expansion.go", - "serviceaccount.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/ref:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/BUILD.bazel deleted file mode 100644 index 6f4df591313..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "deployment_expansion.go", - "doc.go", - "extensions_client.go", - "generated_expansion.go", - "ingress.go", - "podsecuritypolicy.go", - "replicaset.go", - "scale.go", - "scale_expansion.go", - "thirdpartyresource.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1/BUILD.bazel deleted file mode 100644 index 1218b0f9005..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "networking_client.go", - "networkpolicy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/networking/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git 
a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/BUILD.bazel deleted file mode 100644 index 4723f5d56cf..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "eviction.go", - "eviction_expansion.go", - "generated_expansion.go", - "poddisruptionbudget.go", - "policy_client.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/policy/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/BUILD.bazel deleted file mode 100644 index 57633d8dc03..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "doc.go", - "generated_expansion.go", - "rbac_client.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/BUILD.bazel deleted file mode 100644 index 628f131c913..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "doc.go", - "generated_expansion.go", - "rbac_client.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/BUILD.bazel deleted file mode 100644 index 6226a9a0165..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "podpreset.go", - "settings_client.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/settings/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/BUILD.bazel deleted file mode 100644 index dd85f3b0bdb..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "storage_client.go", - "storageclass.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1", - visibility = ["//visibility:public"], - deps = [ - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/BUILD.bazel deleted file mode 100644 index e70551886b8..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "storage_client.go", - "storageclass.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/storage/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD.bazel deleted file mode 100644 index f12864f2a21..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/discovery:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD.bazel deleted file mode 100644 index f75be6ce9bd..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "register_custom.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/install:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings/install:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/install:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD.bazel deleted file mode 100644 index c5bc7a106d9..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "admissionregistration_client.go", - "doc.go", - "externaladmissionhookconfiguration.go", - "generated_expansion.go", - "initializerconfiguration.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/admissionregistration:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD.bazel deleted file mode 100644 index 17bc82d1010..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apps_client.go", - "controllerrevision.go", - "doc.go", - "generated_expansion.go", - "statefulset.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", - importpath = 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD.bazel deleted file mode 100644 index f32f69de7db..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authentication_client.go", - "doc.go", - "generated_expansion.go", - "tokenreview.go", - "tokenreview_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD.bazel deleted file mode 100644 index 4e9b677d52e..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authorization_client.go", - "doc.go", - "generated_expansion.go", - "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", - "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", - "subjectaccessreview.go", - "subjectaccessreview_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authorization:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git 
a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD.bazel deleted file mode 100644 index c3eb1f62b2e..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - "horizontalpodautoscaler.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD.bazel deleted file mode 100644 index 37fa27f719e..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "cronjob.go", - "doc.go", - "generated_expansion.go", - "job.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD.bazel deleted file mode 100644 index ed41ed46284..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "certificates_client.go", - "certificatesigningrequest.go", - "certificatesigningrequest_expansion.go", - "doc.go", - "generated_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD.bazel deleted file mode 100644 index 679ed080026..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion/BUILD.bazel +++ /dev/null @@ -1,47 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "componentstatus.go", - "configmap.go", - "core_client.go", - "doc.go", - "endpoints.go", - "event.go", - "event_expansion.go", - "generated_expansion.go", - "limitrange.go", - "namespace.go", - "namespace_expansion.go", - "node.go", - "node_expansion.go", - "persistentvolume.go", - "persistentvolumeclaim.go", - "pod.go", - "pod_expansion.go", - "podtemplate.go", - "replicationcontroller.go", - "resourcequota.go", - "secret.go", - "service.go", - "service_expansion.go", - "serviceaccount.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/ref:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD.bazel 
b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD.bazel deleted file mode 100644 index 54c756bd26d..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "deployment_expansion.go", - "doc.go", - "extensions_client.go", - "generated_expansion.go", - "ingress.go", - "networkpolicy.go", - "podsecuritypolicy.go", - "replicaset.go", - "scale.go", - "scale_expansion.go", - "thirdpartyresource.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD.bazel deleted file mode 100644 index 4af0b93060c..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "networking_client.go", - "networkpolicy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD.bazel deleted file mode 
100644 index 73573daece8..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "eviction.go", - "eviction_expansion.go", - "generated_expansion.go", - "poddisruptionbudget.go", - "policy_client.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD.bazel deleted file mode 100644 index 714a9b2e770..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "doc.go", - "generated_expansion.go", - "rbac_client.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD.bazel deleted file mode 100644 index 1a4e7ebe360..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "podpreset.go", - "settings_client.go", - ], - importmap = 
"installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD.bazel deleted file mode 100644 index 44fa3eeb754..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "storage_client.go", - "storageclass.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", - importpath = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1/BUILD.bazel deleted file mode 100644 index e407702b5f7..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "controllerrevision.go", - "deployment.go", - "interface.go", - "statefulset.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - 
"//tests/smoke/vendor/k8s.io/client-go/tools/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/apps/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1/BUILD.bazel deleted file mode 100644 index 94f25a84dd3..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "componentstatus.go", - "configmap.go", - "endpoints.go", - "event.go", - "interface.go", - "limitrange.go", - "namespace.go", - "node.go", - "persistentvolume.go", - "persistentvolumeclaim.go", - "pod.go", - "podtemplate.go", - "replicationcontroller.go", - "resourcequota.go", - "secret.go", - "service.go", - "serviceaccount.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1", - importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/core/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1/BUILD.bazel deleted file mode 100644 index 82e3881d27f..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "ingress.go", - "interface.go", - "podsecuritypolicy.go", - "replicaset.go", - "thirdpartyresource.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces/BUILD.bazel deleted file mode 100644 index 65303aa1bb6..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["factory_interfaces.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces", - importpath = "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/BUILD.bazel deleted file mode 100644 index 2f9ee3908a7..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "configmaplock.go", - "endpointslock.go", - "interface.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/leaderelection/resourcelock", - importpath = "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/record:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/apps/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/apps/v1beta1/BUILD.bazel deleted file mode 100644 index bc850eb91bb..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/apps/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - 
srcs = [ - "controllerrevision.go", - "deployment.go", - "expansion_generated.go", - "scale.go", - "statefulset.go", - "statefulset_expansion.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/apps/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/listers/apps/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/core/v1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/core/v1/BUILD.bazel deleted file mode 100644 index 8e88ade8b23..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/core/v1/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "componentstatus.go", - "configmap.go", - "endpoints.go", - "event.go", - "expansion_generated.go", - "limitrange.go", - "namespace.go", - "node.go", - "node_expansion.go", - "persistentvolume.go", - "persistentvolumeclaim.go", - "pod.go", - "podtemplate.go", - "replicationcontroller.go", - "replicationcontroller_expansion.go", - "resourcequota.go", - "secret.go", - "service.go", - "service_expansion.go", - "serviceaccount.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/core/v1", - importpath = "k8s.io/kubernetes/pkg/client/listers/core/v1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1/BUILD.bazel deleted file mode 100644 index c3e0fcf09e0..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "daemonset_expansion.go", - "deployment.go", - "deployment_expansion.go", - "expansion_generated.go", - "ingress.go", - "podsecuritypolicy.go", - "replicaset.go", - "replicaset_expansion.go", - "scale.go", - "thirdpartyresource.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1", - importpath = "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - 
"//tests/smoke/vendor/k8s.io/client-go/tools/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/retry/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/retry/BUILD.bazel deleted file mode 100644 index 6fdf2593855..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/retry/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/retry", - importpath = "k8s.io/kubernetes/pkg/client/retry", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD.bazel deleted file mode 100644 index d9c32e3487d..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/unversioned/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conditions.go", - "helper.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/client/unversioned", - importpath = "k8s.io/kubernetes/pkg/client/unversioned", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/plugin/pkg/client/auth:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/pod:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/BUILD.bazel deleted file mode 100644 index 95bae686878..00000000000 --- 
a/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/BUILD.bazel +++ /dev/null @@ -1,55 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client_builder.go", - "controller_ref_manager.go", - "controller_utils.go", - "doc.go", - "lookup_cache.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller", - importpath = "k8s.io/kubernetes/pkg/controller", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/github.com/golang/groupcache/lru:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/record:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/integer:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/pod:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/ref:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/authentication/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authentication/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/retry:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/serviceaccount:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/hash:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD.bazel deleted file mode 100644 index 03bbbb5f3c2..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon/BUILD.bazel +++ /dev/null @@ -1,54 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "daemoncontroller.go", - "doc.go", - "update.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon", - importpath = "k8s.io/kubernetes/pkg/controller/daemon", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/record:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/workqueue:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/pod:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/controller:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/features:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/metrics:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD.bazel deleted file mode 100644 index fe098905e57..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon/util/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["daemonset_util.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon/util", - importpath = "k8s.io/kubernetes/pkg/controller/daemon/util", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/pod:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD.bazel deleted file mode 100644 index 657cf02801f..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "deployment_util.go", - "pod_util.go", - "replicaset_util.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/deployment/util", - importpath = "k8s.io/kubernetes/pkg/controller/deployment/util", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - 
"//tests/smoke/vendor/k8s.io/client-go/util/integer:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/retry:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/controller:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/labels:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD.bazel deleted file mode 100644 index 7fcc664c26b..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/credentialprovider/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "doc.go", - "keyring.go", - "plugins.go", - "provider.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/credentialprovider", - importpath = "k8s.io/kubernetes/pkg/credentialprovider", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/docker/engine-api/types:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/features/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/features/BUILD.bazel deleted file mode 100644 index 1a6c8ef23ae..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/features/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["kube_features.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/features", - importpath = "k8s.io/kubernetes/pkg/features", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apiserver/pkg/features:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD.bazel deleted file mode 100644 index ab44f9a6090..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/fieldpath/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fieldpath.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/fieldpath", - importpath = "k8s.io/kubernetes/pkg/fieldpath", - visibility = 
["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel deleted file mode 100644 index 6e806594305..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/BUILD.bazel +++ /dev/null @@ -1,109 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apply.go", - "autoscale.go", - "bash_comp_utils.go", - "cluster.go", - "clusterrolebinding.go", - "configmap.go", - "deployment.go", - "doc.go", - "env_file.go", - "explain.go", - "generate.go", - "history.go", - "interfaces.go", - "kubectl.go", - "namespace.go", - "pdb.go", - "proxy_server.go", - "quota.go", - "resource_filter.go", - "rolebinding.go", - "rollback.go", - "rolling_updater.go", - "rollout_status.go", - "run.go", - "scale.go", - "secret.go", - "secret_for_docker_registry.go", - "secret_for_tls.go", - "service.go", - "service_basic.go", - "serviceaccount.go", - "sorting_printer.go", - "stop.go", - "versioned_client.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl", - importpath = "k8s.io/kubernetes/pkg/kubectl", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/emicklei/go-restful-swagger12:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/github.com/spf13/cobra:go_default_library", - "//tests/smoke/vendor/github.com/spf13/pflag:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/integer:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/jsonpath:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/pod:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/retry:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/unversioned:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/controller:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/daemon:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/deployment/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/credentialprovider:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/printers:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/slice:go_default_library", - "//tests/smoke/vendor/vbom.ml/util/sortorder:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel deleted file mode 100644 index d0a8ac3f7c3..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/BUILD.bazel +++ 
/dev/null @@ -1,69 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cached_discovery.go", - "clientcache.go", - "factory.go", - "factory_builder.go", - "factory_client_access.go", - "factory_object_mapping.go", - "helpers.go", - "printing.go", - "shortcut_restmapper.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util", - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/emicklei/go-restful-swagger12:go_default_library", - "//tests/smoke/vendor/github.com/evanphx/json-patch:go_default_library", - "//tests/smoke/vendor/github.com/go-openapi/spec:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/github.com/spf13/cobra:go_default_library", - "//tests/smoke/vendor/github.com/spf13/pflag:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/version:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/util/flag:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/discovery:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/dynamic:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/clientcmd:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/homedir:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/unversioned:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/controller:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/plugins:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/printers:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/exec:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/version:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel deleted file mode 100644 index 93622f42ab4..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "extensions.go", - "openapi.go", - "openapi_cache.go", - "openapi_getter.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi", - importpath = "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/go-openapi/spec:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/discovery:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/version:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/BUILD.bazel deleted file mode 100644 index 1ad6acd6867..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/plugins/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "env.go", - "loader.go", - "plugins.go", - "runner.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/plugins", - importpath = "k8s.io/kubernetes/pkg/kubectl/plugins", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/ghodss/yaml:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/github.com/spf13/pflag:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/clientcmd:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD.bazel deleted file mode 100644 index 01bbc8c984a..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/resource/BUILD.bazel +++ /dev/null @@ -1,41 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = 
"go_default_library", - srcs = [ - "builder.go", - "categories.go", - "doc.go", - "helper.go", - "interfaces.go", - "mapper.go", - "result.go", - "selector.go", - "visitor.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/resource", - importpath = "k8s.io/kubernetes/pkg/kubectl/resource", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/encoding/unicode:go_default_library", - "//tests/smoke/vendor/golang.org/x/text/transform:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/discovery:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel deleted file mode 100644 index 50dea239360..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/util/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubectl/util", - importpath = "k8s.io/kubernetes/pkg/kubectl/util", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD.bazel deleted file mode 100644 index c9de0c0ba54..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "well_known_annotations.go", - "well_known_labels.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/apis", - importpath = "k8s.io/kubernetes/pkg/kubelet/apis", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD.bazel deleted file mode 100644 index 
a960245b294..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/qos/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "policy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/qos", - importpath = "k8s.io/kubernetes/pkg/kubelet/qos", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD.bazel deleted file mode 100644 index 32298bb9bae..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "constants.go", - "doc.go", - "labels.go", - "pod_update.go", - "types.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/types", - importpath = "k8s.io/kubernetes/pkg/kubelet/types", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD.bazel deleted file mode 100644 index c90b58771eb..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/master/ports/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "ports.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/master/ports", - importpath = "k8s.io/kubernetes/pkg/master/ports", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/BUILD.bazel deleted file mode 100644 index 0f3e470486a..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "common.go", - "customcolumn.go", - "humanreadable.go", - "interface.go", - "json.go", - "jsonpath.go", - "name.go", - "printers.go", - "tabwriter.go", - "template.go", - "versioned.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/printers", - importpath = "k8s.io/kubernetes/pkg/printers", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/fatih/camelcase:go_default_library", - "//tests/smoke/vendor/github.com/ghodss/yaml:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/jsonpath:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/slice:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD.bazel deleted file mode 100644 index 182a1751f82..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/internalversion/BUILD.bazel +++ /dev/null @@ -1,63 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "describe.go", - "printers.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/printers/internalversion", - importpath = "k8s.io/kubernetes/pkg/printers/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/fatih/camelcase:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/dynamic:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/apis/federation:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/events:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/helper/qos:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/ref:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/autoscaling:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/batch:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/certificates:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/networking:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/policy:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/settings:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/controller:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/controller/deployment/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/fieldpath:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/printers:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/node:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/slice:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD.bazel deleted file mode 100644 index 60c685f2932..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "policy_compact.go", - "policy_comparator.go", - "rule.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/registry/rbac/validation", - importpath = "k8s.io/kubernetes/pkg/registry/rbac/validation", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/rbac:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD.bazel deleted file mode 100644 index 44c4364b4f8..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/security/apparmor/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "helpers.go", - "validate.go", - "validate_disabled.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/security/apparmor", - importpath = "k8s.io/kubernetes/pkg/security/apparmor", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/features:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD.bazel deleted file mode 100644 index 51c96697bf6..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/serviceaccount/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "jwt.go", - "util.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/serviceaccount", - importpath = "k8s.io/kubernetes/pkg/serviceaccount", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/dgrijalva/jwt-go:go_default_library", - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/cert:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/BUILD.bazel deleted file mode 100644 index 1c1fe5def00..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "template.go", - "umask.go", - "umask_windows.go", - "util.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/util", - importpath = "k8s.io/kubernetes/pkg/util", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/exec/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/exec/BUILD.bazel deleted file mode 100644 index 2df2e2f1f86..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/exec/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "exec.go", - "fake_exec.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/exec", - importpath = "k8s.io/kubernetes/pkg/util/exec", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD.bazel deleted file mode 100644 index 0c243a76983..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/hash/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["hash.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/hash", - importpath = "k8s.io/kubernetes/pkg/util/hash", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/davecgh/go-spew/spew:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD.bazel deleted file mode 100644 index c093edd5ede..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/labels/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "labels.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/labels", - importpath = "k8s.io/kubernetes/pkg/util/labels", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/metrics/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/metrics/BUILD.bazel deleted file mode 100644 index 5cfcfb209e9..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/metrics/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/metrics", - importpath = "k8s.io/kubernetes/pkg/util/metrics", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD.bazel deleted file mode 100644 index 54be1b2d749..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fake.go", - "mount.go", - "mount_linux.go", - "mount_unsupported.go", - "nsenter_mount.go", - "nsenter_mount_unsupported.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/mount", - importpath = "k8s.io/kubernetes/pkg/util/mount", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/exec:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD.bazel deleted file mode 100644 index 548a9f9bdb2..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/net/sets/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "ipnet.go", - ], - 
importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/net/sets", - importpath = "k8s.io/kubernetes/pkg/util/net/sets", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/node/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/node/BUILD.bazel deleted file mode 100644 index 68c66d396bb..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/node/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["node.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/node", - importpath = "k8s.io/kubernetes/pkg/util/node", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/apis:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD.bazel deleted file mode 100644 index 39171630675..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/parsers/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["parsers.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/parsers", - importpath = "k8s.io/kubernetes/pkg/util/parsers", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/github.com/docker/distribution/reference:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD.bazel deleted file mode 100644 index 37de249f58f..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/slice/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["slice.go"], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/util/slice", - importpath = "k8s.io/kubernetes/pkg/util/slice", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library"], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/pkg/version/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/version/BUILD.bazel deleted file mode 100644 index 1b5e2f2e6b0..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/version/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "base.go", - "doc.go", - "version.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/version", - importpath = "k8s.io/kubernetes/pkg/version", - visibility = ["//visibility:public"], - deps = ["//tests/smoke/vendor/k8s.io/apimachinery/pkg/version:go_default_library"], -) diff --git 
a/tests/smoke/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD.bazel deleted file mode 100644 index bc0b2f64152..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD.bazel +++ /dev/null @@ -1,65 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "atomic_writer.go", - "device_util.go", - "device_util_linux.go", - "device_util_unsupported.go", - "doc.go", - "fs.go", - "fs_unsupported.go", - "io_util.go", - "util.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/pkg/volume/util", - importpath = "k8s.io/kubernetes/pkg/volume/util", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/storage/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/clientset:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:android": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/BUILD.bazel deleted file mode 100644 index 46c9b7dda68..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "scheduler_interface.go", - "types.go", - "well_known_labels.go", - 
], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/BUILD.bazel deleted file mode 100644 index 5da78ec63fd..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "error.go", - "metadata.go", - "predicates.go", - "utils.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/workqueue:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper/qos:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/client/listers/core/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/features:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/kubelet/apis:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/volume/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util:go_default_library", - "//tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util/BUILD.bazel deleted file mode 100644 index 82475b9c4bc..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "non_zero.go", - "topologies.go", - "util.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/features:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/BUILD.bazel deleted file mode 100644 index ae5a65904d8..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "register.go", - "types.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/api", - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/api", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/BUILD.bazel deleted file mode 100644 index 4b08b2b7bbe..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cache.go", - "interface.go", - "node_info.go", - "reconcile_affinity.go", - "util.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//tests/smoke/vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/tools/cache:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1/helper:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/features:go_default_library", - 
"//tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/BUILD.bazel b/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/BUILD.bazel deleted file mode 100644 index 8adf7fc765d..00000000000 --- a/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "backoff_utils.go", - "utils.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/util", - importpath = "k8s.io/kubernetes/plugin/pkg/scheduler/util", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/kubernetes/pkg/api/v1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/BUILD.bazel b/tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/BUILD.bazel deleted file mode 100644 index 9172adb50c1..00000000000 --- a/tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.generated.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics", - importpath = "k8s.io/metrics/pkg/apis/metrics", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/BUILD.bazel deleted file mode 100644 index 34d36cf4149..00000000000 --- a/tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.generated.go", - "types.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1", - importpath = "k8s.io/metrics/pkg/apis/metrics/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/gogo/protobuf/proto:go_default_library", - "//tests/smoke/vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//tests/smoke/vendor/github.com/ugorji/go/codec:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - 
"//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/pkg/api/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/BUILD.bazel b/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/BUILD.bazel deleted file mode 100644 index 970ebfeca73..00000000000 --- a/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset", - importpath = "k8s.io/metrics/pkg/client/clientset_generated/clientset", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/github.com/golang/glog:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/discovery:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - "//tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel b/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel deleted file mode 100644 index 82c57d49c3e..00000000000 --- a/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme", - importpath = "k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library", - ], -) diff --git a/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/BUILD.bazel b/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/BUILD.bazel deleted file mode 100644 index 1d9135a28aa..00000000000 --- a/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "metrics_client.go", - "nodemetrics.go", - "podmetrics.go", - ], - importmap = "installer/tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1", - importpath = 
"k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//tests/smoke/vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//tests/smoke/vendor/k8s.io/client-go/rest:go_default_library", - "//tests/smoke/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library", - "//tests/smoke/vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) diff --git a/tests/smoke/vendor/vbom.ml/util/BUILD.bazel b/tests/smoke/vendor/vbom.ml/util/BUILD.bazel deleted file mode 100644 index 717364c26e1..00000000000 --- a/tests/smoke/vendor/vbom.ml/util/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "regexp.go", - ], - importmap = "installer/tests/smoke/vendor/vbom.ml/util", - importpath = "vbom.ml/util", - visibility = ["//visibility:public"], -) diff --git a/tests/smoke/vendor/vbom.ml/util/sortorder/BUILD.bazel b/tests/smoke/vendor/vbom.ml/util/sortorder/BUILD.bazel deleted file mode 100644 index 745ebea6b06..00000000000 --- a/tests/smoke/vendor/vbom.ml/util/sortorder/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "natsort.go", - ], - importmap = "installer/tests/smoke/vendor/vbom.ml/util/sortorder", - importpath = "vbom.ml/util/sortorder", - visibility = ["//visibility:public"], -) diff --git a/vendor/bitbucket.org/ww/goautoneg/BUILD.bazel b/vendor/bitbucket.org/ww/goautoneg/BUILD.bazel deleted file mode 100644 index 3eaa3d174cd..00000000000 --- a/vendor/bitbucket.org/ww/goautoneg/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["autoneg.go"], - importmap = "installer/vendor/bitbucket.org/ww/goautoneg", - importpath = "bitbucket.org/ww/goautoneg", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/AlecAivazis/survey/BUILD.bazel b/vendor/github.com/AlecAivazis/survey/BUILD.bazel deleted file mode 100644 index efadc9f9d24..00000000000 --- a/vendor/github.com/AlecAivazis/survey/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "confirm.go", - "editor.go", - "input.go", - "multiselect.go", - "password.go", - "select.go", - "survey.go", - "transform.go", - "validate.go", - ], - importmap = "installer/vendor/github.com/AlecAivazis/survey", - importpath = "github.com/AlecAivazis/survey", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/kballard/go-shellquote:go_default_library", - "//vendor/gopkg.in/AlecAivazis/survey.v1/core:go_default_library", - "//vendor/gopkg.in/AlecAivazis/survey.v1/terminal:go_default_library", - ], -) diff --git a/vendor/github.com/NYTimes/gziphandler/BUILD.bazel b/vendor/github.com/NYTimes/gziphandler/BUILD.bazel deleted file mode 100644 index c510244ad42..00000000000 --- a/vendor/github.com/NYTimes/gziphandler/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "gzip.go", - "gzip_go18.go", - ], - 
importmap = "installer/vendor/github.com/NYTimes/gziphandler", - importpath = "github.com/NYTimes/gziphandler", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/PuerkitoBio/purell/BUILD.bazel b/vendor/github.com/PuerkitoBio/purell/BUILD.bazel deleted file mode 100644 index e4b3bf6d2c0..00000000000 --- a/vendor/github.com/PuerkitoBio/purell/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["purell.go"], - importmap = "installer/vendor/github.com/PuerkitoBio/purell", - importpath = "github.com/PuerkitoBio/purell", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/PuerkitoBio/urlesc:go_default_library", - "//vendor/golang.org/x/net/idna:go_default_library", - "//vendor/golang.org/x/text/secure/precis:go_default_library", - "//vendor/golang.org/x/text/unicode/norm:go_default_library", - ], -) diff --git a/vendor/github.com/PuerkitoBio/urlesc/BUILD.bazel b/vendor/github.com/PuerkitoBio/urlesc/BUILD.bazel deleted file mode 100644 index e50921fdcde..00000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["urlesc.go"], - importmap = "installer/vendor/github.com/PuerkitoBio/urlesc", - importpath = "github.com/PuerkitoBio/urlesc", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/ajeddeloh/go-json/BUILD.bazel b/vendor/github.com/ajeddeloh/go-json/BUILD.bazel deleted file mode 100644 index f8f0f9f196c..00000000000 --- a/vendor/github.com/ajeddeloh/go-json/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "decode.go", - "encode.go", - "fold.go", - "indent.go", - "scanner.go", - "stream.go", - "tags.go", - ], - importmap = "installer/vendor/github.com/ajeddeloh/go-json", - importpath = "github.com/ajeddeloh/go-json", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/alecthomas/template/BUILD.bazel b/vendor/github.com/alecthomas/template/BUILD.bazel deleted file mode 100644 index 9a412aa6169..00000000000 --- a/vendor/github.com/alecthomas/template/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "exec.go", - "funcs.go", - "helper.go", - "template.go", - ], - importmap = "installer/vendor/github.com/alecthomas/template", - importpath = "github.com/alecthomas/template", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/alecthomas/template/parse:go_default_library"], -) diff --git a/vendor/github.com/alecthomas/template/parse/BUILD.bazel b/vendor/github.com/alecthomas/template/parse/BUILD.bazel deleted file mode 100644 index 61b1771df46..00000000000 --- a/vendor/github.com/alecthomas/template/parse/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "lex.go", - "node.go", - "parse.go", - ], - importmap = "installer/vendor/github.com/alecthomas/template/parse", - importpath = "github.com/alecthomas/template/parse", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/alecthomas/units/BUILD.bazel b/vendor/github.com/alecthomas/units/BUILD.bazel deleted file mode 100644 index 63234ab856d..00000000000 
--- a/vendor/github.com/alecthomas/units/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bytes.go", - "doc.go", - "si.go", - "util.go", - ], - importmap = "installer/vendor/github.com/alecthomas/units", - importpath = "github.com/alecthomas/units", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/BUILD.bazel b/vendor/github.com/apparentlymart/go-cidr/cidr/BUILD.bazel deleted file mode 100644 index f25e32ec886..00000000000 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cidr.go", - "wrangling.go", - ], - importmap = "installer/vendor/github.com/apparentlymart/go-cidr/cidr", - importpath = "github.com/apparentlymart/go-cidr/cidr", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/BUILD.bazel deleted file mode 100644 index 7ac70c69d93..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go", - importpath = "github.com/aws/aws-sdk-go", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/BUILD.bazel deleted file mode 100644 index c8e28e05b51..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "context.go", - "context_1_6.go", - "context_1_7.go", - "convert_types.go", - "doc.go", - "errors.go", - "jsonvalue.go", - "logger.go", - "types.go", - "url.go", - "url_1_7.go", - "version.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws", - importpath = "github.com/aws/aws-sdk-go/aws", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/endpoints:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/internal/sdkio:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/awserr/BUILD.bazel deleted file mode 100644 index ff088d61e69..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "error.go", - "types.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/awserr", - importpath = "github.com/aws/aws-sdk-go/aws/awserr", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/BUILD.bazel deleted file mode 100644 index e9c169b5565..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "copy.go", - 
"equal.go", - "path_value.go", - "prettify.go", - "string_value.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/awsutil", - importpath = "github.com/aws/aws-sdk-go/aws/awsutil", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/jmespath/go-jmespath:go_default_library"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/client/BUILD.bazel deleted file mode 100644 index d6a37e5863c..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "default_retryer.go", - "logger.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/client", - importpath = "github.com/aws/aws-sdk-go/aws/client", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/internal/sdkrand:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/BUILD.bazel deleted file mode 100644 index 08a3d9f0ad3..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["client_info.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/client/metadata", - importpath = "github.com/aws/aws-sdk-go/aws/client/metadata", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/BUILD.bazel deleted file mode 100644 index a75bf32bd68..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "handlers.go", - "param_validator.go", - "user_agent.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/corehandlers", - importpath = "github.com/aws/aws-sdk-go/aws/corehandlers", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/credentials/BUILD.bazel deleted file mode 100644 index b0e3ed88adc..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "chain_provider.go", - "credentials.go", - "env_provider.go", - "shared_credentials_provider.go", - "static_provider.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/credentials", - importpath = "github.com/aws/aws-sdk-go/aws/credentials", - visibility = ["//visibility:public"], - deps = [ - 
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/internal/shareddefaults:go_default_library", - "//vendor/github.com/go-ini/ini:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/BUILD.bazel deleted file mode 100644 index eae4dd8010e..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["ec2_role_provider.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - importpath = "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/ec2metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/internal/sdkuri:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/BUILD.bazel deleted file mode 100644 index edd1628b9a8..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["provider.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - importpath = "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/BUILD.bazel deleted file mode 100644 index 511ecc0b175..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["assume_role_provider.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds", - importpath = "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/sts:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/csm/BUILD.bazel deleted file mode 100644 index 
864ab8e8c38..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "enable.go", - "metric.go", - "metric_chan.go", - "reporter.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/csm", - importpath = "github.com/aws/aws-sdk-go/aws/csm", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/defaults/BUILD.bazel deleted file mode 100644 index 4667cacda72..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "defaults.go", - "shared_config.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/defaults", - importpath = "github.com/aws/aws-sdk-go/aws/defaults", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/corehandlers:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/ec2metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/endpoints:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/internal/shareddefaults:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/BUILD.bazel deleted file mode 100644 index 098c4ed8731..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "service.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata", - importpath = "github.com/aws/aws-sdk-go/aws/ec2metadata", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/corehandlers:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/internal/sdkuri:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/BUILD.bazel deleted file mode 100644 index a08394856ef..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") 
- -go_library( - name = "go_default_library", - srcs = [ - "decode.go", - "defaults.go", - "doc.go", - "endpoints.go", - "v3model.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/endpoints", - importpath = "github.com/aws/aws-sdk-go/aws/endpoints", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/request/BUILD.bazel deleted file mode 100644 index beffb34ab6e..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "connection_reset_error.go", - "connection_reset_error_other.go", - "handlers.go", - "http_request.go", - "offset_reader.go", - "request.go", - "request_1_7.go", - "request_1_8.go", - "request_context.go", - "request_context_1_6.go", - "request_pagination.go", - "retryer.go", - "timeout_read_closer.go", - "validation.go", - "waiter.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/request", - importpath = "github.com/aws/aws-sdk-go/aws/request", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awsutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/internal/sdkio:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/session/BUILD.bazel deleted file mode 100644 index fd64d7789fe..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "env_config.go", - "session.go", - "shared_config.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/session", - importpath = "github.com/aws/aws-sdk-go/aws/session", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/corehandlers:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/csm:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/defaults:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/endpoints:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/go-ini/ini:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/BUILD.bazel deleted file mode 100644 index bf7bf1d5ece..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "header_rules.go", - "options.go", - "uri_path.go", - 
"v4.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/aws/signer/v4", - importpath = "github.com/aws/aws-sdk-go/aws/signer/v4", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/internal/sdkio:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/rest:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/BUILD.bazel deleted file mode 100644 index cb754d59575..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "io_go1.6.go", - "io_go1.7.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/internal/sdkio", - importpath = "github.com/aws/aws-sdk-go/internal/sdkio", - visibility = ["//vendor/github.com/aws/aws-sdk-go:__subpackages__"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/BUILD.bazel deleted file mode 100644 index 53567cdeb8d..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["locked_source.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/internal/sdkrand", - importpath = "github.com/aws/aws-sdk-go/internal/sdkrand", - visibility = ["//vendor/github.com/aws/aws-sdk-go:__subpackages__"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/BUILD.bazel deleted file mode 100644 index 8111c1159ca..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["path.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/internal/sdkuri", - importpath = "github.com/aws/aws-sdk-go/internal/sdkuri", - visibility = ["//vendor/github.com/aws/aws-sdk-go:__subpackages__"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/BUILD.bazel deleted file mode 100644 index e95ae6587f8..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["shared_config.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults", - importpath = "github.com/aws/aws-sdk-go/internal/shareddefaults", - visibility = ["//vendor/github.com/aws/aws-sdk-go:__subpackages__"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/private/protocol/BUILD.bazel deleted file mode 100644 index bca0d71f612..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - 
"idempotency.go", - "jsonvalue.go", - "payload.go", - "timestamp.go", - "unmarshal.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/private/protocol", - importpath = "github.com/aws/aws-sdk-go/private/protocol", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/BUILD.bazel deleted file mode 100644 index bbc964e22f1..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "build.go", - "unmarshal.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query", - importpath = "github.com/aws/aws-sdk-go/private/protocol/ec2query", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/BUILD.bazel deleted file mode 100644 index 84b7df9cebe..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "debug.go", - "decode.go", - "encode.go", - "error.go", - "header.go", - "header_value.go", - "message.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream", - importpath = "github.com/aws/aws-sdk-go/private/protocol/eventstream", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/aws/aws-sdk-go/aws:go_default_library"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/BUILD.bazel deleted file mode 100644 index 5ab73770c41..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "error.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi", - importpath = "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/BUILD.bazel deleted file mode 100644 index fb19a9beaf9..00000000000 --- 
a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "build.go", - "unmarshal.go", - "unmarshal_error.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/private/protocol/query", - importpath = "github.com/aws/aws-sdk-go/private/protocol/query", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/BUILD.bazel deleted file mode 100644 index 1d94b892201..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["queryutil.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - importpath = "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/aws/aws-sdk-go/private/protocol:go_default_library"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/BUILD.bazel deleted file mode 100644 index 852591176ed..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "build.go", - "payload.go", - "unmarshal.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/private/protocol/rest", - importpath = "github.com/aws/aws-sdk-go/private/protocol/rest", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/BUILD.bazel deleted file mode 100644 index 4f3f77712ab..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["restxml.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml", - importpath = "github.com/aws/aws-sdk-go/private/protocol/restxml", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/query:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/rest:go_default_library", - 
"//vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/BUILD.bazel deleted file mode 100644 index feae86a9262..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "build.go", - "unmarshal.go", - "xml_to_struct.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - importpath = "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/aws/aws-sdk-go/private/protocol:go_default_library"], -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/BUILD.bazel deleted file mode 100644 index 55891827276..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "doc.go", - "errors.go", - "service.go", - "waiters.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/service/autoscaling", - importpath = "github.com/aws/aws-sdk-go/service/autoscaling", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awsutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/signer/v4:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/query:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/service/ec2/BUILD.bazel deleted file mode 100644 index bfda841a5f1..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "customizations.go", - "doc.go", - "errors.go", - "service.go", - "waiters.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/service/ec2", - importpath = "github.com/aws/aws-sdk-go/service/ec2", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awsutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/endpoints:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/signer/v4:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/internal/sdkrand:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol:go_default_library", - 
"//vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/service/elb/BUILD.bazel deleted file mode 100644 index eb15df09b06..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "doc.go", - "errors.go", - "service.go", - "waiters.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/service/elb", - importpath = "github.com/aws/aws-sdk-go/service/elb", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awsutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/signer/v4:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/query:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/service/iam/BUILD.bazel deleted file mode 100644 index 19cb2c4ffb4..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "doc.go", - "errors.go", - "service.go", - "waiters.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/service/iam", - importpath = "github.com/aws/aws-sdk-go/service/iam", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awsutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/signer/v4:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/query:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/service/route53/BUILD.bazel deleted file mode 100644 index af39075624f..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "customizations.go", - "doc.go", - "errors.go", - "service.go", - "unmarshal_error.go", - "waiters.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/service/route53", - importpath = "github.com/aws/aws-sdk-go/service/route53", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awsutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - 
"//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/signer/v4:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/restxml:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/service/s3/BUILD.bazel deleted file mode 100644 index 3d29938c892..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/BUILD.bazel +++ /dev/null @@ -1,40 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "body_hash.go", - "bucket_location.go", - "customizations.go", - "doc.go", - "doc_custom.go", - "errors.go", - "host_style_bucket.go", - "platform_handlers.go", - "platform_handlers_go1.6.go", - "service.go", - "sse.go", - "statusok_error.go", - "unmarshal_error.go", - "waiters.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/service/s3", - importpath = "github.com/aws/aws-sdk-go/service/s3", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awsutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/signer/v4:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/internal/sdkio:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/rest:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/restxml:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/BUILD.bazel deleted file mode 100644 index de575c28db8..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface", - importpath = "github.com/aws/aws-sdk-go/service/s3/s3iface", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/s3:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/BUILD.bazel deleted file mode 100644 index 6af5ed9d827..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch.go", - "bucket_region.go", - "doc.go", - "download.go", - "upload.go", - ], - importmap = 
"installer/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager", - importpath = "github.com/aws/aws-sdk-go/service/s3/s3manager", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awsutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/s3:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/s3/s3iface:go_default_library", - ], -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/BUILD.bazel b/vendor/github.com/aws/aws-sdk-go/service/sts/BUILD.bazel deleted file mode 100644 index 5daaa096d49..00000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "customizations.go", - "doc.go", - "errors.go", - "service.go", - ], - importmap = "installer/vendor/github.com/aws/aws-sdk-go/service/sts", - importpath = "github.com/aws/aws-sdk-go/service/sts", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awsutil:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/client/metadata:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/signer/v4:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/private/protocol/query:go_default_library", - ], -) diff --git a/vendor/github.com/beorn7/perks/quantile/BUILD.bazel b/vendor/github.com/beorn7/perks/quantile/BUILD.bazel deleted file mode 100644 index 2c3cd6facd2..00000000000 --- a/vendor/github.com/beorn7/perks/quantile/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["stream.go"], - importmap = "installer/vendor/github.com/beorn7/perks/quantile", - importpath = "github.com/beorn7/perks/quantile", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/BUILD.bazel b/vendor/github.com/coreos/etcd/BUILD.bazel deleted file mode 100644 index de0e605a2ee..00000000000 --- a/vendor/github.com/coreos/etcd/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") - -go_library( - name = "go_default_library", - srcs = ["main.go"], - importmap = "installer/vendor/github.com/coreos/etcd", - importpath = "github.com/coreos/etcd", - visibility = ["//visibility:private"], - deps = ["//vendor/github.com/coreos/etcd/etcdmain:go_default_library"], -) - -go_binary( - name = "etcd", - embed = [":go_default_library"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/alarm/BUILD.bazel b/vendor/github.com/coreos/etcd/alarm/BUILD.bazel deleted file mode 100644 index 163da99210c..00000000000 --- a/vendor/github.com/coreos/etcd/alarm/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["alarms.go"], - 
importmap = "installer/vendor/github.com/coreos/etcd/alarm", - importpath = "github.com/coreos/etcd/alarm", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/auth/BUILD.bazel b/vendor/github.com/coreos/etcd/auth/BUILD.bazel deleted file mode 100644 index d993478a1f8..00000000000 --- a/vendor/github.com/coreos/etcd/auth/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "range_perm_cache.go", - "simple_token.go", - "store.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/auth", - importpath = "github.com/coreos/etcd/auth", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/golang.org/x/crypto/bcrypt:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc/metadata:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/auth/authpb/BUILD.bazel b/vendor/github.com/coreos/etcd/auth/authpb/BUILD.bazel deleted file mode 100644 index adf306e1af1..00000000000 --- a/vendor/github.com/coreos/etcd/auth/authpb/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["auth.pb.go"], - importmap = "installer/vendor/github.com/coreos/etcd/auth/authpb", - importpath = "github.com/coreos/etcd/auth/authpb", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/coreos/etcd/client/BUILD.bazel b/vendor/github.com/coreos/etcd/client/BUILD.bazel deleted file mode 100644 index 50a5d7be881..00000000000 --- a/vendor/github.com/coreos/etcd/client/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "auth_role.go", - "auth_user.go", - "cancelreq.go", - "client.go", - "cluster_error.go", - "curl.go", - "discover.go", - "doc.go", - "keys.generated.go", - "keys.go", - "members.go", - "srv.go", - "util.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/client", - importpath = "github.com/coreos/etcd/client", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/pkg/pathutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/ugorji/go/codec:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/clientv3/BUILD.bazel b/vendor/github.com/coreos/etcd/clientv3/BUILD.bazel deleted file mode 100644 index f05e0f928e8..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/BUILD.bazel +++ /dev/null @@ -1,42 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "auth.go", - 
"balancer.go", - "client.go", - "cluster.go", - "compact_op.go", - "compare.go", - "config.go", - "doc.go", - "kv.go", - "lease.go", - "logger.go", - "maintenance.go", - "op.go", - "retry.go", - "sort.go", - "txn.go", - "watch.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/clientv3", - importpath = "github.com/coreos/etcd/clientv3", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/metadata:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/compactor/BUILD.bazel b/vendor/github.com/coreos/etcd/compactor/BUILD.bazel deleted file mode 100644 index c7c3dc698d2..00000000000 --- a/vendor/github.com/coreos/etcd/compactor/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "compactor.go", - "doc.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/compactor", - importpath = "github.com/coreos/etcd/compactor", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/jonboulle/clockwork:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/discovery/BUILD.bazel b/vendor/github.com/coreos/etcd/discovery/BUILD.bazel deleted file mode 100644 index 3ee56648bcb..00000000000 --- a/vendor/github.com/coreos/etcd/discovery/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "discovery.go", - "srv.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/discovery", - importpath = "github.com/coreos/etcd/discovery", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/client:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/jonboulle/clockwork:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/error/BUILD.bazel b/vendor/github.com/coreos/etcd/error/BUILD.bazel deleted file mode 100644 index b5c0a687fb3..00000000000 --- a/vendor/github.com/coreos/etcd/error/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["error.go"], - 
importmap = "installer/vendor/github.com/coreos/etcd/error", - importpath = "github.com/coreos/etcd/error", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/BUILD.bazel b/vendor/github.com/coreos/etcd/etcdserver/BUILD.bazel deleted file mode 100644 index b1b6fb1f192..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/BUILD.bazel +++ /dev/null @@ -1,67 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apply.go", - "apply_auth.go", - "apply_v2.go", - "cluster_util.go", - "config.go", - "consistent_index.go", - "doc.go", - "errors.go", - "metrics.go", - "quota.go", - "raft.go", - "server.go", - "snapshot_merge.go", - "storage.go", - "util.go", - "v2_server.go", - "v3_server.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/etcdserver", - importpath = "github.com/coreos/etcd/etcdserver", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/alarm:go_default_library", - "//vendor/github.com/coreos/etcd/auth:go_default_library", - "//vendor/github.com/coreos/etcd/compactor:go_default_library", - "//vendor/github.com/coreos/etcd/discovery:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/stats:go_default_library", - "//vendor/github.com/coreos/etcd/lease:go_default_library", - "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/contention:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/idutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/runtime:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/schedule:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/wait:go_default_library", - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", - "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", - "//vendor/github.com/coreos/etcd/snap:go_default_library", - "//vendor/github.com/coreos/etcd/store:go_default_library", - "//vendor/github.com/coreos/etcd/version:go_default_library", - "//vendor/github.com/coreos/etcd/wal:go_default_library", - "//vendor/github.com/coreos/etcd/wal/walpb:go_default_library", - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - 
"//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/BUILD.bazel b/vendor/github.com/coreos/etcd/etcdserver/api/BUILD.bazel deleted file mode 100644 index 5e55140df71..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "capability.go", - "cluster.go", - "doc.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/etcdserver/api", - importpath = "github.com/coreos/etcd/etcdserver/api", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/version:go_default_library", - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD.bazel b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD.bazel deleted file mode 100644 index 7b01a8dc6dc..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "capability.go", - "client.go", - "client_auth.go", - "doc.go", - "http.go", - "metrics.go", - "peer.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/etcdserver/api/v2http", - importpath = "github.com/coreos/etcd/etcdserver/api/v2http", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/error:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/auth:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/stats:go_default_library", - "//vendor/github.com/coreos/etcd/lease/leasehttp:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", - "//vendor/github.com/coreos/etcd/store:go_default_library", - "//vendor/github.com/coreos/etcd/version:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/jonboulle/clockwork:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/BUILD.bazel b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/BUILD.bazel deleted file mode 100644 index 7e4d74becf3..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "member.go", - ], - 
importmap = "installer/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes", - importpath = "github.com/coreos/etcd/etcdserver/api/v2http/httptypes", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/BUILD.bazel b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/BUILD.bazel deleted file mode 100644 index a824d14c57a..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/BUILD.bazel +++ /dev/null @@ -1,48 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "auth.go", - "codec.go", - "grpc.go", - "header.go", - "interceptor.go", - "key.go", - "lease.go", - "maintenance.go", - "member.go", - "metrics.go", - "quota.go", - "util.go", - "watch.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc", - importpath = "github.com/coreos/etcd/etcdserver/api/v3rpc", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/auth:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/membership:go_default_library", - "//vendor/github.com/coreos/etcd/lease:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/etcd/version:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/grpc-ecosystem/go-grpc-prometheus:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/metadata:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD.bazel b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD.bazel deleted file mode 100644 index 137262300ef..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "error.go", - "md.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - importpath = "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - visibility = ["//visibility:public"], - deps = [ - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - ], -) diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/auth/BUILD.bazel b/vendor/github.com/coreos/etcd/etcdserver/auth/BUILD.bazel deleted file mode 100644 index 2140d7cd6b0..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/auth/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "auth.go", - "auth_requests.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/etcdserver/auth", - importpath = "github.com/coreos/etcd/etcdserver/auth", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/error:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/golang.org/x/crypto/bcrypt:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD.bazel b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD.bazel deleted file mode 100644 index 1c1cd7cc0e4..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "etcdserver.pb.go", - "raft_internal.pb.go", - "rpc.pb.go", - "rpc.pb.gw.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb", - importpath = "github.com/coreos/etcd/etcdserver/etcdserverpb", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/auth/authpb:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD.bazel b/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD.bazel deleted file mode 100644 index f1535076529..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "errors.go", - "member.go", - "store.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/etcdserver/membership", - importpath = "github.com/coreos/etcd/etcdserver/membership", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/error:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/netutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", - "//vendor/github.com/coreos/etcd/store:go_default_library", - 
"//vendor/github.com/coreos/etcd/version:go_default_library", - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/BUILD.bazel b/vendor/github.com/coreos/etcd/etcdserver/stats/BUILD.bazel deleted file mode 100644 index ad52a5bf497..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "leader.go", - "queue.go", - "server.go", - "stats.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/etcdserver/stats", - importpath = "github.com/coreos/etcd/etcdserver/stats", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/integration/BUILD.bazel b/vendor/github.com/coreos/etcd/integration/BUILD.bazel deleted file mode 100644 index 18817e3bd13..00000000000 --- a/vendor/github.com/coreos/etcd/integration/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bridge.go", - "cluster.go", - "cluster_direct.go", - "doc.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/integration", - importpath = "github.com/coreos/etcd/integration", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/client:go_default_library", - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v2http:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/rafthttp:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/lease/BUILD.bazel b/vendor/github.com/coreos/etcd/lease/BUILD.bazel deleted file mode 100644 index 39a597ae14f..00000000000 --- a/vendor/github.com/coreos/etcd/lease/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "lessor.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/lease", - importpath = "github.com/coreos/etcd/lease", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/lease/leasepb:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/monotime:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD.bazel b/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD.bazel deleted file mode 100644 index 
73a820e3c60..00000000000 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "http.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/lease/leasehttp", - importpath = "github.com/coreos/etcd/lease/leasehttp", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/lease:go_default_library", - "//vendor/github.com/coreos/etcd/lease/leasepb:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/BUILD.bazel b/vendor/github.com/coreos/etcd/lease/leasepb/BUILD.bazel deleted file mode 100644 index 4cab72f3cbe..00000000000 --- a/vendor/github.com/coreos/etcd/lease/leasepb/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["lease.pb.go"], - importmap = "installer/vendor/github.com/coreos/etcd/lease/leasepb", - importpath = "github.com/coreos/etcd/lease/leasepb", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/mvcc/BUILD.bazel b/vendor/github.com/coreos/etcd/mvcc/BUILD.bazel deleted file mode 100644 index f7c7f76f3f6..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "index.go", - "key_index.go", - "kv.go", - "kvstore.go", - "kvstore_compaction.go", - "metrics.go", - "revision.go", - "util.go", - "watchable_store.go", - "watcher.go", - "watcher_group.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/mvcc", - importpath = "github.com/coreos/etcd/mvcc", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/lease:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/backend:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/adt:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/schedule:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/google/btree:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD.bazel b/vendor/github.com/coreos/etcd/mvcc/backend/BUILD.bazel deleted file mode 100644 index 68f2c6301e5..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "backend.go", - "batch_tx.go", - "boltoption_default.go", - "boltoption_linux.go", - "doc.go", - "metrics.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/mvcc/backend", - importpath = "github.com/coreos/etcd/mvcc/backend", - visibility = ["//visibility:public"], - deps = [ - 
"//vendor/github.com/boltdb/bolt:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/BUILD.bazel b/vendor/github.com/coreos/etcd/mvcc/mvccpb/BUILD.bazel deleted file mode 100644 index b31f235ae1e..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["kv.pb.go"], - importmap = "installer/vendor/github.com/coreos/etcd/mvcc/mvccpb", - importpath = "github.com/coreos/etcd/mvcc/mvccpb", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/adt/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/adt/BUILD.bazel deleted file mode 100644 index 8dbcb859e10..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/adt/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "interval_tree.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/adt", - importpath = "github.com/coreos/etcd/pkg/adt", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/contention/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/contention/BUILD.bazel deleted file mode 100644 index d38da0dabbe..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/contention/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "contention.go", - "doc.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/contention", - importpath = "github.com/coreos/etcd/pkg/contention", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/cpuutil/BUILD.bazel deleted file mode 100644 index 1343c3013d5..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/cpuutil/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "endian.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/cpuutil", - importpath = "github.com/coreos/etcd/pkg/cpuutil", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/crc/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/crc/BUILD.bazel deleted file mode 100644 index 76334587cae..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/crc/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["crc.go"], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/crc", - importpath = "github.com/coreos/etcd/pkg/crc", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/fileutil/BUILD.bazel deleted file mode 100644 index 838808fcdca..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "dir_unix.go", - "dir_windows.go", - "fileutil.go", - 
"lock.go", - "lock_flock.go", - "lock_linux.go", - "lock_plan9.go", - "lock_solaris.go", - "lock_unix.go", - "lock_windows.go", - "preallocate.go", - "preallocate_darwin.go", - "preallocate_unix.go", - "preallocate_unsupported.go", - "purge.go", - "sync.go", - "sync_darwin.go", - "sync_linux.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/fileutil", - importpath = "github.com/coreos/etcd/pkg/fileutil", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/httputil/BUILD.bazel deleted file mode 100644 index f72f0ee7418..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/httputil/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["httputil.go"], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/httputil", - importpath = "github.com/coreos/etcd/pkg/httputil", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/idutil/BUILD.bazel deleted file mode 100644 index c1e1f6d25de..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/idutil/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["id.go"], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/idutil", - importpath = "github.com/coreos/etcd/pkg/idutil", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/ioutil/BUILD.bazel deleted file mode 100644 index d7ef74efaac..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/ioutil/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "pagewriter.go", - "readcloser.go", - "reader.go", - "util.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/ioutil", - importpath = "github.com/coreos/etcd/pkg/ioutil", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/logutil/BUILD.bazel deleted file mode 100644 index de152d93bfa..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/logutil/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["merge_logger.go"], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/logutil", - importpath = "github.com/coreos/etcd/pkg/logutil", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/pkg/capnslog:go_default_library"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/monotime/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/monotime/BUILD.bazel deleted file mode 100644 index 4ac3293090f..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/monotime/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "issue15006.s", - "monotime.go", - "nanotime.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/monotime", - importpath = 
"github.com/coreos/etcd/pkg/monotime", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/netutil/BUILD.bazel deleted file mode 100644 index 9869d15366e..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/netutil/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "isolate_linux.go", - "isolate_stub.go", - "netutil.go", - "routes.go", - "routes_linux.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/netutil", - importpath = "github.com/coreos/etcd/pkg/netutil", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/coreos/etcd/pkg/cpuutil:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/pathutil/BUILD.bazel deleted file mode 100644 index 31f31319ebf..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/pathutil/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["path.go"], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/pathutil", - importpath = "github.com/coreos/etcd/pkg/pathutil", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/pbutil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/pbutil/BUILD.bazel deleted file mode 100644 index f88ec441b94..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/pbutil/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["pbutil.go"], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/pbutil", - importpath = "github.com/coreos/etcd/pkg/pbutil", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/pkg/capnslog:go_default_library"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/runtime/BUILD.bazel deleted file mode 100644 index c06bbb4fd17..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/runtime/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "fds_linux.go", - "fds_other.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/runtime", - importpath = "github.com/coreos/etcd/pkg/runtime", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/schedule/BUILD.bazel deleted file mode 100644 index 4581189d744..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/schedule/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "schedule.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/schedule", - importpath = "github.com/coreos/etcd/pkg/schedule", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/net/context:go_default_library"], -) diff --git 
a/vendor/github.com/coreos/etcd/pkg/testutil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/testutil/BUILD.bazel deleted file mode 100644 index 138fba4ebc1..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/testutil/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "leak.go", - "pauseable_handler.go", - "recorder.go", - "testutil.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/testutil", - importpath = "github.com/coreos/etcd/pkg/testutil", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/tlsutil/BUILD.bazel deleted file mode 100644 index f263722bbdc..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/tlsutil/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "tlsutil.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/tlsutil", - importpath = "github.com/coreos/etcd/pkg/tlsutil", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/transport/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/transport/BUILD.bazel deleted file mode 100644 index 811365f3755..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "keepalive_listener.go", - "limit_listen.go", - "listener.go", - "timeout_conn.go", - "timeout_dialer.go", - "timeout_listener.go", - "timeout_transport.go", - "tls.go", - "transport.go", - "unix_listener.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/transport", - importpath = "github.com/coreos/etcd/pkg/transport", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/tlsutil:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/pkg/types/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/types/BUILD.bazel deleted file mode 100644 index eb7f14a9d6f..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "id.go", - "set.go", - "slice.go", - "urls.go", - "urlsmap.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/types", - importpath = "github.com/coreos/etcd/pkg/types", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/pkg/wait/BUILD.bazel b/vendor/github.com/coreos/etcd/pkg/wait/BUILD.bazel deleted file mode 100644 index 8606696539b..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/wait/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "wait.go", - "wait_time.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/pkg/wait", - importpath = "github.com/coreos/etcd/pkg/wait", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD.bazel b/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD.bazel deleted file mode 100644 index 1c4a23c3669..00000000000 --- 
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "auth.go", - "cluster.go", - "doc.go", - "kv.go", - "kv_client_adapter.go", - "lease.go", - "maintenance.go", - "metrics.go", - "watch.go", - "watch_broadcast.go", - "watch_broadcasts.go", - "watch_client_adapter.go", - "watch_ranges.go", - "watcher.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/proxy/grpcproxy", - importpath = "github.com/coreos/etcd/proxy/grpcproxy", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/coreos/etcd/proxy/grpcproxy/cache:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/time/rate:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/metadata:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD.bazel b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD.bazel deleted file mode 100644 index 7f3b4623a92..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["store.go"], - importmap = "installer/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache", - importpath = "github.com/coreos/etcd/proxy/grpcproxy/cache", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/etcdserverpb:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/adt:go_default_library", - "//vendor/github.com/karlseguin/ccache:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/raft/BUILD.bazel b/vendor/github.com/coreos/etcd/raft/BUILD.bazel deleted file mode 100644 index 49e46a17129..00000000000 --- a/vendor/github.com/coreos/etcd/raft/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "log.go", - "log_unstable.go", - "logger.go", - "node.go", - "progress.go", - "raft.go", - "rawnode.go", - "read_only.go", - "status.go", - "storage.go", - "util.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/raft", - importpath = "github.com/coreos/etcd/raft", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/BUILD.bazel b/vendor/github.com/coreos/etcd/raft/raftpb/BUILD.bazel deleted file mode 100644 index 5eb5951f0ab..00000000000 --- a/vendor/github.com/coreos/etcd/raft/raftpb/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", 
"go_library") - -go_library( - name = "go_default_library", - srcs = ["raft.pb.go"], - importmap = "installer/vendor/github.com/coreos/etcd/raft/raftpb", - importpath = "github.com/coreos/etcd/raft/raftpb", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/coreos/etcd/rafthttp/BUILD.bazel b/vendor/github.com/coreos/etcd/rafthttp/BUILD.bazel deleted file mode 100644 index bca606b228d..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/BUILD.bazel +++ /dev/null @@ -1,44 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "coder.go", - "doc.go", - "http.go", - "metrics.go", - "msg_codec.go", - "msgappv2_codec.go", - "peer.go", - "peer_status.go", - "pipeline.go", - "probing_status.go", - "remote.go", - "snapshot_sender.go", - "stream.go", - "transport.go", - "urlpick.go", - "util.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/rafthttp", - importpath = "github.com/coreos/etcd/rafthttp", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/etcdserver/stats:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/httputil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/ioutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/logutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", - "//vendor/github.com/coreos/etcd/snap:go_default_library", - "//vendor/github.com/coreos/etcd/version:go_default_library", - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/github.com/xiang90/probing:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/snap/BUILD.bazel b/vendor/github.com/coreos/etcd/snap/BUILD.bazel deleted file mode 100644 index d970b10b5ac..00000000000 --- a/vendor/github.com/coreos/etcd/snap/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "db.go", - "message.go", - "metrics.go", - "snapshotter.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/snap", - importpath = "github.com/coreos/etcd/snap", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/ioutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", - "//vendor/github.com/coreos/etcd/snap/snappb:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/snap/snappb/BUILD.bazel b/vendor/github.com/coreos/etcd/snap/snappb/BUILD.bazel deleted file mode 100644 index 3dbb7071506..00000000000 --- 
a/vendor/github.com/coreos/etcd/snap/snappb/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["snap.pb.go"], - importmap = "installer/vendor/github.com/coreos/etcd/snap/snappb", - importpath = "github.com/coreos/etcd/snap/snappb", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/coreos/etcd/store/BUILD.bazel b/vendor/github.com/coreos/etcd/store/BUILD.bazel deleted file mode 100644 index 233a7611447..00000000000 --- a/vendor/github.com/coreos/etcd/store/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "event.go", - "event_history.go", - "event_queue.go", - "metrics.go", - "node.go", - "node_extern.go", - "stats.go", - "store.go", - "ttl_key_heap.go", - "watcher.go", - "watcher_hub.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/store", - importpath = "github.com/coreos/etcd/store", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/error:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/jonboulle/clockwork:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/version/BUILD.bazel b/vendor/github.com/coreos/etcd/version/BUILD.bazel deleted file mode 100644 index 87a7ed019b1..00000000000 --- a/vendor/github.com/coreos/etcd/version/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["version.go"], - importmap = "installer/vendor/github.com/coreos/etcd/version", - importpath = "github.com/coreos/etcd/version", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/go-semver/semver:go_default_library"], -) diff --git a/vendor/github.com/coreos/etcd/wal/BUILD.bazel b/vendor/github.com/coreos/etcd/wal/BUILD.bazel deleted file mode 100644 index 41d9321e2d3..00000000000 --- a/vendor/github.com/coreos/etcd/wal/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "decoder.go", - "doc.go", - "encoder.go", - "file_pipeline.go", - "metrics.go", - "repair.go", - "util.go", - "wal.go", - "wal_unix.go", - "wal_windows.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/wal", - importpath = "github.com/coreos/etcd/wal", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/pkg/crc:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/fileutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/ioutil:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/pbutil:go_default_library", - "//vendor/github.com/coreos/etcd/raft:go_default_library", - "//vendor/github.com/coreos/etcd/raft/raftpb:go_default_library", - "//vendor/github.com/coreos/etcd/wal/walpb:go_default_library", - "//vendor/github.com/coreos/pkg/capnslog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/etcd/wal/walpb/BUILD.bazel b/vendor/github.com/coreos/etcd/wal/walpb/BUILD.bazel deleted file mode 100644 index add8d1f39c1..00000000000 --- 
a/vendor/github.com/coreos/etcd/wal/walpb/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "record.go", - "record.pb.go", - ], - importmap = "installer/vendor/github.com/coreos/etcd/wal/walpb", - importpath = "github.com/coreos/etcd/wal/walpb", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/coreos/go-semver/BUILD.bazel b/vendor/github.com/coreos/go-semver/BUILD.bazel deleted file mode 100644 index 5ddda349a28..00000000000 --- a/vendor/github.com/coreos/go-semver/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") - -go_library( - name = "go_default_library", - srcs = ["example.go"], - importmap = "installer/vendor/github.com/coreos/go-semver", - importpath = "github.com/coreos/go-semver", - visibility = ["//visibility:private"], - deps = ["//vendor/github.com/coreos/go-semver/semver:go_default_library"], -) - -go_binary( - name = "go-semver", - embed = [":go_default_library"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/go-semver/semver/BUILD.bazel b/vendor/github.com/coreos/go-semver/semver/BUILD.bazel deleted file mode 100644 index 2faf2cf9fa7..00000000000 --- a/vendor/github.com/coreos/go-semver/semver/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "semver.go", - "sort.go", - ], - importmap = "installer/vendor/github.com/coreos/go-semver/semver", - importpath = "github.com/coreos/go-semver/semver", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/go-systemd/daemon/BUILD.bazel b/vendor/github.com/coreos/go-systemd/daemon/BUILD.bazel deleted file mode 100644 index 667d9282c57..00000000000 --- a/vendor/github.com/coreos/go-systemd/daemon/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "sdnotify.go", - "watchdog.go", - ], - importmap = "installer/vendor/github.com/coreos/go-systemd/daemon", - importpath = "github.com/coreos/go-systemd/daemon", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/go-systemd/dbus/BUILD.bazel b/vendor/github.com/coreos/go-systemd/dbus/BUILD.bazel deleted file mode 100644 index 22fe914cb6e..00000000000 --- a/vendor/github.com/coreos/go-systemd/dbus/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "dbus.go", - "methods.go", - "properties.go", - "set.go", - "subscription.go", - "subscription_set.go", - ], - importmap = "installer/vendor/github.com/coreos/go-systemd/dbus", - importpath = "github.com/coreos/go-systemd/dbus", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/godbus/dbus:go_default_library"], -) diff --git a/vendor/github.com/coreos/go-systemd/journal/BUILD.bazel b/vendor/github.com/coreos/go-systemd/journal/BUILD.bazel deleted file mode 100644 index fe899d909c6..00000000000 --- a/vendor/github.com/coreos/go-systemd/journal/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["journal.go"], - importmap = 
"installer/vendor/github.com/coreos/go-systemd/journal", - importpath = "github.com/coreos/go-systemd/journal", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/go-systemd/unit/BUILD.bazel b/vendor/github.com/coreos/go-systemd/unit/BUILD.bazel deleted file mode 100644 index c5870e897ed..00000000000 --- a/vendor/github.com/coreos/go-systemd/unit/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "deserialize.go", - "escape.go", - "option.go", - "serialize.go", - ], - importmap = "installer/vendor/github.com/coreos/go-systemd/unit", - importpath = "github.com/coreos/go-systemd/unit", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/ignition/config/shared/errors/BUILD.bazel b/vendor/github.com/coreos/ignition/config/shared/errors/BUILD.bazel deleted file mode 100644 index 685baa41609..00000000000 --- a/vendor/github.com/coreos/ignition/config/shared/errors/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["errors.go"], - importmap = "installer/vendor/github.com/coreos/ignition/config/shared/errors", - importpath = "github.com/coreos/ignition/config/shared/errors", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/ignition/config/shared/validations/BUILD.bazel b/vendor/github.com/coreos/ignition/config/shared/validations/BUILD.bazel deleted file mode 100644 index b6d8a63481f..00000000000 --- a/vendor/github.com/coreos/ignition/config/shared/validations/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["unit.go"], - importmap = "installer/vendor/github.com/coreos/ignition/config/shared/validations", - importpath = "github.com/coreos/ignition/config/shared/validations", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/go-systemd/unit:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/util/BUILD.bazel b/vendor/github.com/coreos/ignition/config/util/BUILD.bazel deleted file mode 100644 index 21c97b8ba4e..00000000000 --- a/vendor/github.com/coreos/ignition/config/util/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "helpers.go", - "parsingErrors.go", - ], - importmap = "installer/vendor/github.com/coreos/ignition/config/util", - importpath = "github.com/coreos/ignition/config/util", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/ajeddeloh/go-json:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_3_experimental/types:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - "//vendor/go4.org/errorutil:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/v1/BUILD.bazel b/vendor/github.com/coreos/ignition/config/v1/BUILD.bazel deleted file mode 100644 index eb1e2ee3684..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cloudinit.go", - "config.go", - ], - importmap = "installer/vendor/github.com/coreos/ignition/config/v1", - importpath = "github.com/coreos/ignition/config/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/ajeddeloh/go-json:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/util:go_default_library", - "//vendor/github.com/coreos/ignition/config/v1/types:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/v1/types/BUILD.bazel b/vendor/github.com/coreos/ignition/config/v1/types/BUILD.bazel deleted file mode 100644 index f5a9afa8307..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "disk.go", - "file.go", - "filesystem.go", - "group.go", - "networkd.go", - "partition.go", - "passwd.go", - "path.go", - "raid.go", - "storage.go", - "systemd.go", - "unit.go", - "user.go", - ], - importmap = "installer/vendor/github.com/coreos/ignition/config/v1/types", - importpath = "github.com/coreos/ignition/config/v1/types", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/v2_0/BUILD.bazel b/vendor/github.com/coreos/ignition/config/v2_0/BUILD.bazel deleted file mode 100644 index 3b5f6cbadcd..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "append.go", - "cloudinit.go", - "config.go", - "translate.go", - ], - importmap = "installer/vendor/github.com/coreos/ignition/config/v2_0", - importpath = "github.com/coreos/ignition/config/v2_0", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/ajeddeloh/go-json:go_default_library", - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/v1:go_default_library", - "//vendor/github.com/coreos/ignition/config/v1/types:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_0/types:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - "//vendor/github.com/vincent-petithory/dataurl:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/BUILD.bazel b/vendor/github.com/coreos/ignition/config/v2_0/types/BUILD.bazel deleted file mode 100644 index 45b2fd1c608..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "compression.go", - "config.go", - "disk.go", - "file.go", - 
"filesystem.go", - "group.go", - "hash.go", - "ignition.go", - "networkd.go", - "partition.go", - "passwd.go", - "path.go", - "raid.go", - "storage.go", - "systemd.go", - "unit.go", - "url.go", - "user.go", - "verification.go", - ], - importmap = "installer/vendor/github.com/coreos/ignition/config/v2_0/types", - importpath = "github.com/coreos/ignition/config/v2_0/types", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/go-systemd/unit:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/validations:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - "//vendor/github.com/vincent-petithory/dataurl:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/v2_1/BUILD.bazel b/vendor/github.com/coreos/ignition/config/v2_1/BUILD.bazel deleted file mode 100644 index 220bd803994..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "append.go", - "cloudinit.go", - "config.go", - "translate.go", - ], - importmap = "installer/vendor/github.com/coreos/ignition/config/v2_1", - importpath = "github.com/coreos/ignition/config/v2_1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/ajeddeloh/go-json:go_default_library", - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/util:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_0:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_0/types:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_1/types:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/BUILD.bazel b/vendor/github.com/coreos/ignition/config/v2_1/types/BUILD.bazel deleted file mode 100644 index f6dc3c5f385..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "directory.go", - "disk.go", - "file.go", - "filesystem.go", - "ignition.go", - "link.go", - "mode.go", - "node.go", - "partition.go", - "passwd.go", - "path.go", - "raid.go", - "schema.go", - "unit.go", - "url.go", - "verification.go", - ], - importmap = "installer/vendor/github.com/coreos/ignition/config/v2_1/types", - importpath = "github.com/coreos/ignition/config/v2_1/types", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/go-systemd/unit:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/validations:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - "//vendor/github.com/vincent-petithory/dataurl:go_default_library", - ], -) diff --git 
a/vendor/github.com/coreos/ignition/config/v2_2/BUILD.bazel b/vendor/github.com/coreos/ignition/config/v2_2/BUILD.bazel deleted file mode 100644 index e8ed4152da6..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "append.go", - "cloudinit.go", - "config.go", - "translate.go", - ], - importmap = "installer/vendor/github.com/coreos/ignition/config/v2_2", - importpath = "github.com/coreos/ignition/config/v2_2", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/ajeddeloh/go-json:go_default_library", - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/util:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_1:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_1/types:go_default_library", - "//vendor/github.com/coreos/ignition/config/v2_2/types:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/BUILD.bazel b/vendor/github.com/coreos/ignition/config/v2_2/types/BUILD.bazel deleted file mode 100644 index 6a26af78b08..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "ca.go", - "config.go", - "directory.go", - "disk.go", - "file.go", - "filesystem.go", - "ignition.go", - "link.go", - "mode.go", - "node.go", - "partition.go", - "passwd.go", - "path.go", - "raid.go", - "schema.go", - "unit.go", - "url.go", - "verification.go", - ], - importmap = "installer/vendor/github.com/coreos/ignition/config/v2_2/types", - importpath = "github.com/coreos/ignition/config/v2_2/types", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/go-systemd/unit:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/validations:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - "//vendor/github.com/vincent-petithory/dataurl:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/v2_3_experimental/types/BUILD.bazel b/vendor/github.com/coreos/ignition/config/v2_3_experimental/types/BUILD.bazel deleted file mode 100644 index 891629c9b8a..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_3_experimental/types/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "ca.go", - "config.go", - "directory.go", - "disk.go", - "file.go", - "filesystem.go", - "ignition.go", - "link.go", - "mode.go", - "node.go", - "partition.go", - "passwd.go", - "path.go", - "raid.go", - "schema.go", - "unit.go", - "url.go", - "verification.go", - ], - importmap = "installer/vendor/github.com/coreos/ignition/config/v2_3_experimental/types", - importpath = "github.com/coreos/ignition/config/v2_3_experimental/types", - visibility = 
["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/go-semver/semver:go_default_library", - "//vendor/github.com/coreos/go-systemd/unit:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/errors:go_default_library", - "//vendor/github.com/coreos/ignition/config/shared/validations:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - "//vendor/github.com/vincent-petithory/dataurl:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/validate/BUILD.bazel b/vendor/github.com/coreos/ignition/config/validate/BUILD.bazel deleted file mode 100644 index 5a98706b9d9..00000000000 --- a/vendor/github.com/coreos/ignition/config/validate/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["validate.go"], - importmap = "installer/vendor/github.com/coreos/ignition/config/validate", - importpath = "github.com/coreos/ignition/config/validate", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/ajeddeloh/go-json:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/astjson:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/astnode:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/report:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/validate/astjson/BUILD.bazel b/vendor/github.com/coreos/ignition/config/validate/astjson/BUILD.bazel deleted file mode 100644 index eb185ebea8b..00000000000 --- a/vendor/github.com/coreos/ignition/config/validate/astjson/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["node.go"], - importmap = "installer/vendor/github.com/coreos/ignition/config/validate/astjson", - importpath = "github.com/coreos/ignition/config/validate/astjson", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/ajeddeloh/go-json:go_default_library", - "//vendor/github.com/coreos/ignition/config/validate/astnode:go_default_library", - "//vendor/go4.org/errorutil:go_default_library", - ], -) diff --git a/vendor/github.com/coreos/ignition/config/validate/astnode/BUILD.bazel b/vendor/github.com/coreos/ignition/config/validate/astnode/BUILD.bazel deleted file mode 100644 index ce9d4796b08..00000000000 --- a/vendor/github.com/coreos/ignition/config/validate/astnode/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["astnode.go"], - importmap = "installer/vendor/github.com/coreos/ignition/config/validate/astnode", - importpath = "github.com/coreos/ignition/config/validate/astnode", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/ignition/config/validate/report/BUILD.bazel b/vendor/github.com/coreos/ignition/config/validate/report/BUILD.bazel deleted file mode 100644 index 03f5201927d..00000000000 --- a/vendor/github.com/coreos/ignition/config/validate/report/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["report.go"], - importmap = "installer/vendor/github.com/coreos/ignition/config/validate/report", - importpath = "github.com/coreos/ignition/config/validate/report", - visibility = ["//visibility:public"], -) 
diff --git a/vendor/github.com/coreos/pkg/capnslog/BUILD.bazel b/vendor/github.com/coreos/pkg/capnslog/BUILD.bazel deleted file mode 100644 index 33da0fed679..00000000000 --- a/vendor/github.com/coreos/pkg/capnslog/BUILD.bazel +++ /dev/null @@ -1,52 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "formatters.go", - "glog_formatter.go", - "init.go", - "init_windows.go", - "journald_formatter.go", - "log_hijack.go", - "logmap.go", - "pkg_logger.go", - "syslog_formatter.go", - ], - importmap = "installer/vendor/github.com/coreos/pkg/capnslog", - importpath = "github.com/coreos/pkg/capnslog", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/coreos/go-systemd/journal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/github.com/coreos/go-systemd/journal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/github.com/coreos/go-systemd/journal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/coreos/go-systemd/journal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/coreos/go-systemd/journal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/github.com/coreos/go-systemd/journal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/github.com/coreos/go-systemd/journal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/github.com/coreos/go-systemd/journal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/github.com/coreos/go-systemd/journal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/github.com/coreos/go-systemd/journal:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/coreos/pkg/health/BUILD.bazel b/vendor/github.com/coreos/pkg/health/BUILD.bazel deleted file mode 100644 index 326203b71ca..00000000000 --- a/vendor/github.com/coreos/pkg/health/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["health.go"], - importmap = "installer/vendor/github.com/coreos/pkg/health", - importpath = "github.com/coreos/pkg/health", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/pkg/httputil:go_default_library"], -) diff --git a/vendor/github.com/coreos/pkg/httputil/BUILD.bazel b/vendor/github.com/coreos/pkg/httputil/BUILD.bazel deleted file mode 100644 index 22c2919f106..00000000000 --- a/vendor/github.com/coreos/pkg/httputil/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cookie.go", - "json.go", - ], - importmap = "installer/vendor/github.com/coreos/pkg/httputil", - importpath = "github.com/coreos/pkg/httputil", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/pkg/timeutil/BUILD.bazel b/vendor/github.com/coreos/pkg/timeutil/BUILD.bazel deleted file mode 100644 index 81ab3d016da..00000000000 --- a/vendor/github.com/coreos/pkg/timeutil/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["backoff.go"], - importmap = 
"installer/vendor/github.com/coreos/pkg/timeutil", - importpath = "github.com/coreos/pkg/timeutil", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/coreos/tectonic-config/config/kube-addon/BUILD.bazel b/vendor/github.com/coreos/tectonic-config/config/kube-addon/BUILD.bazel deleted file mode 100644 index 228a79cffea..00000000000 --- a/vendor/github.com/coreos/tectonic-config/config/kube-addon/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["config.go"], - importmap = "installer/vendor/github.com/coreos/tectonic-config/config/kube-addon", - importpath = "github.com/coreos/tectonic-config/config/kube-addon", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], -) diff --git a/vendor/github.com/coreos/tectonic-config/config/kube-core/BUILD.bazel b/vendor/github.com/coreos/tectonic-config/config/kube-core/BUILD.bazel deleted file mode 100644 index c6bbb3dd392..00000000000 --- a/vendor/github.com/coreos/tectonic-config/config/kube-core/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["config.go"], - importmap = "installer/vendor/github.com/coreos/tectonic-config/config/kube-core", - importpath = "github.com/coreos/tectonic-config/config/kube-core", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], -) diff --git a/vendor/github.com/coreos/tectonic-config/config/tectonic-network/BUILD.bazel b/vendor/github.com/coreos/tectonic-config/config/tectonic-network/BUILD.bazel deleted file mode 100644 index 45591095800..00000000000 --- a/vendor/github.com/coreos/tectonic-config/config/tectonic-network/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["config.go"], - importmap = "installer/vendor/github.com/coreos/tectonic-config/config/tectonic-network", - importpath = "github.com/coreos/tectonic-config/config/tectonic-network", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], -) diff --git a/vendor/github.com/coreos/tectonic-config/config/tectonic-utility/BUILD.bazel b/vendor/github.com/coreos/tectonic-config/config/tectonic-utility/BUILD.bazel deleted file mode 100644 index e07edcec3dd..00000000000 --- a/vendor/github.com/coreos/tectonic-config/config/tectonic-utility/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["config.go"], - importmap = "installer/vendor/github.com/coreos/tectonic-config/config/tectonic-utility", - importpath = "github.com/coreos/tectonic-config/config/tectonic-utility", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library"], -) diff --git a/vendor/github.com/davecgh/go-spew/spew/BUILD.bazel b/vendor/github.com/davecgh/go-spew/spew/BUILD.bazel deleted file mode 100644 index d1753a5d57c..00000000000 --- a/vendor/github.com/davecgh/go-spew/spew/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bypass.go", - "common.go", - "config.go", - "doc.go", - "dump.go", - "format.go", - 
"spew.go", - ], - importmap = "installer/vendor/github.com/davecgh/go-spew/spew", - importpath = "github.com/davecgh/go-spew/spew", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/BUILD.bazel b/vendor/github.com/elazarl/go-bindata-assetfs/BUILD.bazel deleted file mode 100644 index 2d497753783..00000000000 --- a/vendor/github.com/elazarl/go-bindata-assetfs/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "assetfs.go", - "doc.go", - ], - importmap = "installer/vendor/github.com/elazarl/go-bindata-assetfs", - importpath = "github.com/elazarl/go-bindata-assetfs", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/emicklei/go-restful-swagger12/BUILD.bazel b/vendor/github.com/emicklei/go-restful-swagger12/BUILD.bazel deleted file mode 100644 index 94d11ce7254..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api_declaration_list.go", - "config.go", - "model_builder.go", - "model_list.go", - "model_property_ext.go", - "model_property_list.go", - "ordered_route_map.go", - "swagger.go", - "swagger_builder.go", - "swagger_webservice.go", - ], - importmap = "installer/vendor/github.com/emicklei/go-restful-swagger12", - importpath = "github.com/emicklei/go-restful-swagger12", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/emicklei/go-restful/log:go_default_library", - ], -) diff --git a/vendor/github.com/emicklei/go-restful/BUILD.bazel b/vendor/github.com/emicklei/go-restful/BUILD.bazel deleted file mode 100644 index c31b6131c85..00000000000 --- a/vendor/github.com/emicklei/go-restful/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "compress.go", - "compressor_cache.go", - "compressor_pools.go", - "compressors.go", - "constants.go", - "container.go", - "cors_filter.go", - "curly.go", - "curly_route.go", - "doc.go", - "entity_accessors.go", - "filter.go", - "jsr311.go", - "logger.go", - "mime.go", - "options_filter.go", - "parameter.go", - "path_expression.go", - "request.go", - "response.go", - "route.go", - "route_builder.go", - "router.go", - "service_error.go", - "web_service.go", - "web_service_container.go", - ], - importmap = "installer/vendor/github.com/emicklei/go-restful", - importpath = "github.com/emicklei/go-restful", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/emicklei/go-restful/log:go_default_library"], -) diff --git a/vendor/github.com/emicklei/go-restful/log/BUILD.bazel b/vendor/github.com/emicklei/go-restful/log/BUILD.bazel deleted file mode 100644 index d3dc92d36b6..00000000000 --- a/vendor/github.com/emicklei/go-restful/log/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["log.go"], - importmap = "installer/vendor/github.com/emicklei/go-restful/log", - importpath = "github.com/emicklei/go-restful/log", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/evanphx/json-patch/BUILD.bazel b/vendor/github.com/evanphx/json-patch/BUILD.bazel deleted file mode 100644 index 
46219d51f96..00000000000 --- a/vendor/github.com/evanphx/json-patch/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "merge.go", - "patch.go", - ], - importmap = "installer/vendor/github.com/evanphx/json-patch", - importpath = "github.com/evanphx/json-patch", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/ghodss/yaml/BUILD.bazel b/vendor/github.com/ghodss/yaml/BUILD.bazel deleted file mode 100644 index a881a54d264..00000000000 --- a/vendor/github.com/ghodss/yaml/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "fields.go", - "yaml.go", - ], - importmap = "installer/vendor/github.com/ghodss/yaml", - importpath = "github.com/ghodss/yaml", - visibility = ["//visibility:public"], - deps = ["//vendor/gopkg.in/yaml.v2:go_default_library"], -) diff --git a/vendor/github.com/go-ini/ini/BUILD.bazel b/vendor/github.com/go-ini/ini/BUILD.bazel deleted file mode 100644 index d217da02179..00000000000 --- a/vendor/github.com/go-ini/ini/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "error.go", - "ini.go", - "key.go", - "parser.go", - "section.go", - "struct.go", - ], - importmap = "installer/vendor/github.com/go-ini/ini", - importpath = "github.com/go-ini/ini", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/go-openapi/jsonpointer/BUILD.bazel b/vendor/github.com/go-openapi/jsonpointer/BUILD.bazel deleted file mode 100644 index c17e5a2e065..00000000000 --- a/vendor/github.com/go-openapi/jsonpointer/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["pointer.go"], - importmap = "installer/vendor/github.com/go-openapi/jsonpointer", - importpath = "github.com/go-openapi/jsonpointer", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/go-openapi/swag:go_default_library"], -) diff --git a/vendor/github.com/go-openapi/jsonreference/BUILD.bazel b/vendor/github.com/go-openapi/jsonreference/BUILD.bazel deleted file mode 100644 index d5882ac57a5..00000000000 --- a/vendor/github.com/go-openapi/jsonreference/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["reference.go"], - importmap = "installer/vendor/github.com/go-openapi/jsonreference", - importpath = "github.com/go-openapi/jsonreference", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/PuerkitoBio/purell:go_default_library", - "//vendor/github.com/go-openapi/jsonpointer:go_default_library", - ], -) diff --git a/vendor/github.com/go-openapi/spec/BUILD.bazel b/vendor/github.com/go-openapi/spec/BUILD.bazel deleted file mode 100644 index 038ccc0d047..00000000000 --- a/vendor/github.com/go-openapi/spec/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bindata.go", - "contact_info.go", - "expander.go", - "external_docs.go", - "header.go", - "info.go", - "items.go", - "license.go", - "operation.go", - "parameter.go", - "path_item.go", - "paths.go", - "ref.go", - "response.go", - "responses.go", - "schema.go", - "security_scheme.go", - "spec.go", - 
"swagger.go", - "tag.go", - "xml_object.go", - ], - importmap = "installer/vendor/github.com/go-openapi/spec", - importpath = "github.com/go-openapi/spec", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/go-openapi/jsonpointer:go_default_library", - "//vendor/github.com/go-openapi/jsonreference:go_default_library", - "//vendor/github.com/go-openapi/swag:go_default_library", - ], -) diff --git a/vendor/github.com/go-openapi/swag/BUILD.bazel b/vendor/github.com/go-openapi/swag/BUILD.bazel deleted file mode 100644 index 7cf203b4b0d..00000000000 --- a/vendor/github.com/go-openapi/swag/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "convert.go", - "convert_types.go", - "json.go", - "loading.go", - "net.go", - "path.go", - "util.go", - "yaml.go", - ], - importmap = "installer/vendor/github.com/go-openapi/swag", - importpath = "github.com/go-openapi/swag", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/mailru/easyjson/jlexer:go_default_library", - "//vendor/github.com/mailru/easyjson/jwriter:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", - ], -) diff --git a/vendor/github.com/gogo/protobuf/proto/BUILD.bazel b/vendor/github.com/gogo/protobuf/proto/BUILD.bazel deleted file mode 100644 index ca02178985e..00000000000 --- a/vendor/github.com/gogo/protobuf/proto/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clone.go", - "decode.go", - "decode_gogo.go", - "duration.go", - "duration_gogo.go", - "encode.go", - "encode_gogo.go", - "equal.go", - "extensions.go", - "extensions_gogo.go", - "lib.go", - "lib_gogo.go", - "message_set.go", - "pointer_unsafe.go", - "pointer_unsafe_gogo.go", - "properties.go", - "properties_gogo.go", - "skip_gogo.go", - "text.go", - "text_gogo.go", - "text_parser.go", - "timestamp.go", - "timestamp_gogo.go", - ], - importmap = "installer/vendor/github.com/gogo/protobuf/proto", - importpath = "github.com/gogo/protobuf/proto", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/gogo/protobuf/sortkeys/BUILD.bazel b/vendor/github.com/gogo/protobuf/sortkeys/BUILD.bazel deleted file mode 100644 index 97dfa87a276..00000000000 --- a/vendor/github.com/gogo/protobuf/sortkeys/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["sortkeys.go"], - importmap = "installer/vendor/github.com/gogo/protobuf/sortkeys", - importpath = "github.com/gogo/protobuf/sortkeys", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/golang/glog/BUILD.bazel b/vendor/github.com/golang/glog/BUILD.bazel deleted file mode 100644 index c94e8d57520..00000000000 --- a/vendor/github.com/golang/glog/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "glog.go", - "glog_file.go", - ], - importmap = "installer/vendor/github.com/golang/glog", - importpath = "github.com/golang/glog", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/golang/protobuf/jsonpb/BUILD.bazel b/vendor/github.com/golang/protobuf/jsonpb/BUILD.bazel deleted file mode 100644 index 51fa902d5f3..00000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["jsonpb.go"], - importmap = "installer/vendor/github.com/golang/protobuf/jsonpb", - importpath = "github.com/golang/protobuf/jsonpb", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - ], -) diff --git a/vendor/github.com/golang/protobuf/proto/BUILD.bazel b/vendor/github.com/golang/protobuf/proto/BUILD.bazel deleted file mode 100644 index 959a375f4d1..00000000000 --- a/vendor/github.com/golang/protobuf/proto/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clone.go", - "decode.go", - "encode.go", - "equal.go", - "extensions.go", - "lib.go", - "message_set.go", - "pointer_unsafe.go", - "properties.go", - "text.go", - "text_parser.go", - ], - importmap = "installer/vendor/github.com/golang/protobuf/proto", - importpath = "github.com/golang/protobuf/proto", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/golang/protobuf/ptypes/BUILD.bazel b/vendor/github.com/golang/protobuf/ptypes/BUILD.bazel deleted file mode 100644 index 624371efaaf..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "any.go", - "doc.go", - "duration.go", - "timestamp.go", - ], - importmap = "installer/vendor/github.com/golang/protobuf/ptypes", - importpath = "github.com/golang/protobuf/ptypes", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - ], -) diff --git a/vendor/github.com/golang/protobuf/ptypes/any/BUILD.bazel b/vendor/github.com/golang/protobuf/ptypes/any/BUILD.bazel deleted file mode 100644 index 254be520075..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["any.pb.go"], - importmap = "installer/vendor/github.com/golang/protobuf/ptypes/any", - importpath = "github.com/golang/protobuf/ptypes/any", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/BUILD.bazel b/vendor/github.com/golang/protobuf/ptypes/duration/BUILD.bazel deleted file mode 100644 index ee440cbd11c..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["duration.pb.go"], - importmap = "installer/vendor/github.com/golang/protobuf/ptypes/duration", - importpath = "github.com/golang/protobuf/ptypes/duration", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/BUILD.bazel b/vendor/github.com/golang/protobuf/ptypes/struct/BUILD.bazel deleted file mode 100644 index f2c3ab0abca..00000000000 --- 
a/vendor/github.com/golang/protobuf/ptypes/struct/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["struct.pb.go"], - importmap = "installer/vendor/github.com/golang/protobuf/ptypes/struct", - importpath = "github.com/golang/protobuf/ptypes/struct", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/BUILD.bazel b/vendor/github.com/golang/protobuf/ptypes/timestamp/BUILD.bazel deleted file mode 100644 index 015db40db73..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["timestamp.pb.go"], - importmap = "installer/vendor/github.com/golang/protobuf/ptypes/timestamp", - importpath = "github.com/golang/protobuf/ptypes/timestamp", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/google/btree/BUILD.bazel b/vendor/github.com/google/btree/BUILD.bazel deleted file mode 100644 index 58df1941d50..00000000000 --- a/vendor/github.com/google/btree/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["btree.go"], - importmap = "installer/vendor/github.com/google/btree", - importpath = "github.com/google/btree", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/google/gofuzz/BUILD.bazel b/vendor/github.com/google/gofuzz/BUILD.bazel deleted file mode 100644 index a343b5050f1..00000000000 --- a/vendor/github.com/google/gofuzz/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fuzz.go", - ], - importmap = "installer/vendor/github.com/google/gofuzz", - importpath = "github.com/google/gofuzz", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/googleapis/gnostic/BUILD.bazel b/vendor/github.com/googleapis/gnostic/BUILD.bazel deleted file mode 100644 index 080bb8d364a..00000000000 --- a/vendor/github.com/googleapis/gnostic/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") - -go_library( - name = "go_default_library", - srcs = ["gnostic.go"], - importmap = "installer/vendor/github.com/googleapis/gnostic", - importpath = "github.com/googleapis/gnostic", - visibility = ["//visibility:private"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", - "//vendor/github.com/googleapis/gnostic/OpenAPIv3:go_default_library", - "//vendor/github.com/googleapis/gnostic/compiler:go_default_library", - "//vendor/github.com/googleapis/gnostic/jsonwriter:go_default_library", - "//vendor/github.com/googleapis/gnostic/plugins:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", - ], -) - -go_binary( - name = "gnostic", - embed = [":go_default_library"], - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/BUILD.bazel b/vendor/github.com/googleapis/gnostic/OpenAPIv2/BUILD.bazel deleted file mode 100644 index a573eaa1b9a..00000000000 --- 
a/vendor/github.com/googleapis/gnostic/OpenAPIv2/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "OpenAPIv2.go", - "OpenAPIv2.pb.go", - ], - importmap = "installer/vendor/github.com/googleapis/gnostic/OpenAPIv2", - importpath = "github.com/googleapis/gnostic/OpenAPIv2", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/googleapis/gnostic/compiler:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - ], -) diff --git a/vendor/github.com/googleapis/gnostic/compiler/BUILD.bazel b/vendor/github.com/googleapis/gnostic/compiler/BUILD.bazel deleted file mode 100644 index fc6140a662c..00000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "error.go", - "extension-handler.go", - "helpers.go", - "main.go", - "reader.go", - ], - importmap = "installer/vendor/github.com/googleapis/gnostic/compiler", - importpath = "github.com/googleapis/gnostic/compiler", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/googleapis/gnostic/extensions:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - ], -) diff --git a/vendor/github.com/googleapis/gnostic/extensions/BUILD.bazel b/vendor/github.com/googleapis/gnostic/extensions/BUILD.bazel deleted file mode 100644 index 61947bd119f..00000000000 --- a/vendor/github.com/googleapis/gnostic/extensions/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "extension.pb.go", - "extensions.go", - ], - importmap = "installer/vendor/github.com/googleapis/gnostic/extensions", - importpath = "github.com/googleapis/gnostic/extensions", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/golang/protobuf/ptypes:go_default_library", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - ], -) diff --git a/vendor/github.com/gregjones/httpcache/BUILD.bazel b/vendor/github.com/gregjones/httpcache/BUILD.bazel deleted file mode 100644 index 21788739ff1..00000000000 --- a/vendor/github.com/gregjones/httpcache/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["httpcache.go"], - importmap = "installer/vendor/github.com/gregjones/httpcache", - importpath = "github.com/gregjones/httpcache", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/BUILD.bazel b/vendor/github.com/gregjones/httpcache/diskcache/BUILD.bazel deleted file mode 100644 index 83b68cd02b9..00000000000 --- a/vendor/github.com/gregjones/httpcache/diskcache/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["diskcache.go"], - importmap = "installer/vendor/github.com/gregjones/httpcache/diskcache", - importpath = "github.com/gregjones/httpcache/diskcache", - visibility = ["//visibility:public"], - deps 
= ["//vendor/github.com/peterbourgon/diskv:go_default_library"], -) diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/BUILD.bazel b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/BUILD.bazel deleted file mode 100644 index 1d974a6c8a5..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "client_reporter.go", - "server.go", - "server_reporter.go", - "util.go", - ], - importmap = "installer/vendor/github.com/grpc-ecosystem/go-grpc-prometheus", - importpath = "github.com/grpc-ecosystem/go-grpc-prometheus", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - ], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel deleted file mode 100644 index f181a2bae42..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "convert.go", - "doc.go", - "errors.go", - "handler.go", - "marshal_json.go", - "marshal_jsonpb.go", - "marshaler.go", - "marshaler_registry.go", - "mux.go", - "pattern.go", - "proto2_convert.go", - "query.go", - ], - importmap = "installer/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime", - importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/jsonpb:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal:go_default_library", - "//vendor/github.com/grpc-ecosystem/grpc-gateway/utilities:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/google.golang.org/grpc:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/metadata:go_default_library", - ], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/BUILD.bazel deleted file mode 100644 index 5b3560cb9cd..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["stream_chunk.pb.go"], - importmap = "installer/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel deleted file mode 100644 index 276af163457..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 
@@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "pattern.go", - "trie.go", - ], - importmap = "installer/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities", - importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/hashicorp/golang-lru/BUILD.bazel b/vendor/github.com/hashicorp/golang-lru/BUILD.bazel deleted file mode 100644 index e3dd6fb746e..00000000000 --- a/vendor/github.com/hashicorp/golang-lru/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "2q.go", - "arc.go", - "lru.go", - ], - importmap = "installer/vendor/github.com/hashicorp/golang-lru", - importpath = "github.com/hashicorp/golang-lru", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/hashicorp/golang-lru/simplelru:go_default_library"], -) diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/BUILD.bazel b/vendor/github.com/hashicorp/golang-lru/simplelru/BUILD.bazel deleted file mode 100644 index 2fa53898063..00000000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["lru.go"], - importmap = "installer/vendor/github.com/hashicorp/golang-lru/simplelru", - importpath = "github.com/hashicorp/golang-lru/simplelru", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/howeyc/gopass/BUILD.bazel b/vendor/github.com/howeyc/gopass/BUILD.bazel deleted file mode 100644 index f804d5a9c27..00000000000 --- a/vendor/github.com/howeyc/gopass/BUILD.bazel +++ /dev/null @@ -1,49 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "pass.go", - "terminal.go", - "terminal_solaris.go", - ], - importmap = "installer/vendor/github.com/howeyc/gopass", - importpath = "github.com/howeyc/gopass", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/imdario/mergo/BUILD.bazel 
b/vendor/github.com/imdario/mergo/BUILD.bazel deleted file mode 100644 index 896876aa24f..00000000000 --- a/vendor/github.com/imdario/mergo/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "map.go", - "merge.go", - "mergo.go", - ], - importmap = "installer/vendor/github.com/imdario/mergo", - importpath = "github.com/imdario/mergo", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel b/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel deleted file mode 100644 index 0feb6c8013b..00000000000 --- a/vendor/github.com/inconshreveable/mousetrap/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "trap_others.go", - "trap_windows.go", - "trap_windows_1.4.go", - ], - importmap = "installer/vendor/github.com/inconshreveable/mousetrap", - importpath = "github.com/inconshreveable/mousetrap", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/jmespath/go-jmespath/BUILD.bazel b/vendor/github.com/jmespath/go-jmespath/BUILD.bazel deleted file mode 100644 index 51ccce69dfe..00000000000 --- a/vendor/github.com/jmespath/go-jmespath/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api.go", - "astnodetype_string.go", - "functions.go", - "interpreter.go", - "lexer.go", - "parser.go", - "toktype_string.go", - "util.go", - ], - importmap = "installer/vendor/github.com/jmespath/go-jmespath", - importpath = "github.com/jmespath/go-jmespath", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/json-iterator/go/BUILD.bazel b/vendor/github.com/json-iterator/go/BUILD.bazel deleted file mode 100644 index 66e4db82915..00000000000 --- a/vendor/github.com/json-iterator/go/BUILD.bazel +++ /dev/null @@ -1,48 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "feature_adapter.go", - "feature_any.go", - "feature_any_array.go", - "feature_any_bool.go", - "feature_any_float.go", - "feature_any_int32.go", - "feature_any_int64.go", - "feature_any_invalid.go", - "feature_any_nil.go", - "feature_any_number.go", - "feature_any_object.go", - "feature_any_string.go", - "feature_any_uint32.go", - "feature_any_uint64.go", - "feature_config.go", - "feature_iter.go", - "feature_iter_array.go", - "feature_iter_float.go", - "feature_iter_int.go", - "feature_iter_object.go", - "feature_iter_skip.go", - "feature_iter_skip_strict.go", - "feature_iter_string.go", - "feature_json_number.go", - "feature_pool.go", - "feature_reflect.go", - "feature_reflect_array.go", - "feature_reflect_extension.go", - "feature_reflect_map.go", - "feature_reflect_native.go", - "feature_reflect_object.go", - "feature_reflect_slice.go", - "feature_reflect_struct_decoder.go", - "feature_stream.go", - "feature_stream_float.go", - "feature_stream_int.go", - "feature_stream_string.go", - "jsoniter.go", - ], - importmap = "installer/vendor/github.com/json-iterator/go", - importpath = "github.com/json-iterator/go", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/juju/ratelimit/BUILD.bazel b/vendor/github.com/juju/ratelimit/BUILD.bazel deleted file mode 100644 index 941a5744d2d..00000000000 --- a/vendor/github.com/juju/ratelimit/BUILD.bazel +++ 
/dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "ratelimit.go", - "reader.go", - ], - importmap = "installer/vendor/github.com/juju/ratelimit", - importpath = "github.com/juju/ratelimit", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/kballard/go-shellquote/BUILD.bazel b/vendor/github.com/kballard/go-shellquote/BUILD.bazel deleted file mode 100644 index e828118d501..00000000000 --- a/vendor/github.com/kballard/go-shellquote/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "quote.go", - "unquote.go", - ], - importmap = "installer/vendor/github.com/kballard/go-shellquote", - importpath = "github.com/kballard/go-shellquote", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/BUILD.bazel b/vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/BUILD.bazel deleted file mode 100644 index e21d5a51e84..00000000000 --- a/vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/BUILD.bazel +++ /dev/null @@ -1,39 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api_group_builder.go", - "api_unversioned_resource_builder.go", - "api_version_builder.go", - "api_versioned_resource_builder.go", - "default_controller_fns.go", - "default_scheme_fns.go", - "default_storage_strategy.go", - "resource_interfaces.go", - "scheme.go", - ], - importmap = "installer/vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders", - importpath = "github.com/kubernetes-incubator/apiserver-builder/pkg/builders", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", - ], -) diff --git a/vendor/github.com/libvirt/libvirt-go/BUILD.bazel b/vendor/github.com/libvirt/libvirt-go/BUILD.bazel deleted file mode 100644 index b42a08b755c..00000000000 --- a/vendor/github.com/libvirt/libvirt-go/BUILD.bazel +++ /dev/null @@ -1,88 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "callbacks.go", - "callbacks_wrapper.go", - 
"callbacks_wrapper.h", - "connect.go", - "connect_compat.h", - "connect_wrapper.go", - "connect_wrapper.h", - "doc.go", - "domain.go", - "domain_compat.h", - "domain_events.go", - "domain_events_wrapper.go", - "domain_events_wrapper.h", - "domain_snapshot.go", - "domain_snapshot_wrapper.go", - "domain_snapshot_wrapper.h", - "domain_wrapper.go", - "domain_wrapper.h", - "error.go", - "error_compat.h", - "events.go", - "events_wrapper.go", - "events_wrapper.h", - "interface.go", - "interface_wrapper.go", - "interface_wrapper.h", - "lxc.go", - "lxc_wrapper.go", - "lxc_wrapper.h", - "network.go", - "network_compat.h", - "network_events.go", - "network_events_wrapper.go", - "network_events_wrapper.h", - "network_wrapper.go", - "network_wrapper.h", - "node_device.go", - "node_device_compat.h", - "node_device_events.go", - "node_device_events_wrapper.go", - "node_device_events_wrapper.h", - "node_device_wrapper.go", - "node_device_wrapper.h", - "nwfilter.go", - "nwfilter_binding.go", - "nwfilter_binding_compat.h", - "nwfilter_binding_wrapper.go", - "nwfilter_binding_wrapper.h", - "nwfilter_wrapper.go", - "nwfilter_wrapper.h", - "qemu.go", - "qemu_compat.h", - "qemu_wrapper.go", - "qemu_wrapper.h", - "secret.go", - "secret_compat.h", - "secret_events.go", - "secret_events_wrapper.go", - "secret_events_wrapper.h", - "secret_wrapper.go", - "secret_wrapper.h", - "storage_pool.go", - "storage_pool_compat.h", - "storage_pool_events.go", - "storage_pool_events_wrapper.go", - "storage_pool_events_wrapper.h", - "storage_pool_wrapper.go", - "storage_pool_wrapper.h", - "storage_volume.go", - "storage_volume_compat.h", - "storage_volume_wrapper.go", - "storage_volume_wrapper.h", - "stream.go", - "stream_compat.h", - "stream_wrapper.go", - "stream_wrapper.h", - "typedparams.go", - ], - cgo = True, - importmap = "installer/vendor/github.com/libvirt/libvirt-go", - importpath = "github.com/libvirt/libvirt-go", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/mailru/easyjson/BUILD.bazel b/vendor/github.com/mailru/easyjson/BUILD.bazel deleted file mode 100644 index 846b88f0044..00000000000 --- a/vendor/github.com/mailru/easyjson/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "helpers.go", - "raw.go", - ], - importmap = "installer/vendor/github.com/mailru/easyjson", - importpath = "github.com/mailru/easyjson", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/mailru/easyjson/jlexer:go_default_library", - "//vendor/github.com/mailru/easyjson/jwriter:go_default_library", - ], -) diff --git a/vendor/github.com/mailru/easyjson/buffer/BUILD.bazel b/vendor/github.com/mailru/easyjson/buffer/BUILD.bazel deleted file mode 100644 index 3057f5a99db..00000000000 --- a/vendor/github.com/mailru/easyjson/buffer/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["pool.go"], - importmap = "installer/vendor/github.com/mailru/easyjson/buffer", - importpath = "github.com/mailru/easyjson/buffer", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/mailru/easyjson/jlexer/BUILD.bazel b/vendor/github.com/mailru/easyjson/jlexer/BUILD.bazel deleted file mode 100644 index 7a887c57bd2..00000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = 
"go_default_library", - srcs = [ - "bytestostr.go", - "error.go", - "lexer.go", - ], - importmap = "installer/vendor/github.com/mailru/easyjson/jlexer", - importpath = "github.com/mailru/easyjson/jlexer", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/mailru/easyjson/jwriter/BUILD.bazel b/vendor/github.com/mailru/easyjson/jwriter/BUILD.bazel deleted file mode 100644 index 6a600a39a29..00000000000 --- a/vendor/github.com/mailru/easyjson/jwriter/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["writer.go"], - importmap = "installer/vendor/github.com/mailru/easyjson/jwriter", - importpath = "github.com/mailru/easyjson/jwriter", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/mailru/easyjson/buffer:go_default_library"], -) diff --git a/vendor/github.com/mattn/go-colorable/BUILD.bazel b/vendor/github.com/mattn/go-colorable/BUILD.bazel deleted file mode 100644 index 06ec5e2d307..00000000000 --- a/vendor/github.com/mattn/go-colorable/BUILD.bazel +++ /dev/null @@ -1,49 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "colorable_others.go", - "colorable_windows.go", - "noncolorable.go", - ], - importmap = "installer/vendor/github.com/mattn/go-colorable", - importpath = "github.com/mattn/go-colorable", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "@io_bazel_rules_go//go/platform:nacl": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "@io_bazel_rules_go//go/platform:plan9": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/mattn/go-isatty/BUILD.bazel b/vendor/github.com/mattn/go-isatty/BUILD.bazel deleted file mode 100644 index 7a571e215bc..00000000000 --- a/vendor/github.com/mattn/go-isatty/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "isatty_bsd.go", - "isatty_linux.go", - "isatty_linux_ppc64x.go", - "isatty_solaris.go", - "isatty_windows.go", - ], - importmap = "installer/vendor/github.com/mattn/go-isatty", - importpath = "github.com/mattn/go-isatty", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:linux_ppc64": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - 
"@io_bazel_rules_go//go/platform:linux_ppc64le": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris_amd64": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel deleted file mode 100644 index f6648cb6113..00000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "decode.go", - "doc.go", - "encode.go", - ], - importmap = "installer/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil", - importpath = "github.com/matttproud/golang_protobuf_extensions/pbutil", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/mgutz/ansi/BUILD.bazel b/vendor/github.com/mgutz/ansi/BUILD.bazel deleted file mode 100644 index 7c772c1fbed..00000000000 --- a/vendor/github.com/mgutz/ansi/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "ansi.go", - "doc.go", - "print.go", - ], - importmap = "installer/vendor/github.com/mgutz/ansi", - importpath = "github.com/mgutz/ansi", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/mattn/go-colorable:go_default_library"], -) diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/BUILD.bazel b/vendor/github.com/mxk/go-flowrate/flowrate/BUILD.bazel deleted file mode 100644 index 1cb986b60d7..00000000000 --- a/vendor/github.com/mxk/go-flowrate/flowrate/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "flowrate.go", - "io.go", - "util.go", - ], - importmap = "installer/vendor/github.com/mxk/go-flowrate/flowrate", - importpath = "github.com/mxk/go-flowrate/flowrate", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/openshift/hive/contrib/pkg/aws_tag_deprovision/BUILD.bazel b/vendor/github.com/openshift/hive/contrib/pkg/aws_tag_deprovision/BUILD.bazel deleted file mode 100644 index dee988cd12e..00000000000 --- a/vendor/github.com/openshift/hive/contrib/pkg/aws_tag_deprovision/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["aws_tag_deprovision.go"], - importmap = "installer/vendor/github.com/openshift/hive/contrib/pkg/aws_tag_deprovision", - importpath = "github.com/openshift/hive/contrib/pkg/aws_tag_deprovision", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/elb:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/iam:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/route53:go_default_library", - 
"//vendor/github.com/aws/aws-sdk-go/service/s3:go_default_library", - "//vendor/github.com/aws/aws-sdk-go/service/s3/s3manager:go_default_library", - "//vendor/github.com/sirupsen/logrus:go_default_library", - "//vendor/github.com/spf13/cobra:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - ], -) diff --git a/vendor/github.com/pborman/uuid/BUILD.bazel b/vendor/github.com/pborman/uuid/BUILD.bazel deleted file mode 100644 index 42f38c11dea..00000000000 --- a/vendor/github.com/pborman/uuid/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "dce.go", - "doc.go", - "hash.go", - "json.go", - "node.go", - "time.go", - "util.go", - "uuid.go", - "version1.go", - "version4.go", - ], - importmap = "installer/vendor/github.com/pborman/uuid", - importpath = "github.com/pborman/uuid", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/peterbourgon/diskv/BUILD.bazel b/vendor/github.com/peterbourgon/diskv/BUILD.bazel deleted file mode 100644 index 677f1ba0f72..00000000000 --- a/vendor/github.com/peterbourgon/diskv/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "compression.go", - "diskv.go", - "index.go", - ], - importmap = "installer/vendor/github.com/peterbourgon/diskv", - importpath = "github.com/peterbourgon/diskv", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/google/btree:go_default_library"], -) diff --git a/vendor/github.com/pmezard/go-difflib/difflib/BUILD.bazel b/vendor/github.com/pmezard/go-difflib/difflib/BUILD.bazel deleted file mode 100644 index d0c4ef78793..00000000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["difflib.go"], - importmap = "installer/vendor/github.com/pmezard/go-difflib/difflib", - importpath = "github.com/pmezard/go-difflib/difflib", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel b/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel deleted file mode 100644 index 7a8ad035391..00000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "collector.go", - "counter.go", - "desc.go", - "doc.go", - "expvar_collector.go", - "fnv.go", - "gauge.go", - "go_collector.go", - "histogram.go", - "http.go", - "metric.go", - "observer.go", - "process_collector.go", - "registry.go", - "summary.go", - "timer.go", - "untyped.go", - "value.go", - "vec.go", - ], - importmap = "installer/vendor/github.com/prometheus/client_golang/prometheus", - importpath = "github.com/prometheus/client_golang/prometheus", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/beorn7/perks/quantile:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/prometheus/client_model/go:go_default_library", - "//vendor/github.com/prometheus/common/expfmt:go_default_library", - "//vendor/github.com/prometheus/common/model:go_default_library", - "//vendor/github.com/prometheus/procfs:go_default_library", - ], -) diff --git 
a/vendor/github.com/prometheus/client_model/go/BUILD.bazel b/vendor/github.com/prometheus/client_model/go/BUILD.bazel deleted file mode 100644 index fe0bdaee02b..00000000000 --- a/vendor/github.com/prometheus/client_model/go/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["metrics.pb.go"], - importmap = "installer/vendor/github.com/prometheus/client_model/go", - importpath = "github.com/prometheus/client_model/go", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/github.com/prometheus/common/expfmt/BUILD.bazel b/vendor/github.com/prometheus/common/expfmt/BUILD.bazel deleted file mode 100644 index 90baf6fca4d..00000000000 --- a/vendor/github.com/prometheus/common/expfmt/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "decode.go", - "encode.go", - "expfmt.go", - "text_create.go", - "text_parse.go", - ], - importmap = "installer/vendor/github.com/prometheus/common/expfmt", - importpath = "github.com/prometheus/common/expfmt", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/matttproud/golang_protobuf_extensions/pbutil:go_default_library", - "//vendor/github.com/prometheus/client_model/go:go_default_library", - "//vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg:go_default_library", - "//vendor/github.com/prometheus/common/model:go_default_library", - ], -) diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/BUILD.bazel b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/BUILD.bazel deleted file mode 100644 index efa9d7843a7..00000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["autoneg.go"], - importmap = "installer/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - importpath = "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - visibility = ["//vendor/github.com/prometheus/common:__subpackages__"], -) diff --git a/vendor/github.com/prometheus/common/model/BUILD.bazel b/vendor/github.com/prometheus/common/model/BUILD.bazel deleted file mode 100644 index 1d82a68a4f8..00000000000 --- a/vendor/github.com/prometheus/common/model/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "alert.go", - "fingerprinting.go", - "fnv.go", - "labels.go", - "labelset.go", - "metric.go", - "model.go", - "signature.go", - "silence.go", - "time.go", - "value.go", - ], - importmap = "installer/vendor/github.com/prometheus/common/model", - importpath = "github.com/prometheus/common/model", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/prometheus/procfs/BUILD.bazel b/vendor/github.com/prometheus/procfs/BUILD.bazel deleted file mode 100644 index 1f47f89f854..00000000000 --- a/vendor/github.com/prometheus/procfs/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "buddyinfo.go", - "doc.go", - 
"fs.go", - "ipvs.go", - "mdstat.go", - "mountstats.go", - "proc.go", - "proc_io.go", - "proc_limits.go", - "proc_stat.go", - "stat.go", - "xfrm.go", - ], - importmap = "installer/vendor/github.com/prometheus/procfs", - importpath = "github.com/prometheus/procfs", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/prometheus/procfs/xfs:go_default_library"], -) diff --git a/vendor/github.com/prometheus/procfs/xfs/BUILD.bazel b/vendor/github.com/prometheus/procfs/xfs/BUILD.bazel deleted file mode 100644 index 733ec5d4fb6..00000000000 --- a/vendor/github.com/prometheus/procfs/xfs/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "parse.go", - "xfs.go", - ], - importmap = "installer/vendor/github.com/prometheus/procfs/xfs", - importpath = "github.com/prometheus/procfs/xfs", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/shurcooL/vfsgen/BUILD.bazel b/vendor/github.com/shurcooL/vfsgen/BUILD.bazel deleted file mode 100644 index d606414d749..00000000000 --- a/vendor/github.com/shurcooL/vfsgen/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "commentwriter.go", - "doc.go", - "generator.go", - "options.go", - "stringwriter.go", - ], - importmap = "installer/vendor/github.com/shurcooL/vfsgen", - importpath = "github.com/shurcooL/vfsgen", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/shurcooL/httpfs/vfsutil:go_default_library"], -) diff --git a/vendor/github.com/sirupsen/logrus/BUILD.bazel b/vendor/github.com/sirupsen/logrus/BUILD.bazel deleted file mode 100644 index 8d5b5a03882..00000000000 --- a/vendor/github.com/sirupsen/logrus/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "entry.go", - "exported.go", - "formatter.go", - "hooks.go", - "json_formatter.go", - "logger.go", - "logrus.go", - "terminal_bsd.go", - "terminal_linux.go", - "terminal_notwindows.go", - "terminal_solaris.go", - "terminal_windows.go", - "text_formatter.go", - "writer.go", - ], - importmap = "installer/vendor/github.com/sirupsen/logrus", - importpath = "github.com/sirupsen/logrus", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/spf13/cobra/BUILD.bazel b/vendor/github.com/spf13/cobra/BUILD.bazel deleted file mode 100644 index 06040507833..00000000000 --- a/vendor/github.com/spf13/cobra/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "args.go", - "bash_completions.go", - "cobra.go", - "command.go", - "command_notwin.go", - "command_win.go", - "zsh_completions.go", - ], - importmap = "installer/vendor/github.com/spf13/cobra", - importpath = "github.com/spf13/cobra", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/spf13/pflag:go_default_library", - ] + select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/inconshreveable/mousetrap:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/github.com/spf13/pflag/BUILD.bazel b/vendor/github.com/spf13/pflag/BUILD.bazel 
deleted file mode 100644 index 0eea414b614..00000000000 --- a/vendor/github.com/spf13/pflag/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bool.go", - "bool_slice.go", - "count.go", - "duration.go", - "flag.go", - "float32.go", - "float64.go", - "golangflag.go", - "int.go", - "int32.go", - "int64.go", - "int8.go", - "int_slice.go", - "ip.go", - "ip_slice.go", - "ipmask.go", - "ipnet.go", - "string.go", - "string_array.go", - "string_slice.go", - "uint.go", - "uint16.go", - "uint32.go", - "uint64.go", - "uint8.go", - "uint_slice.go", - ], - importmap = "installer/vendor/github.com/spf13/pflag", - importpath = "github.com/spf13/pflag", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/stretchr/testify/BUILD.bazel b/vendor/github.com/stretchr/testify/BUILD.bazel deleted file mode 100644 index d1e79fc94fd..00000000000 --- a/vendor/github.com/stretchr/testify/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - importmap = "installer/vendor/github.com/stretchr/testify", - importpath = "github.com/stretchr/testify", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/stretchr/testify/assert:go_default_library", - "//vendor/github.com/stretchr/testify/http:go_default_library", - "//vendor/github.com/stretchr/testify/mock:go_default_library", - ], -) diff --git a/vendor/github.com/stretchr/testify/assert/BUILD.bazel b/vendor/github.com/stretchr/testify/assert/BUILD.bazel deleted file mode 100644 index a3c8cb120e6..00000000000 --- a/vendor/github.com/stretchr/testify/assert/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "assertion_format.go", - "assertion_forward.go", - "assertions.go", - "doc.go", - "errors.go", - "forward_assertions.go", - "http_assertions.go", - ], - importmap = "installer/vendor/github.com/stretchr/testify/assert", - importpath = "github.com/stretchr/testify/assert", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/pmezard/go-difflib/difflib:go_default_library", - ], -) diff --git a/vendor/github.com/ugorji/go/codec/BUILD.bazel b/vendor/github.com/ugorji/go/codec/BUILD.bazel deleted file mode 100644 index 96fdc540e8f..00000000000 --- a/vendor/github.com/ugorji/go/codec/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "0doc.go", - "binc.go", - "cbor.go", - "decode.go", - "decode_go.go", - "decode_go14.go", - "encode.go", - "fast-path.generated.go", - "gen.generated.go", - "gen.go", - "gen-helper.generated.go", - "gen_15.go", - "gen_16.go", - "gen_17.go", - "helper.go", - "helper_internal.go", - "helper_not_unsafe.go", - "json.go", - "msgpack.go", - "noop.go", - "prebuild.go", - "rpc.go", - "simple.go", - "time.go", - ], - importmap = "installer/vendor/github.com/ugorji/go/codec", - importpath = "github.com/ugorji/go/codec", - visibility = ["//visibility:public"], -) diff --git a/vendor/github.com/vincent-petithory/dataurl/BUILD.bazel b/vendor/github.com/vincent-petithory/dataurl/BUILD.bazel deleted file mode 100644 index add0ecf5403..00000000000 --- a/vendor/github.com/vincent-petithory/dataurl/BUILD.bazel +++ 
/dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "dataurl.go", - "doc.go", - "lex.go", - "rfc2396.go", - ], - importmap = "installer/vendor/github.com/vincent-petithory/dataurl", - importpath = "github.com/vincent-petithory/dataurl", - visibility = ["//visibility:public"], -) diff --git a/vendor/go4.org/errorutil/BUILD.bazel b/vendor/go4.org/errorutil/BUILD.bazel deleted file mode 100644 index 44fd57372ed..00000000000 --- a/vendor/go4.org/errorutil/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["highlight.go"], - importmap = "installer/vendor/go4.org/errorutil", - importpath = "go4.org/errorutil", - visibility = ["//visibility:public"], -) diff --git a/vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel b/vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel deleted file mode 100644 index a1940eac3fe..00000000000 --- a/vendor/golang.org/x/crypto/ssh/terminal/BUILD.bazel +++ /dev/null @@ -1,44 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "terminal.go", - "util.go", - "util_bsd.go", - "util_linux.go", - "util_plan9.go", - "util_solaris.go", - "util_windows.go", - ], - importmap = "installer/vendor/golang.org/x/crypto/ssh/terminal", - importpath = "golang.org/x/crypto/ssh/terminal", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:dragonfly": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:netbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:openbsd": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:solaris": [ - "//vendor/golang.org/x/sys/unix:go_default_library", - ], - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/golang.org/x/sys/windows:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/golang.org/x/net/context/BUILD.bazel b/vendor/golang.org/x/net/context/BUILD.bazel deleted file mode 100644 index 604c493ca24..00000000000 --- a/vendor/golang.org/x/net/context/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "go17.go", - "go19.go", - "pre_go17.go", - "pre_go19.go", - ], - importmap = "installer/vendor/golang.org/x/net/context", - importpath = "golang.org/x/net/context", - visibility = ["//visibility:public"], -) diff --git a/vendor/golang.org/x/net/html/BUILD.bazel b/vendor/golang.org/x/net/html/BUILD.bazel deleted file mode 100644 index 6eced2d4946..00000000000 --- a/vendor/golang.org/x/net/html/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "const.go", - "doc.go", - "doctype.go", - "entity.go", - "escape.go", - "foreign.go", - "node.go", - "parse.go", - "render.go", - "token.go", - ], - importmap = "installer/vendor/golang.org/x/net/html", - 
importpath = "golang.org/x/net/html", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/net/html/atom:go_default_library"], -) diff --git a/vendor/golang.org/x/net/html/atom/BUILD.bazel b/vendor/golang.org/x/net/html/atom/BUILD.bazel deleted file mode 100644 index 17be877d2ad..00000000000 --- a/vendor/golang.org/x/net/html/atom/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "atom.go", - "table.go", - ], - importmap = "installer/vendor/golang.org/x/net/html/atom", - importpath = "golang.org/x/net/html/atom", - visibility = ["//visibility:public"], -) diff --git a/vendor/golang.org/x/net/http2/BUILD.bazel b/vendor/golang.org/x/net/http2/BUILD.bazel deleted file mode 100644 index 9becf28fcc9..00000000000 --- a/vendor/golang.org/x/net/http2/BUILD.bazel +++ /dev/null @@ -1,41 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "ciphers.go", - "client_conn_pool.go", - "configure_transport.go", - "databuffer.go", - "errors.go", - "flow.go", - "frame.go", - "go16.go", - "go17.go", - "go17_not18.go", - "go18.go", - "go19.go", - "gotrack.go", - "headermap.go", - "http2.go", - "not_go16.go", - "not_go17.go", - "not_go18.go", - "not_go19.go", - "pipe.go", - "server.go", - "transport.go", - "write.go", - "writesched.go", - "writesched_priority.go", - "writesched_random.go", - ], - importmap = "installer/vendor/golang.org/x/net/http2", - importpath = "golang.org/x/net/http2", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/http2/hpack:go_default_library", - "//vendor/golang.org/x/net/idna:go_default_library", - "//vendor/golang.org/x/net/lex/httplex:go_default_library", - ], -) diff --git a/vendor/golang.org/x/net/http2/hpack/BUILD.bazel b/vendor/golang.org/x/net/http2/hpack/BUILD.bazel deleted file mode 100644 index e1d2580af28..00000000000 --- a/vendor/golang.org/x/net/http2/hpack/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "encode.go", - "hpack.go", - "huffman.go", - "tables.go", - ], - importmap = "installer/vendor/golang.org/x/net/http2/hpack", - importpath = "golang.org/x/net/http2/hpack", - visibility = ["//visibility:public"], -) diff --git a/vendor/golang.org/x/net/idna/BUILD.bazel b/vendor/golang.org/x/net/idna/BUILD.bazel deleted file mode 100644 index 7e1816539a2..00000000000 --- a/vendor/golang.org/x/net/idna/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "idna.go", - "punycode.go", - "tables.go", - "trie.go", - "trieval.go", - ], - importmap = "installer/vendor/golang.org/x/net/idna", - importpath = "golang.org/x/net/idna", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/text/secure/bidirule:go_default_library", - "//vendor/golang.org/x/text/unicode/norm:go_default_library", - ], -) diff --git a/vendor/golang.org/x/net/internal/timeseries/BUILD.bazel b/vendor/golang.org/x/net/internal/timeseries/BUILD.bazel deleted file mode 100644 index 7bc0679fae4..00000000000 --- a/vendor/golang.org/x/net/internal/timeseries/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["timeseries.go"], - importmap = 
"installer/vendor/golang.org/x/net/internal/timeseries", - importpath = "golang.org/x/net/internal/timeseries", - visibility = ["//vendor/golang.org/x/net:__subpackages__"], -) diff --git a/vendor/golang.org/x/net/lex/httplex/BUILD.bazel b/vendor/golang.org/x/net/lex/httplex/BUILD.bazel deleted file mode 100644 index 1572d7d12f3..00000000000 --- a/vendor/golang.org/x/net/lex/httplex/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["httplex.go"], - importmap = "installer/vendor/golang.org/x/net/lex/httplex", - importpath = "golang.org/x/net/lex/httplex", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/net/idna:go_default_library"], -) diff --git a/vendor/golang.org/x/net/trace/BUILD.bazel b/vendor/golang.org/x/net/trace/BUILD.bazel deleted file mode 100644 index d811757f52a..00000000000 --- a/vendor/golang.org/x/net/trace/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "events.go", - "histogram.go", - "trace.go", - "trace_go16.go", - "trace_go17.go", - ], - importmap = "installer/vendor/golang.org/x/net/trace", - importpath = "golang.org/x/net/trace", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/internal/timeseries:go_default_library", - ], -) diff --git a/vendor/golang.org/x/net/websocket/BUILD.bazel b/vendor/golang.org/x/net/websocket/BUILD.bazel deleted file mode 100644 index 2c97520fad1..00000000000 --- a/vendor/golang.org/x/net/websocket/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "dial.go", - "hybi.go", - "server.go", - "websocket.go", - ], - importmap = "installer/vendor/golang.org/x/net/websocket", - importpath = "golang.org/x/net/websocket", - visibility = ["//visibility:public"], -) diff --git a/vendor/golang.org/x/sys/unix/BUILD.bazel b/vendor/golang.org/x/sys/unix/BUILD.bazel deleted file mode 100644 index 6cc68a786a4..00000000000 --- a/vendor/golang.org/x/sys/unix/BUILD.bazel +++ /dev/null @@ -1,208 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "asm_darwin_386.s", - "asm_darwin_amd64.s", - "asm_darwin_arm.s", - "asm_darwin_arm64.s", - "asm_dragonfly_amd64.s", - "asm_freebsd_386.s", - "asm_freebsd_amd64.s", - "asm_freebsd_arm.s", - "asm_linux_386.s", - "asm_linux_amd64.s", - "asm_linux_arm.s", - "asm_linux_arm64.s", - "asm_linux_mips64x.s", - "asm_linux_mipsx.s", - "asm_linux_ppc64x.s", - "asm_linux_s390x.s", - "asm_netbsd_386.s", - "asm_netbsd_amd64.s", - "asm_netbsd_arm.s", - "asm_openbsd_386.s", - "asm_openbsd_amd64.s", - "asm_openbsd_arm.s", - "asm_solaris_amd64.s", - "bluetooth_linux.go", - "cap_freebsd.go", - "constants.go", - "dev_darwin.go", - "dev_dragonfly.go", - "dev_freebsd.go", - "dev_linux.go", - "dev_netbsd.go", - "dev_openbsd.go", - "dirent.go", - "endian_big.go", - "endian_little.go", - "env_unix.go", - "env_unset.go", - "errors_freebsd_386.go", - "errors_freebsd_amd64.go", - "errors_freebsd_arm.go", - "file_unix.go", - "flock.go", - "flock_linux_32bit.go", - "openbsd_pledge.go", - "pagesize_unix.go", - "race0.go", - "sockcmsg_linux.go", - "sockcmsg_unix.go", - "str.go", - "syscall.go", - "syscall_bsd.go", - "syscall_darwin.go", - 
"syscall_darwin_386.go", - "syscall_darwin_amd64.go", - "syscall_darwin_arm.go", - "syscall_darwin_arm64.go", - "syscall_dragonfly.go", - "syscall_dragonfly_amd64.go", - "syscall_freebsd.go", - "syscall_freebsd_386.go", - "syscall_freebsd_amd64.go", - "syscall_freebsd_arm.go", - "syscall_linux.go", - "syscall_linux_386.go", - "syscall_linux_amd64.go", - "syscall_linux_amd64_gc.go", - "syscall_linux_arm.go", - "syscall_linux_arm64.go", - "syscall_linux_mips64x.go", - "syscall_linux_mipsx.go", - "syscall_linux_ppc64x.go", - "syscall_linux_s390x.go", - "syscall_netbsd.go", - "syscall_netbsd_386.go", - "syscall_netbsd_amd64.go", - "syscall_netbsd_arm.go", - "syscall_no_getwd.go", - "syscall_openbsd.go", - "syscall_openbsd_386.go", - "syscall_openbsd_amd64.go", - "syscall_openbsd_arm.go", - "syscall_solaris.go", - "syscall_solaris_amd64.go", - "syscall_unix.go", - "syscall_unix_gc.go", - "timestruct.go", - "zerrors_darwin_386.go", - "zerrors_darwin_amd64.go", - "zerrors_darwin_arm.go", - "zerrors_darwin_arm64.go", - "zerrors_dragonfly_amd64.go", - "zerrors_freebsd_386.go", - "zerrors_freebsd_amd64.go", - "zerrors_freebsd_arm.go", - "zerrors_linux_386.go", - "zerrors_linux_amd64.go", - "zerrors_linux_arm.go", - "zerrors_linux_arm64.go", - "zerrors_linux_mips.go", - "zerrors_linux_mips64.go", - "zerrors_linux_mips64le.go", - "zerrors_linux_mipsle.go", - "zerrors_linux_ppc64.go", - "zerrors_linux_ppc64le.go", - "zerrors_linux_s390x.go", - "zerrors_netbsd_386.go", - "zerrors_netbsd_amd64.go", - "zerrors_netbsd_arm.go", - "zerrors_openbsd_386.go", - "zerrors_openbsd_amd64.go", - "zerrors_openbsd_arm.go", - "zerrors_solaris_amd64.go", - "zptrace386_linux.go", - "zptracearm_linux.go", - "zptracemips_linux.go", - "zptracemipsle_linux.go", - "zsyscall_darwin_386.go", - "zsyscall_darwin_amd64.go", - "zsyscall_darwin_arm.go", - "zsyscall_darwin_arm64.go", - "zsyscall_dragonfly_amd64.go", - "zsyscall_freebsd_386.go", - "zsyscall_freebsd_amd64.go", - "zsyscall_freebsd_arm.go", - "zsyscall_linux_386.go", - "zsyscall_linux_amd64.go", - "zsyscall_linux_arm.go", - "zsyscall_linux_arm64.go", - "zsyscall_linux_mips.go", - "zsyscall_linux_mips64.go", - "zsyscall_linux_mips64le.go", - "zsyscall_linux_mipsle.go", - "zsyscall_linux_ppc64.go", - "zsyscall_linux_ppc64le.go", - "zsyscall_linux_s390x.go", - "zsyscall_netbsd_386.go", - "zsyscall_netbsd_amd64.go", - "zsyscall_netbsd_arm.go", - "zsyscall_openbsd_386.go", - "zsyscall_openbsd_amd64.go", - "zsyscall_openbsd_arm.go", - "zsyscall_solaris_amd64.go", - "zsysctl_openbsd_386.go", - "zsysctl_openbsd_amd64.go", - "zsysctl_openbsd_arm.go", - "zsysnum_darwin_386.go", - "zsysnum_darwin_amd64.go", - "zsysnum_darwin_arm.go", - "zsysnum_darwin_arm64.go", - "zsysnum_dragonfly_amd64.go", - "zsysnum_freebsd_386.go", - "zsysnum_freebsd_amd64.go", - "zsysnum_freebsd_arm.go", - "zsysnum_linux_386.go", - "zsysnum_linux_amd64.go", - "zsysnum_linux_arm.go", - "zsysnum_linux_arm64.go", - "zsysnum_linux_mips.go", - "zsysnum_linux_mips64.go", - "zsysnum_linux_mips64le.go", - "zsysnum_linux_mipsle.go", - "zsysnum_linux_ppc64.go", - "zsysnum_linux_ppc64le.go", - "zsysnum_linux_s390x.go", - "zsysnum_netbsd_386.go", - "zsysnum_netbsd_amd64.go", - "zsysnum_netbsd_arm.go", - "zsysnum_openbsd_386.go", - "zsysnum_openbsd_amd64.go", - "zsysnum_openbsd_arm.go", - "zsysnum_solaris_amd64.go", - "ztypes_darwin_386.go", - "ztypes_darwin_amd64.go", - "ztypes_darwin_arm.go", - "ztypes_darwin_arm64.go", - "ztypes_dragonfly_amd64.go", - "ztypes_freebsd_386.go", - "ztypes_freebsd_amd64.go", - 
"ztypes_freebsd_arm.go", - "ztypes_linux_386.go", - "ztypes_linux_amd64.go", - "ztypes_linux_arm.go", - "ztypes_linux_arm64.go", - "ztypes_linux_mips.go", - "ztypes_linux_mips64.go", - "ztypes_linux_mips64le.go", - "ztypes_linux_mipsle.go", - "ztypes_linux_ppc64.go", - "ztypes_linux_ppc64le.go", - "ztypes_linux_s390x.go", - "ztypes_netbsd_386.go", - "ztypes_netbsd_amd64.go", - "ztypes_netbsd_arm.go", - "ztypes_openbsd_386.go", - "ztypes_openbsd_amd64.go", - "ztypes_openbsd_arm.go", - "ztypes_solaris_amd64.go", - ], - cgo = True, - importmap = "installer/vendor/golang.org/x/sys/unix", - importpath = "golang.org/x/sys/unix", - visibility = ["//visibility:public"], -) diff --git a/vendor/golang.org/x/sys/windows/BUILD.bazel b/vendor/golang.org/x/sys/windows/BUILD.bazel deleted file mode 100644 index 5feab7dfbce..00000000000 --- a/vendor/golang.org/x/sys/windows/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "asm_windows_386.s", - "asm_windows_amd64.s", - "dll_windows.go", - "env_unset.go", - "env_windows.go", - "eventlog.go", - "exec_windows.go", - "memory_windows.go", - "mksyscall.go", - "race0.go", - "security_windows.go", - "service.go", - "str.go", - "syscall.go", - "syscall_windows.go", - "types_windows.go", - "types_windows_386.go", - "types_windows_amd64.go", - "zsyscall_windows.go", - ], - importmap = "installer/vendor/golang.org/x/sys/windows", - importpath = "golang.org/x/sys/windows", - visibility = ["//visibility:public"], -) diff --git a/vendor/golang.org/x/text/BUILD.bazel b/vendor/golang.org/x/text/BUILD.bazel deleted file mode 100644 index ef6e3026b2f..00000000000 --- a/vendor/golang.org/x/text/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["doc.go"], - importmap = "installer/vendor/golang.org/x/text", - importpath = "golang.org/x/text", - visibility = ["//visibility:public"], -) diff --git a/vendor/golang.org/x/text/cases/BUILD.bazel b/vendor/golang.org/x/text/cases/BUILD.bazel deleted file mode 100644 index 8453637be0f..00000000000 --- a/vendor/golang.org/x/text/cases/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cases.go", - "context.go", - "fold.go", - "info.go", - "map.go", - "tables.go", - "trieval.go", - ], - cgo = True, - importmap = "installer/vendor/golang.org/x/text/cases", - importpath = "golang.org/x/text/cases", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/text/internal:go_default_library", - "//vendor/golang.org/x/text/language:go_default_library", - "//vendor/golang.org/x/text/transform:go_default_library", - "//vendor/golang.org/x/text/unicode/norm:go_default_library", - ], -) diff --git a/vendor/golang.org/x/text/internal/BUILD.bazel b/vendor/golang.org/x/text/internal/BUILD.bazel deleted file mode 100644 index 13e1acd3077..00000000000 --- a/vendor/golang.org/x/text/internal/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "internal.go", - "match.go", - "tables.go", - ], - importmap = "installer/vendor/golang.org/x/text/internal", - importpath = "golang.org/x/text/internal", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/text/language:go_default_library"], -) diff --git 
a/vendor/golang.org/x/text/internal/tag/BUILD.bazel b/vendor/golang.org/x/text/internal/tag/BUILD.bazel deleted file mode 100644 index 804a0bde973..00000000000 --- a/vendor/golang.org/x/text/internal/tag/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["tag.go"], - importmap = "installer/vendor/golang.org/x/text/internal/tag", - importpath = "golang.org/x/text/internal/tag", - visibility = ["//vendor/golang.org/x/text:__subpackages__"], -) diff --git a/vendor/golang.org/x/text/language/BUILD.bazel b/vendor/golang.org/x/text/language/BUILD.bazel deleted file mode 100644 index b2bf78684c4..00000000000 --- a/vendor/golang.org/x/text/language/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "common.go", - "coverage.go", - "go1_1.go", - "go1_2.go", - "index.go", - "language.go", - "lookup.go", - "match.go", - "parse.go", - "tables.go", - "tags.go", - ], - importmap = "installer/vendor/golang.org/x/text/language", - importpath = "golang.org/x/text/language", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/text/internal/tag:go_default_library"], -) diff --git a/vendor/golang.org/x/text/runes/BUILD.bazel b/vendor/golang.org/x/text/runes/BUILD.bazel deleted file mode 100644 index 266f52f90c8..00000000000 --- a/vendor/golang.org/x/text/runes/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cond.go", - "runes.go", - ], - importmap = "installer/vendor/golang.org/x/text/runes", - importpath = "golang.org/x/text/runes", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/text/transform:go_default_library"], -) diff --git a/vendor/golang.org/x/text/secure/bidirule/BUILD.bazel b/vendor/golang.org/x/text/secure/bidirule/BUILD.bazel deleted file mode 100644 index f32b1b9b5d3..00000000000 --- a/vendor/golang.org/x/text/secure/bidirule/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["bidirule.go"], - importmap = "installer/vendor/golang.org/x/text/secure/bidirule", - importpath = "golang.org/x/text/secure/bidirule", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/text/transform:go_default_library", - "//vendor/golang.org/x/text/unicode/bidi:go_default_library", - ], -) diff --git a/vendor/golang.org/x/text/secure/precis/BUILD.bazel b/vendor/golang.org/x/text/secure/precis/BUILD.bazel deleted file mode 100644 index ca4ef0a5e2b..00000000000 --- a/vendor/golang.org/x/text/secure/precis/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "class.go", - "context.go", - "doc.go", - "nickname.go", - "options.go", - "profile.go", - "profiles.go", - "tables.go", - "transformer.go", - "trieval.go", - ], - importmap = "installer/vendor/golang.org/x/text/secure/precis", - importpath = "golang.org/x/text/secure/precis", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/text/cases:go_default_library", - "//vendor/golang.org/x/text/language:go_default_library", - "//vendor/golang.org/x/text/runes:go_default_library", - "//vendor/golang.org/x/text/secure/bidirule:go_default_library", - 
"//vendor/golang.org/x/text/transform:go_default_library", - "//vendor/golang.org/x/text/unicode/norm:go_default_library", - "//vendor/golang.org/x/text/width:go_default_library", - ], -) diff --git a/vendor/golang.org/x/text/transform/BUILD.bazel b/vendor/golang.org/x/text/transform/BUILD.bazel deleted file mode 100644 index 3cd3f472269..00000000000 --- a/vendor/golang.org/x/text/transform/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["transform.go"], - importmap = "installer/vendor/golang.org/x/text/transform", - importpath = "golang.org/x/text/transform", - visibility = ["//visibility:public"], -) diff --git a/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel b/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel deleted file mode 100644 index 62c626008fb..00000000000 --- a/vendor/golang.org/x/text/unicode/bidi/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "bidi.go", - "bracket.go", - "core.go", - "prop.go", - "tables.go", - "trieval.go", - ], - importmap = "installer/vendor/golang.org/x/text/unicode/bidi", - importpath = "golang.org/x/text/unicode/bidi", - visibility = ["//visibility:public"], -) diff --git a/vendor/golang.org/x/text/unicode/norm/BUILD.bazel b/vendor/golang.org/x/text/unicode/norm/BUILD.bazel deleted file mode 100644 index fc5898d7730..00000000000 --- a/vendor/golang.org/x/text/unicode/norm/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "composition.go", - "forminfo.go", - "input.go", - "iter.go", - "normalize.go", - "readwriter.go", - "tables.go", - "transform.go", - "trie.go", - ], - importmap = "installer/vendor/golang.org/x/text/unicode/norm", - importpath = "golang.org/x/text/unicode/norm", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/text/transform:go_default_library"], -) diff --git a/vendor/golang.org/x/text/width/BUILD.bazel b/vendor/golang.org/x/text/width/BUILD.bazel deleted file mode 100644 index 2203cbe9cac..00000000000 --- a/vendor/golang.org/x/text/width/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "kind_string.go", - "tables.go", - "transform.go", - "trieval.go", - "width.go", - ], - importmap = "installer/vendor/golang.org/x/text/width", - importpath = "golang.org/x/text/width", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/text/transform:go_default_library"], -) diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/BUILD.bazel b/vendor/google.golang.org/genproto/googleapis/rpc/status/BUILD.bazel deleted file mode 100644 index 7b5a103f7aa..00000000000 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["status.pb.go"], - importmap = "installer/vendor/google.golang.org/genproto/googleapis/rpc/status", - importpath = "google.golang.org/genproto/googleapis/rpc/status", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - ], -) diff --git a/vendor/google.golang.org/grpc/BUILD.bazel 
b/vendor/google.golang.org/grpc/BUILD.bazel deleted file mode 100644 index e0bbbc96d36..00000000000 --- a/vendor/google.golang.org/grpc/BUILD.bazel +++ /dev/null @@ -1,44 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "backoff.go", - "balancer.go", - "call.go", - "clientconn.go", - "codec.go", - "doc.go", - "go16.go", - "go17.go", - "grpclb.go", - "interceptor.go", - "proxy.go", - "rpc_util.go", - "server.go", - "stream.go", - "trace.go", - ], - importmap = "installer/vendor/google.golang.org/grpc", - importpath = "google.golang.org/grpc", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/http2:go_default_library", - "//vendor/golang.org/x/net/trace:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/grpclb/grpc_lb_v1:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/internal:go_default_library", - "//vendor/google.golang.org/grpc/keepalive:go_default_library", - "//vendor/google.golang.org/grpc/metadata:go_default_library", - "//vendor/google.golang.org/grpc/naming:go_default_library", - "//vendor/google.golang.org/grpc/peer:go_default_library", - "//vendor/google.golang.org/grpc/stats:go_default_library", - "//vendor/google.golang.org/grpc/status:go_default_library", - "//vendor/google.golang.org/grpc/tap:go_default_library", - "//vendor/google.golang.org/grpc/transport:go_default_library", - ], -) diff --git a/vendor/google.golang.org/grpc/codes/BUILD.bazel b/vendor/google.golang.org/grpc/codes/BUILD.bazel deleted file mode 100644 index bcac3b9829c..00000000000 --- a/vendor/google.golang.org/grpc/codes/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "code_string.go", - "codes.go", - ], - importmap = "installer/vendor/google.golang.org/grpc/codes", - importpath = "google.golang.org/grpc/codes", - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/grpc/credentials/BUILD.bazel b/vendor/google.golang.org/grpc/credentials/BUILD.bazel deleted file mode 100644 index 94e2a1f6ba5..00000000000 --- a/vendor/google.golang.org/grpc/credentials/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "credentials.go", - "credentials_util_go17.go", - "credentials_util_go18.go", - "credentials_util_pre_go17.go", - ], - importmap = "installer/vendor/google.golang.org/grpc/credentials", - importpath = "google.golang.org/grpc/credentials", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/net/context:go_default_library"], -) diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD.bazel b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD.bazel deleted file mode 100644 index c690c44ef2b..00000000000 --- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["grpclb.pb.go"], - importmap = "installer/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1", - importpath = 
"google.golang.org/grpc/grpclb/grpc_lb_v1", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/protobuf/proto:go_default_library"], -) diff --git a/vendor/google.golang.org/grpc/grpclog/BUILD.bazel b/vendor/google.golang.org/grpc/grpclog/BUILD.bazel deleted file mode 100644 index 95a7c1c7c7f..00000000000 --- a/vendor/google.golang.org/grpc/grpclog/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["logger.go"], - importmap = "installer/vendor/google.golang.org/grpc/grpclog", - importpath = "google.golang.org/grpc/grpclog", - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/grpc/internal/BUILD.bazel b/vendor/google.golang.org/grpc/internal/BUILD.bazel deleted file mode 100644 index f6b8578ec8e..00000000000 --- a/vendor/google.golang.org/grpc/internal/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["internal.go"], - importmap = "installer/vendor/google.golang.org/grpc/internal", - importpath = "google.golang.org/grpc/internal", - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/grpc/keepalive/BUILD.bazel b/vendor/google.golang.org/grpc/keepalive/BUILD.bazel deleted file mode 100644 index dc7df32f634..00000000000 --- a/vendor/google.golang.org/grpc/keepalive/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["keepalive.go"], - importmap = "installer/vendor/google.golang.org/grpc/keepalive", - importpath = "google.golang.org/grpc/keepalive", - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/grpc/metadata/BUILD.bazel b/vendor/google.golang.org/grpc/metadata/BUILD.bazel deleted file mode 100644 index ede1e73b651..00000000000 --- a/vendor/google.golang.org/grpc/metadata/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["metadata.go"], - importmap = "installer/vendor/google.golang.org/grpc/metadata", - importpath = "google.golang.org/grpc/metadata", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/net/context:go_default_library"], -) diff --git a/vendor/google.golang.org/grpc/naming/BUILD.bazel b/vendor/google.golang.org/grpc/naming/BUILD.bazel deleted file mode 100644 index df2f2d04101..00000000000 --- a/vendor/google.golang.org/grpc/naming/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["naming.go"], - importmap = "installer/vendor/google.golang.org/grpc/naming", - importpath = "google.golang.org/grpc/naming", - visibility = ["//visibility:public"], -) diff --git a/vendor/google.golang.org/grpc/peer/BUILD.bazel b/vendor/google.golang.org/grpc/peer/BUILD.bazel deleted file mode 100644 index 3df48c994da..00000000000 --- a/vendor/google.golang.org/grpc/peer/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["peer.go"], - importmap = "installer/vendor/google.golang.org/grpc/peer", - importpath = "google.golang.org/grpc/peer", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - 
"//vendor/google.golang.org/grpc/credentials:go_default_library", - ], -) diff --git a/vendor/google.golang.org/grpc/stats/BUILD.bazel b/vendor/google.golang.org/grpc/stats/BUILD.bazel deleted file mode 100644 index bcc11041fbb..00000000000 --- a/vendor/google.golang.org/grpc/stats/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "handlers.go", - "stats.go", - ], - importmap = "installer/vendor/google.golang.org/grpc/stats", - importpath = "google.golang.org/grpc/stats", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/net/context:go_default_library"], -) diff --git a/vendor/google.golang.org/grpc/status/BUILD.bazel b/vendor/google.golang.org/grpc/status/BUILD.bazel deleted file mode 100644 index 1ca4d69fa0c..00000000000 --- a/vendor/google.golang.org/grpc/status/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["status.go"], - importmap = "installer/vendor/google.golang.org/grpc/status", - importpath = "google.golang.org/grpc/status", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/google.golang.org/genproto/googleapis/rpc/status:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - ], -) diff --git a/vendor/google.golang.org/grpc/tap/BUILD.bazel b/vendor/google.golang.org/grpc/tap/BUILD.bazel deleted file mode 100644 index 98b091c9496..00000000000 --- a/vendor/google.golang.org/grpc/tap/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["tap.go"], - importmap = "installer/vendor/google.golang.org/grpc/tap", - importpath = "google.golang.org/grpc/tap", - visibility = ["//visibility:public"], - deps = ["//vendor/golang.org/x/net/context:go_default_library"], -) diff --git a/vendor/google.golang.org/grpc/transport/BUILD.bazel b/vendor/google.golang.org/grpc/transport/BUILD.bazel deleted file mode 100644 index d0130ea437c..00000000000 --- a/vendor/google.golang.org/grpc/transport/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "control.go", - "go16.go", - "go17.go", - "handler_server.go", - "http2_client.go", - "http2_server.go", - "http_util.go", - "transport.go", - ], - importmap = "installer/vendor/google.golang.org/grpc/transport", - importpath = "google.golang.org/grpc/transport", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/golang.org/x/net/http2:go_default_library", - "//vendor/golang.org/x/net/http2/hpack:go_default_library", - "//vendor/google.golang.org/genproto/googleapis/rpc/status:go_default_library", - "//vendor/google.golang.org/grpc/codes:go_default_library", - "//vendor/google.golang.org/grpc/credentials:go_default_library", - "//vendor/google.golang.org/grpc/grpclog:go_default_library", - "//vendor/google.golang.org/grpc/keepalive:go_default_library", - "//vendor/google.golang.org/grpc/metadata:go_default_library", - "//vendor/google.golang.org/grpc/peer:go_default_library", - "//vendor/google.golang.org/grpc/stats:go_default_library", - 
"//vendor/google.golang.org/grpc/status:go_default_library", - "//vendor/google.golang.org/grpc/tap:go_default_library", - ], -) diff --git a/vendor/gopkg.in/AlecAivazis/survey.v1/BUILD.bazel b/vendor/gopkg.in/AlecAivazis/survey.v1/BUILD.bazel deleted file mode 100644 index cdab7ed4fe6..00000000000 --- a/vendor/gopkg.in/AlecAivazis/survey.v1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "confirm.go", - "editor.go", - "input.go", - "multiselect.go", - "password.go", - "select.go", - "survey.go", - "transform.go", - "validate.go", - ], - importmap = "installer/vendor/gopkg.in/AlecAivazis/survey.v1", - importpath = "gopkg.in/AlecAivazis/survey.v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/kballard/go-shellquote:go_default_library", - "//vendor/gopkg.in/AlecAivazis/survey.v1/core:go_default_library", - "//vendor/gopkg.in/AlecAivazis/survey.v1/terminal:go_default_library", - ], -) diff --git a/vendor/gopkg.in/AlecAivazis/survey.v1/core/BUILD.bazel b/vendor/gopkg.in/AlecAivazis/survey.v1/core/BUILD.bazel deleted file mode 100644 index 61ebe8ec643..00000000000 --- a/vendor/gopkg.in/AlecAivazis/survey.v1/core/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "renderer.go", - "template.go", - "write.go", - ], - importmap = "installer/vendor/gopkg.in/AlecAivazis/survey.v1/core", - importpath = "gopkg.in/AlecAivazis/survey.v1/core", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/mgutz/ansi:go_default_library", - "//vendor/gopkg.in/AlecAivazis/survey.v1/terminal:go_default_library", - ], -) diff --git a/vendor/gopkg.in/AlecAivazis/survey.v1/terminal/BUILD.bazel b/vendor/gopkg.in/AlecAivazis/survey.v1/terminal/BUILD.bazel deleted file mode 100644 index fa44ede92e1..00000000000 --- a/vendor/gopkg.in/AlecAivazis/survey.v1/terminal/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "buffered_reader.go", - "cursor.go", - "cursor_windows.go", - "display.go", - "display_posix.go", - "display_windows.go", - "error.go", - "output.go", - "output_windows.go", - "runereader.go", - "runereader_bsd.go", - "runereader_linux.go", - "runereader_posix.go", - "runereader_windows.go", - "sequences.go", - "stdio.go", - "syscall_windows.go", - "terminal.go", - ], - importmap = "installer/vendor/gopkg.in/AlecAivazis/survey.v1/terminal", - importpath = "gopkg.in/AlecAivazis/survey.v1/terminal", - visibility = ["//visibility:public"], - deps = select({ - "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/mattn/go-isatty:go_default_library", - ], - "//conditions:default": [], - }), -) diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/BUILD.bazel b/vendor/gopkg.in/alecthomas/kingpin.v2/BUILD.bazel deleted file mode 100644 index 75e30bc35c4..00000000000 --- a/vendor/gopkg.in/alecthomas/kingpin.v2/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "actions.go", - "app.go", - "args.go", - "cmd.go", - "completions.go", - "doc.go", - "envar.go", - "flags.go", - "global.go", - "guesswidth.go", - "guesswidth_unix.go", - "model.go", - "parser.go", - "parsers.go", - "templates.go", - "usage.go", - "values.go", - "values_generated.go", - ], - 
importmap = "installer/vendor/gopkg.in/alecthomas/kingpin.v2", - importpath = "gopkg.in/alecthomas/kingpin.v2", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/alecthomas/template:go_default_library", - "//vendor/github.com/alecthomas/units:go_default_library", - ], -) diff --git a/vendor/gopkg.in/inf.v0/BUILD.bazel b/vendor/gopkg.in/inf.v0/BUILD.bazel deleted file mode 100644 index a78e229233f..00000000000 --- a/vendor/gopkg.in/inf.v0/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "dec.go", - "rounder.go", - ], - importmap = "installer/vendor/gopkg.in/inf.v0", - importpath = "gopkg.in/inf.v0", - visibility = ["//visibility:public"], -) diff --git a/vendor/gopkg.in/yaml.v2/BUILD.bazel b/vendor/gopkg.in/yaml.v2/BUILD.bazel deleted file mode 100644 index 47e11cf8b55..00000000000 --- a/vendor/gopkg.in/yaml.v2/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apic.go", - "decode.go", - "emitterc.go", - "encode.go", - "parserc.go", - "readerc.go", - "resolve.go", - "scannerc.go", - "sorter.go", - "writerc.go", - "yaml.go", - "yamlh.go", - "yamlprivateh.go", - ], - importmap = "installer/vendor/gopkg.in/yaml.v2", - importpath = "gopkg.in/yaml.v2", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/api/admission/v1beta1/BUILD.bazel b/vendor/k8s.io/api/admission/v1beta1/BUILD.bazel deleted file mode 100644 index 47ae2b037b2..00000000000 --- a/vendor/k8s.io/api/admission/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/admission/v1beta1", - importpath = "k8s.io/api/admission/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/authentication/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/BUILD.bazel b/vendor/k8s.io/api/admissionregistration/v1alpha1/BUILD.bazel deleted file mode 100644 index 3688672bddc..00000000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/admissionregistration/v1alpha1", - importpath = "k8s.io/api/admissionregistration/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git 
a/vendor/k8s.io/api/admissionregistration/v1beta1/BUILD.bazel b/vendor/k8s.io/api/admissionregistration/v1beta1/BUILD.bazel deleted file mode 100644 index a0e6f368ae3..00000000000 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/admissionregistration/v1beta1", - importpath = "k8s.io/api/admissionregistration/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/apps/v1/BUILD.bazel b/vendor/k8s.io/api/apps/v1/BUILD.bazel deleted file mode 100644 index 9d12e30b430..00000000000 --- a/vendor/k8s.io/api/apps/v1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/apps/v1", - importpath = "k8s.io/api/apps/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/apps/v1beta1/BUILD.bazel b/vendor/k8s.io/api/apps/v1beta1/BUILD.bazel deleted file mode 100644 index d0a084f43f6..00000000000 --- a/vendor/k8s.io/api/apps/v1beta1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/apps/v1beta1", - importpath = "k8s.io/api/apps/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/apps/v1beta2/BUILD.bazel b/vendor/k8s.io/api/apps/v1beta2/BUILD.bazel deleted file mode 100644 index 40da90328f4..00000000000 --- a/vendor/k8s.io/api/apps/v1beta2/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - 
"zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/apps/v1beta2", - importpath = "k8s.io/api/apps/v1beta2", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/authentication/v1/BUILD.bazel b/vendor/k8s.io/api/authentication/v1/BUILD.bazel deleted file mode 100644 index 68edbdc73b7..00000000000 --- a/vendor/k8s.io/api/authentication/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/authentication/v1", - importpath = "k8s.io/api/authentication/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/authentication/v1beta1/BUILD.bazel b/vendor/k8s.io/api/authentication/v1beta1/BUILD.bazel deleted file mode 100644 index a832d59d71e..00000000000 --- a/vendor/k8s.io/api/authentication/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/authentication/v1beta1", - importpath = "k8s.io/api/authentication/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/authorization/v1/BUILD.bazel b/vendor/k8s.io/api/authorization/v1/BUILD.bazel deleted file mode 100644 index 1475e5a4be8..00000000000 --- a/vendor/k8s.io/api/authorization/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/authorization/v1", - importpath = "k8s.io/api/authorization/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/authorization/v1beta1/BUILD.bazel b/vendor/k8s.io/api/authorization/v1beta1/BUILD.bazel deleted file mode 100644 index fd25bc6718a..00000000000 --- a/vendor/k8s.io/api/authorization/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/authorization/v1beta1", - importpath = "k8s.io/api/authorization/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/autoscaling/v1/BUILD.bazel b/vendor/k8s.io/api/autoscaling/v1/BUILD.bazel deleted file mode 100644 index 61b5da1dcd7..00000000000 --- a/vendor/k8s.io/api/autoscaling/v1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/autoscaling/v1", - importpath = "k8s.io/api/autoscaling/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/BUILD.bazel b/vendor/k8s.io/api/autoscaling/v2beta1/BUILD.bazel deleted file mode 100644 index fc5f9f94fdb..00000000000 --- a/vendor/k8s.io/api/autoscaling/v2beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/autoscaling/v2beta1", - importpath = "k8s.io/api/autoscaling/v2beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/batch/v1/BUILD.bazel b/vendor/k8s.io/api/batch/v1/BUILD.bazel deleted file mode 100644 index aaebd39c57f..00000000000 --- a/vendor/k8s.io/api/batch/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", 
"go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/batch/v1", - importpath = "k8s.io/api/batch/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/batch/v1beta1/BUILD.bazel b/vendor/k8s.io/api/batch/v1beta1/BUILD.bazel deleted file mode 100644 index bb6fe278aa9..00000000000 --- a/vendor/k8s.io/api/batch/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/batch/v1beta1", - importpath = "k8s.io/api/batch/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/batch/v2alpha1/BUILD.bazel b/vendor/k8s.io/api/batch/v2alpha1/BUILD.bazel deleted file mode 100644 index cd017776ae3..00000000000 --- a/vendor/k8s.io/api/batch/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/batch/v2alpha1", - importpath = "k8s.io/api/batch/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/certificates/v1beta1/BUILD.bazel b/vendor/k8s.io/api/certificates/v1beta1/BUILD.bazel deleted file mode 100644 index 2925d2b44e6..00000000000 --- a/vendor/k8s.io/api/certificates/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/certificates/v1beta1", - importpath = "k8s.io/api/certificates/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/core/v1/BUILD.bazel b/vendor/k8s.io/api/core/v1/BUILD.bazel deleted file mode 100644 index d9b174ed4c7..00000000000 --- a/vendor/k8s.io/api/core/v1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "annotation_key_constants.go", - "doc.go", - "generated.pb.go", - "meta.go", - "objectreference.go", - "register.go", - "resource.go", - "taint.go", - "toleration.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/core/v1", - importpath = "k8s.io/api/core/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/events/v1beta1/BUILD.bazel b/vendor/k8s.io/api/events/v1beta1/BUILD.bazel deleted file mode 100644 index ba34a8c8efb..00000000000 --- a/vendor/k8s.io/api/events/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/events/v1beta1", - importpath = "k8s.io/api/events/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/extensions/v1beta1/BUILD.bazel b/vendor/k8s.io/api/extensions/v1beta1/BUILD.bazel deleted file mode 100644 index e6bb8965904..00000000000 --- a/vendor/k8s.io/api/extensions/v1beta1/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/extensions/v1beta1", - importpath = "k8s.io/api/extensions/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/networking/v1/BUILD.bazel b/vendor/k8s.io/api/networking/v1/BUILD.bazel deleted file mode 100644 index 83e1474b77d..00000000000 --- a/vendor/k8s.io/api/networking/v1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/networking/v1", - importpath = "k8s.io/api/networking/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/policy/v1beta1/BUILD.bazel b/vendor/k8s.io/api/policy/v1beta1/BUILD.bazel deleted file mode 100644 index 231d8c3359b..00000000000 --- a/vendor/k8s.io/api/policy/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/policy/v1beta1", - importpath = "k8s.io/api/policy/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/rbac/v1/BUILD.bazel b/vendor/k8s.io/api/rbac/v1/BUILD.bazel deleted file mode 100644 index 3ffb7b4eb3a..00000000000 --- a/vendor/k8s.io/api/rbac/v1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/rbac/v1", - importpath = "k8s.io/api/rbac/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/BUILD.bazel b/vendor/k8s.io/api/rbac/v1alpha1/BUILD.bazel deleted file mode 100644 index deaadaf6182..00000000000 --- a/vendor/k8s.io/api/rbac/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - 
"doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/rbac/v1alpha1", - importpath = "k8s.io/api/rbac/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/rbac/v1beta1/BUILD.bazel b/vendor/k8s.io/api/rbac/v1beta1/BUILD.bazel deleted file mode 100644 index e1694c3d9ee..00000000000 --- a/vendor/k8s.io/api/rbac/v1beta1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/rbac/v1beta1", - importpath = "k8s.io/api/rbac/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/BUILD.bazel b/vendor/k8s.io/api/scheduling/v1alpha1/BUILD.bazel deleted file mode 100644 index f97edc0168d..00000000000 --- a/vendor/k8s.io/api/scheduling/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/scheduling/v1alpha1", - importpath = "k8s.io/api/scheduling/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/settings/v1alpha1/BUILD.bazel b/vendor/k8s.io/api/settings/v1alpha1/BUILD.bazel deleted file mode 100644 index 291a968fdd4..00000000000 --- a/vendor/k8s.io/api/settings/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/settings/v1alpha1", - importpath = "k8s.io/api/settings/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/storage/v1/BUILD.bazel b/vendor/k8s.io/api/storage/v1/BUILD.bazel deleted file mode 100644 index 
383008d81c6..00000000000 --- a/vendor/k8s.io/api/storage/v1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/storage/v1", - importpath = "k8s.io/api/storage/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/storage/v1alpha1/BUILD.bazel b/vendor/k8s.io/api/storage/v1alpha1/BUILD.bazel deleted file mode 100644 index f7d79dd8158..00000000000 --- a/vendor/k8s.io/api/storage/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/storage/v1alpha1", - importpath = "k8s.io/api/storage/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/api/storage/v1beta1/BUILD.bazel b/vendor/k8s.io/api/storage/v1beta1/BUILD.bazel deleted file mode 100644 index 39d25623cf4..00000000000 --- a/vendor/k8s.io/api/storage/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/api/storage/v1beta1", - importpath = "k8s.io/api/storage/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD.bazel deleted file mode 100644 index 1d10bf74563..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/equality/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["semantic.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/api/equality", - importpath = "k8s.io/apimachinery/pkg/api/equality", - visibility = ["//visibility:public"], - deps = [ - 
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD.bazel deleted file mode 100644 index 64b2f365cc6..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/api/errors", - importpath = "k8s.io/apimachinery/pkg/api/errors", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel deleted file mode 100644 index 913770f2d92..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - "firsthit_restmapper.go", - "help.go", - "interfaces.go", - "lazy.go", - "meta.go", - "multirestmapper.go", - "priority.go", - "restmapper.go", - "unstructured.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/api/meta", - importpath = "k8s.io/apimachinery/pkg/api/meta", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD.bazel deleted file mode 100644 index 0fa80623d46..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "amount.go", - "generated.pb.go", - "math.go", - "quantity.go", - "quantity_proto.go", - "scale_int.go", - "suffix.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/api/resource", - importpath = "k8s.io/apimachinery/pkg/api/resource", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/gopkg.in/inf.v0:go_default_library", - 
"//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD.bazel deleted file mode 100644 index a5c2246bb1b..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/validation/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generic.go", - "objectmeta.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/api/validation", - importpath = "k8s.io/apimachinery/pkg/api/validation", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/path/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/api/validation/path/BUILD.bazel deleted file mode 100644 index 6568f09237a..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/validation/path/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["name.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/api/validation/path", - importpath = "k8s.io/apimachinery/pkg/api/validation/path", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD.bazel deleted file mode 100644 index 5fa9c94d4e6..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "types.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/apimachinery", - importpath = "k8s.io/apimachinery/pkg/apimachinery", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD.bazel deleted file mode 100644 index 3196e689a5e..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "announced.go", - "group_factory.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/apimachinery/announced", - importpath = "k8s.io/apimachinery/pkg/apimachinery/announced", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD.bazel deleted file mode 100644 index c0abda1da0d..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["registered.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/apimachinery/registered", - importpath = "k8s.io/apimachinery/pkg/apimachinery/registered", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD.bazel deleted file mode 100644 index b7b512dd099..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "doc.go", - "register.go", - "types.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion", - importpath = "k8s.io/apimachinery/pkg/apis/meta/internalversion", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD.bazel deleted file mode 100644 index 6f8480f5713..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/BUILD.bazel +++ /dev/null @@ -1,46 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "controller_ref.go", - "conversion.go", - "doc.go", - "duration.go", - "generated.pb.go", - "group_version.go", - "helpers.go", - "labels.go", - "meta.go", - "micro_time.go", - "micro_time_proto.go", - "register.go", - "time.go", - "time_proto.go", - "types.go", - "types_swagger_doc_generated.go", - "watch.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/apis/meta/v1", - importpath = 
"k8s.io/apimachinery/pkg/apis/meta/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/gogo/protobuf/sortkeys:go_default_library", - "//vendor/github.com/google/gofuzz:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD.bazel deleted file mode 100644 index 451354d0ad3..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "helpers.go", - "unstructured.go", - "unstructured_list.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD.bazel deleted file mode 100644 index 86bdca6bd12..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["validation.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation", - importpath = "k8s.io/apimachinery/pkg/apis/meta/v1/validation", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD.bazel deleted file mode 100644 index 770b73e15d4..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "deepcopy.go", - "doc.go", - 
"generated.pb.go", - "register.go", - "types.go", - "types_swagger_doc_generated.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1", - importpath = "k8s.io/apimachinery/pkg/apis/meta/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/conversion/BUILD.bazel deleted file mode 100644 index e58cc07fec7..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/conversion/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "converter.go", - "deep_equal.go", - "doc.go", - "helper.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/conversion", - importpath = "k8s.io/apimachinery/pkg/conversion", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/third_party/forked/golang/reflect:go_default_library"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD.bazel deleted file mode 100644 index a021e88d924..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "convert.go", - "doc.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/conversion/queryparams", - importpath = "k8s.io/apimachinery/pkg/conversion/queryparams", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/fields/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/fields/BUILD.bazel deleted file mode 100644 index 16fdeab83d2..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/fields/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "fields.go", - "requirements.go", - "selector.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/fields", - importpath = "k8s.io/apimachinery/pkg/fields", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/selection:go_default_library"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel deleted file mode 100644 index b84fd6c57c2..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/labels/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "labels.go", - "selector.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/labels", - importpath = "k8s.io/apimachinery/pkg/labels", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel deleted file mode 100644 index 26c9492f8cc..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/BUILD.bazel +++ /dev/null @@ -1,38 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "codec.go", - "codec_check.go", - "conversion.go", - "converter.go", - "doc.go", - "embedded.go", - "error.go", - "extension.go", - "generated.pb.go", - "helper.go", - "interfaces.go", - "register.go", - "scheme.go", - "scheme_builder.go", - "swagger_doc_generator.go", - "types.go", - "types_proto.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/runtime", - importpath = "k8s.io/apimachinery/pkg/runtime", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion/queryparams:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD.bazel deleted file mode 100644 index 4b80e6bf72f..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "generated.pb.go", - "group_version.go", - "interfaces.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/runtime/schema", - importpath = "k8s.io/apimachinery/pkg/runtime/schema", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/gogo/protobuf/proto:go_default_library"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD.bazel deleted file mode 100644 index 3a2fcc410a3..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "codec_factory.go", - "negotiated_codec.go", - "protobuf_extension.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/runtime/serializer", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel deleted file mode 100644 
index 79640586b6f..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "json.go", - "meta.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/json", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/json-iterator/go:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/framer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD.bazel deleted file mode 100644 index affb17402b5..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "protobuf.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/framer:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD.bazel deleted file mode 100644 index a843cba775e..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["recognizer.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD.bazel deleted file mode 100644 index e1d38aa8eb2..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["streaming.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/streaming", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD.bazel deleted file mode 100644 index dbe2101edd7..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["versioning.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning", - importpath = "k8s.io/apimachinery/pkg/runtime/serializer/versioning", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/selection/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/selection/BUILD.bazel deleted file mode 100644 index 448f55e4e7c..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/selection/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["operator.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/selection", - importpath = "k8s.io/apimachinery/pkg/selection", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/types/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/types/BUILD.bazel deleted file mode 100644 index 56f999a3312..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/types/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "namespacedname.go", - "nodename.go", - "patch.go", - "uid.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/types", - importpath = "k8s.io/apimachinery/pkg/types", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD.bazel deleted file mode 100644 index f65219c3d15..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cache.go", - "lruexpirecache.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/cache", - importpath = "k8s.io/apimachinery/pkg/util/cache", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/hashicorp/golang-lru:go_default_library"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD.bazel deleted file mode 100644 index 7440c642238..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["clock.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/clock", - importpath = "k8s.io/apimachinery/pkg/util/clock", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD.bazel deleted file mode 100644 index 608987a4c49..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["diff.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/diff", - importpath = "k8s.io/apimachinery/pkg/util/diff", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD.bazel deleted file mode 100644 index b6280e7ad6a..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/errors", - importpath = "k8s.io/apimachinery/pkg/util/errors", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD.bazel deleted file mode 100644 index 4936be92087..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["framer.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/framer", - importpath = "k8s.io/apimachinery/pkg/util/framer", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/httpstream/BUILD.bazel deleted file mode 100644 index 80490b2aa06..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "httpstream.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/httpstream", - importpath = "k8s.io/apimachinery/pkg/util/httpstream", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel deleted file mode 100644 index a8bc04e4041..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "generated.pb.go", - "intstr.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/intstr", - importpath = "k8s.io/apimachinery/pkg/util/intstr", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/google/gofuzz:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/json/BUILD.bazel deleted file mode 100644 index 52b5d9b8e0d..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/json/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["json.go"], - importmap = 
"installer/vendor/k8s.io/apimachinery/pkg/util/json", - importpath = "k8s.io/apimachinery/pkg/util/json", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel deleted file mode 100644 index 2ae58c83301..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "util.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/mergepatch", - importpath = "k8s.io/apimachinery/pkg/util/mergepatch", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/ghodss/yaml:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel deleted file mode 100644 index a30f957b585..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/net/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "http.go", - "interface.go", - "port_range.go", - "port_split.go", - "util.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/net", - importpath = "k8s.io/apimachinery/pkg/util/net", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/http2:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD.bazel deleted file mode 100644 index e44bd09ce48..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "dial.go", - "doc.go", - "transport.go", - "upgradeaware.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/proxy", - importpath = "k8s.io/apimachinery/pkg/util/proxy", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/mxk/go-flowrate/flowrate:go_default_library", - "//vendor/golang.org/x/net/html:go_default_library", - "//vendor/golang.org/x/net/html/atom:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/third_party/forked/golang/netutil:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD.bazel deleted file mode 100644 index 5c6084fde80..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/rand/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["rand.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/rand", - importpath = "k8s.io/apimachinery/pkg/util/rand", - visibility = 
["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel deleted file mode 100644 index 4843fdb877e..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["runtime.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/runtime", - importpath = "k8s.io/apimachinery/pkg/util/runtime", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD.bazel deleted file mode 100644 index 9f103c3cffb..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "byte.go", - "doc.go", - "empty.go", - "int.go", - "int64.go", - "string.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/sets", - importpath = "k8s.io/apimachinery/pkg/util/sets", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD.bazel deleted file mode 100644 index 8514c46b187..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "meta.go", - "patch.go", - "types.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/strategicpatch", - importpath = "k8s.io/apimachinery/pkg/util/strategicpatch", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", - "//vendor/k8s.io/apimachinery/third_party/forked/golang/json:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD.bazel deleted file mode 100644 index 919bb93359e..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/uuid/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["uuid.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/uuid", - importpath = "k8s.io/apimachinery/pkg/util/uuid", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/pborman/uuid:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD.bazel deleted file mode 100644 index 42b66540750..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["validation.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/validation", - importpath = 
"k8s.io/apimachinery/pkg/util/validation", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD.bazel deleted file mode 100644 index 8304a6d7cf2..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "errors.go", - "path.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/validation/field", - importpath = "k8s.io/apimachinery/pkg/util/validation/field", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD.bazel deleted file mode 100644 index bb8f164c7d7..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "wait.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/wait", - importpath = "k8s.io/apimachinery/pkg/util/wait", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/BUILD.bazel deleted file mode 100644 index b9aa3085ee4..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "waitgroup.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/waitgroup", - importpath = "k8s.io/apimachinery/pkg/util/waitgroup", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel deleted file mode 100644 index 2b54786fbfe..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["decoder.go"], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/util/yaml", - importpath = "k8s.io/apimachinery/pkg/util/yaml", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/pkg/version/BUILD.bazel b/vendor/k8s.io/apimachinery/pkg/version/BUILD.bazel deleted file mode 100644 index 459723fc633..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/version/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "types.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/version", - importpath = "k8s.io/apimachinery/pkg/version", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel 
b/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel deleted file mode 100644 index 5650ee56d72..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/watch/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "filter.go", - "mux.go", - "streamwatcher.go", - "until.go", - "watch.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/apimachinery/pkg/watch", - importpath = "k8s.io/apimachinery/pkg/watch", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - ], -) diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD.bazel b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD.bazel deleted file mode 100644 index 85dc78cbd1c..00000000000 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["fields.go"], - importmap = "installer/vendor/k8s.io/apimachinery/third_party/forked/golang/json", - importpath = "k8s.io/apimachinery/third_party/forked/golang/json", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/BUILD.bazel b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/BUILD.bazel deleted file mode 100644 index 7a7bc94293a..00000000000 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["addr.go"], - importmap = "installer/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil", - importpath = "k8s.io/apimachinery/third_party/forked/golang/netutil", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD.bazel b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD.bazel deleted file mode 100644 index 48fe197f638..00000000000 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["deep_equal.go"], - importmap = "installer/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect", - importpath = "k8s.io/apimachinery/third_party/forked/golang/reflect", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/BUILD.bazel deleted file mode 100644 index dae41981bb1..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/BUILD.bazel +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "attributes.go", - "chain.go", - "config.go", - "errors.go", - "handler.go", - "interfaces.go", - "plugins.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission", - importpath = "k8s.io/apiserver/pkg/admission", - 
visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/ghodss/yaml:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD.bazel deleted file mode 100644 index 643b15cf857..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/configuration/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "configuration_manager.go", - "initializer_manager.go", - "mutating_webhook_manager.go", - "validating_webhook_manager.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/configuration", - importpath = "k8s.io/apiserver/pkg/admission/configuration", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD.bazel deleted file mode 100644 index b61562b9e21..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/initializer/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "initializer.go", - "interfaces.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/initializer", - importpath = "k8s.io/apiserver/pkg/admission/initializer", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/k8s.io/client-go/informers:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD.bazel deleted file mode 100644 index b6874b9e449..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/metrics/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["metrics.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/metrics", - 
importpath = "k8s.io/apiserver/pkg/admission/metrics", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD.bazel deleted file mode 100644 index e9d1fd65f73..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["initialization.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/initialization", - importpath = "k8s.io/apiserver/pkg/admission/plugin/initialization", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/configuration:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/k8s.io/apiserver/pkg/features:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD.bazel deleted file mode 100644 index d337f9bf6b3..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["admission.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle", - importpath = "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", - "//vendor/k8s.io/client-go/informers:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", - 
], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD.bazel deleted file mode 100644 index 8794f3d2ef9..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authentication.go", - "client.go", - "kubeconfig.go", - "serviceresolver.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config", - importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/config", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/hashicorp/golang-lru:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD.bazel deleted file mode 100644 index 8be434a2af2..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission", - importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/BUILD.bazel deleted file mode 100644 index 84c4eeafdac..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1", - importpath = 
"k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD.bazel deleted file mode 100644 index cd3d18a0307..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - "statuserror.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors", - importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/errors", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD.bazel deleted file mode 100644 index 7e06f56b401..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "admission.go", - "doc.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating", - importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/admission/v1beta1:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/configuration:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/metrics:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules:go_default_library", - 
"//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned:go_default_library", - "//vendor/k8s.io/client-go/informers:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD.bazel deleted file mode 100644 index fc654c8dae4..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "matcher.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace", - importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/namespace", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD.bazel deleted file mode 100644 index ce6d0dbb74c..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "admissionreview.go", - "doc.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request", - importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/request", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admission/v1beta1:go_default_library", - "//vendor/k8s.io/api/authentication/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD.bazel deleted file mode 100644 index b459b885ef5..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["rules.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules", - importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/rules", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD.bazel 
b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD.bazel deleted file mode 100644 index f901467d598..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "admission.go", - "doc.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating", - importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/validating", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/admission/v1beta1:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/configuration:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/metrics:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned:go_default_library", - "//vendor/k8s.io/client-go/informers:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD.bazel deleted file mode 100644 index 561f4268aa1..00000000000 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "attributes.go", - "conversion.go", - "doc.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned", - importpath = "k8s.io/apiserver/pkg/admission/plugin/webhook/versioned", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD.bazel deleted file mode 100644 index 9d69bb04a5a..00000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/BUILD.bazel +++ 
/dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/apis/apiserver", - importpath = "k8s.io/apiserver/pkg/apis/apiserver", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD.bazel deleted file mode 100644 index fd38a42d8b3..00000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["install.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/apis/apiserver/install", - importpath = "k8s.io/apiserver/pkg/apis/apiserver/install", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD.bazel deleted file mode 100644 index 05326ae4052..00000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "doc.go", - "register.go", - "types.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1", - importpath = "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/apiserver:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD.bazel deleted file mode 100644 index b65497465c4..00000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/apis/audit", - importpath = "k8s.io/apiserver/pkg/apis/audit", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD.bazel deleted file mode 100644 index ac9b0e191ab..00000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1", - importpath = "k8s.io/apiserver/pkg/apis/audit/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/authentication/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD.bazel deleted file mode 100644 index 2770faf1c75..00000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "doc.go", - "generated.pb.go", - "register.go", - "types.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1", - importpath = "k8s.io/apiserver/pkg/apis/audit/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/k8s.io/api/authentication/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/validation/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/apis/audit/validation/BUILD.bazel deleted file mode 100644 index 3e7f848eb98..00000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/validation/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["validation.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/apis/audit/validation", - importpath = "k8s.io/apiserver/pkg/apis/audit/validation", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - 
"//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/audit/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/audit/BUILD.bazel deleted file mode 100644 index d9ca07bf985..00000000000 --- a/vendor/k8s.io/apiserver/pkg/audit/BUILD.bazel +++ /dev/null @@ -1,33 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "format.go", - "metrics.go", - "request.go", - "scheme.go", - "types.go", - "union.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/audit", - importpath = "k8s.io/apiserver/pkg/audit", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/pborman/uuid:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD.bazel deleted file mode 100644 index 3a401047ca7..00000000000 --- a/vendor/k8s.io/apiserver/pkg/audit/policy/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "checker.go", - "reader.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/audit/policy", - importpath = "k8s.io/apiserver/pkg/audit/policy", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit/validation:go_default_library", - "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel deleted file mode 100644 index 920e48e3250..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interfaces.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/authenticator", - importpath = "k8s.io/apiserver/pkg/authentication/authenticator", - visibility = ["//visibility:public"], - deps = 
["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD.bazel deleted file mode 100644 index 36655e69842..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "delegating.go", - "loopback.go", - "requestheader.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory", - importpath = "k8s.io/apiserver/pkg/authentication/authenticatorfactory", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/group:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/request/anonymous:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/request/union:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/request/websocket:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/request/x509:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/util/cert:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/group/BUILD.bazel deleted file mode 100644 index 8d2265f0838..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/group/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authenticated_group_adder.go", - "group_adder.go", - "token_group_adder.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/group", - importpath = "k8s.io/apiserver/pkg/authentication/group", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD.bazel deleted file mode 100644 index 7a259ae7629..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["anonymous.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous", - importpath = "k8s.io/apiserver/pkg/authentication/request/anonymous", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - 
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD.bazel deleted file mode 100644 index 37d05068ae4..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["bearertoken.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken", - importpath = "k8s.io/apiserver/pkg/authentication/request/bearertoken", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD.bazel deleted file mode 100644 index c771b70b9ec..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["requestheader.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest", - importpath = "k8s.io/apiserver/pkg/authentication/request/headerrequest", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/request/x509:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/client-go/util/cert:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD.bazel deleted file mode 100644 index 3f6de7f5272..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/union/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["union.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/request/union", - importpath = "k8s.io/apiserver/pkg/authentication/request/union", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD.bazel deleted file mode 100644 index 043d0be048c..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["protocol.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/request/websocket", - importpath = "k8s.io/apiserver/pkg/authentication/request/websocket", - visibility = ["//visibility:public"], - deps = [ - 
"//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/wsstream:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD.bazel deleted file mode 100644 index 113e825e1b1..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "x509.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/request/x509", - importpath = "k8s.io/apiserver/pkg/authentication/request/x509", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD.bazel deleted file mode 100644 index 1e2feded4fe..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["util.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount", - importpath = "k8s.io/apiserver/pkg/authentication/serviceaccount", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD.bazel deleted file mode 100644 index f75e4aeea77..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["tokenfile.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile", - importpath = "k8s.io/apiserver/pkg/authentication/token/tokenfile", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD.bazel deleted file mode 100644 index bf5a687241c..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/user/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "user.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authentication/user", - importpath = "k8s.io/apiserver/pkg/authentication/user", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/BUILD.bazel 
b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/BUILD.bazel deleted file mode 100644 index 5e2e97d9476..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "interfaces.go", - "rule.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authorization/authorizer", - importpath = "k8s.io/apiserver/pkg/authorization/authorizer", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD.bazel deleted file mode 100644 index 75cc2c76f53..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "builtin.go", - "delegating.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory", - importpath = "k8s.io/apiserver/pkg/authorization/authorizerfactory", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/authorization/union/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/authorization/union/BUILD.bazel deleted file mode 100644 index 44da540ee96..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authorization/union/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["union.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/authorization/union", - importpath = "k8s.io/apiserver/pkg/authorization/union", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/BUILD.bazel deleted file mode 100644 index d06073829f8..00000000000 --- a/vendor/k8s.io/apiserver/pkg/endpoints/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apiserver.go", - "doc.go", - "groupversion.go", - "installer.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/endpoints", - importpath = "k8s.io/apiserver/pkg/endpoints", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/discovery:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/handlers:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD.bazel deleted file mode 100644 index 4c94ba72fe3..00000000000 --- a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "addresses.go", - "group.go", - "legacy.go", - "root.go", - "util.go", - "version.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/endpoints/discovery", - importpath = "k8s.io/apiserver/pkg/endpoints/discovery", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD.bazel deleted file mode 100644 index 544583a3ffb..00000000000 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/BUILD.bazel +++ /dev/null @@ -1,41 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "audit.go", - "authentication.go", - "authn_audit.go", - "authorization.go", - "doc.go", - "impersonation.go", - "legacy_audit.go", - "requestinfo.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/endpoints/filters", - importpath = "k8s.io/apiserver/pkg/endpoints/filters", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/pborman/uuid:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/k8s.io/api/authentication/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", - 
"//vendor/k8s.io/apiserver/pkg/audit:go_default_library", - "//vendor/k8s.io/apiserver/pkg/audit/policy:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/httplog:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD.bazel deleted file mode 100644 index 581f02a933c..00000000000 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/BUILD.bazel +++ /dev/null @@ -1,54 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "create.go", - "delete.go", - "doc.go", - "get.go", - "namer.go", - "patch.go", - "proxy.go", - "response.go", - "rest.go", - "update.go", - "watch.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/endpoints/handlers", - importpath = "k8s.io/apiserver/pkg/endpoints/handlers", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/websocket:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/httpstream:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/proxy:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/httplog:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", - 
"//vendor/k8s.io/apiserver/pkg/util/wsstream:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD.bazel deleted file mode 100644 index 531dcbc4a2a..00000000000 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - "negotiate.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation", - importpath = "k8s.io/apiserver/pkg/endpoints/handlers/negotiation", - visibility = ["//visibility:public"], - deps = [ - "//vendor/bitbucket.org/ww/goautoneg:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD.bazel deleted file mode 100644 index 26b529dc101..00000000000 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "errors.go", - "status.go", - "writers.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters", - importpath = "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/flushwriter:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/wsstream:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD.bazel deleted file mode 100644 index a7aa5e92d5d..00000000000 --- a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["metrics.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/endpoints/metrics", - importpath = "k8s.io/apiserver/pkg/endpoints/metrics", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - 
"//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/openapi/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/openapi/BUILD.bazel deleted file mode 100644 index 2434775e232..00000000000 --- a/vendor/k8s.io/apiserver/pkg/endpoints/openapi/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["openapi.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/endpoints/openapi", - importpath = "k8s.io/apiserver/pkg/endpoints/openapi", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/util:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel deleted file mode 100644 index 73ca5e47f7c..00000000000 --- a/vendor/k8s.io/apiserver/pkg/endpoints/request/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "doc.go", - "requestcontext.go", - "requestinfo.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/endpoints/request", - importpath = "k8s.io/apiserver/pkg/endpoints/request", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/audit:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/features/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/features/BUILD.bazel deleted file mode 100644 index c1b8e5e70c3..00000000000 --- a/vendor/k8s.io/apiserver/pkg/features/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["kube_features.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/features", - importpath = "k8s.io/apiserver/pkg/features", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD.bazel deleted file mode 100644 index 1f3b906c931..00000000000 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "matcher.go", - "options.go", - "storage_decorator.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/registry/generic", - importpath = "k8s.io/apiserver/pkg/registry/generic", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD.bazel deleted file mode 100644 index f9ddd9416ff..00000000000 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/BUILD.bazel +++ /dev/null @@ -1,40 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "decorated_watcher.go", - "doc.go", - "storage_factory.go", - "store.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/registry/generic/registry", - importpath = "k8s.io/apiserver/pkg/registry/generic/registry", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/validation/path:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/errors:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/registry/rest/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/registry/rest/BUILD.bazel deleted file mode 100644 index 24d59ee5214..00000000000 --- a/vendor/k8s.io/apiserver/pkg/registry/rest/BUILD.bazel +++ /dev/null @@ -1,39 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "create.go", - "delete.go", - "doc.go", - "export.go", - "meta.go", - "rest.go", - "table.go", - "update.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/registry/rest", - importpath = "k8s.io/apiserver/pkg/registry/rest", - visibility = ["//visibility:public"], - deps = [ - 
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/validation/path:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/features:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/server/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/BUILD.bazel deleted file mode 100644 index 55fcd16840d..00000000000 --- a/vendor/k8s.io/apiserver/pkg/server/BUILD.bazel +++ /dev/null @@ -1,76 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "config.go", - "config_selfclient.go", - "doc.go", - "genericapiserver.go", - "handler.go", - "healthz.go", - "hooks.go", - "plugins.go", - "serve.go", - "signal.go", - "signal_posix.go", - "signal_windows.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/server", - importpath = "k8s.io/apiserver/pkg/server", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/go-systemd/daemon:go_default_library", - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", - "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/pborman/uuid:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/initialization:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle:go_default_library", 
- "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating:go_default_library", - "//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating:go_default_library", - "//vendor/k8s.io/apiserver/pkg/apis/apiserver/install:go_default_library", - "//vendor/k8s.io/apiserver/pkg/audit:go_default_library", - "//vendor/k8s.io/apiserver/pkg/audit/policy:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/request/union:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/union:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/discovery:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/filters:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/features:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/mux:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/routes:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/client-go/informers:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/util/cert:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/filters/BUILD.bazel deleted file mode 100644 index da0d9a49d2f..00000000000 --- a/vendor/k8s.io/apiserver/pkg/server/filters/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "compression.go", - "cors.go", - "doc.go", - "longrunning.go", - "maxinflight.go", - "timeout.go", - "waitgroup.go", - "wrap.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/server/filters", - importpath = "k8s.io/apiserver/pkg/server/filters", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/waitgroup:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - 
"//vendor/k8s.io/apiserver/pkg/server/httplog:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD.bazel deleted file mode 100644 index e2f120823fc..00000000000 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "healthz.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/server/healthz", - importpath = "k8s.io/apiserver/pkg/server/healthz", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD.bazel deleted file mode 100644 index 3aceb2c1ace..00000000000 --- a/vendor/k8s.io/apiserver/pkg/server/httplog/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "httplog.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/server/httplog", - importpath = "k8s.io/apiserver/pkg/server/httplog", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/server/mux/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/mux/BUILD.bazel deleted file mode 100644 index b9402b37df8..00000000000 --- a/vendor/k8s.io/apiserver/pkg/server/mux/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "pathrecorder.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/server/mux", - importpath = "k8s.io/apiserver/pkg/server/mux", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/routes/BUILD.bazel deleted file mode 100644 index 32b935fc0d4..00000000000 --- a/vendor/k8s.io/apiserver/pkg/server/routes/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "index.go", - "metrics.go", - "openapi.go", - "profiling.go", - "swagger.go", - "swaggerui.go", - "version.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/server/routes", - importpath = "k8s.io/apiserver/pkg/server/routes", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/elazarl/go-bindata-assetfs:go_default_library", - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", - 
"//vendor/k8s.io/apiserver/pkg/server/mux:go_default_library", - "//vendor/k8s.io/apiserver/pkg/server/routes/data/swagger:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd/metrics:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/handler:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/data/swagger/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/server/routes/data/swagger/BUILD.bazel deleted file mode 100644 index a8614da3366..00000000000 --- a/vendor/k8s.io/apiserver/pkg/server/routes/data/swagger/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["datafile.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/server/routes/data/swagger", - importpath = "k8s.io/apiserver/pkg/server/routes/data/swagger", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/BUILD.bazel deleted file mode 100644 index 9bd51527002..00000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/BUILD.bazel +++ /dev/null @@ -1,40 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cacher.go", - "doc.go", - "errors.go", - "interfaces.go", - "selection_predicate.go", - "time_budget.go", - "util.go", - "watch_cache.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/storage", - importpath = "k8s.io/apiserver/pkg/storage", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/validation/path:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/apiserver/pkg/features:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/errors/BUILD.bazel deleted file mode 100644 index 6c490c2e352..00000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/errors/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "storage.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/storage/errors", - importpath = "k8s.io/apiserver/pkg/storage/errors", - visibility = 
["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD.bazel deleted file mode 100644 index 2b9e5bcd0de..00000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/BUILD.bazel +++ /dev/null @@ -1,30 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "api_object_versioner.go", - "doc.go", - "etcd_helper.go", - "etcd_watcher.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/storage/etcd", - importpath = "k8s.io/apiserver/pkg/storage/etcd", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/client:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd/metrics:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd/util:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/BUILD.bazel deleted file mode 100644 index 28ed94a80c6..00000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["metrics.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/storage/etcd/metrics", - importpath = "k8s.io/apiserver/pkg/storage/etcd/metrics", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD.bazel deleted file mode 100644 index 74d5bbd38ea..00000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd/util/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "etcd_util.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/storage/etcd/util", - importpath = "k8s.io/apiserver/pkg/storage/etcd/util", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/coreos/etcd/client:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD.bazel deleted file mode 100644 index 101fbe122a0..00000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - 
"compact.go", - "errors.go", - "event.go", - "store.go", - "watcher.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/storage/etcd3", - importpath = "k8s.io/apiserver/pkg/storage/etcd3", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", - "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/trace:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/names/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/names/BUILD.bazel deleted file mode 100644 index 120e6e4cf5c..00000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/names/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["generate.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/storage/names", - importpath = "k8s.io/apiserver/pkg/storage/names", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/BUILD.bazel deleted file mode 100644 index dcdf49ea599..00000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["config.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/storage/storagebackend", - importpath = "k8s.io/apiserver/pkg/storage/storagebackend", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD.bazel deleted file mode 100644 index 537c6ba8479..00000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "etcd2.go", - "etcd3.go", - "factory.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory", - importpath = "k8s.io/apiserver/pkg/storage/storagebackend/factory", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/coreos/etcd/client:go_default_library", - "//vendor/github.com/coreos/etcd/clientv3:go_default_library", - "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", - 
"//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/etcd3:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", - "//vendor/k8s.io/apiserver/pkg/storage/value:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/storage/value/BUILD.bazel deleted file mode 100644 index c240e13f610..00000000000 --- a/vendor/k8s.io/apiserver/pkg/storage/value/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["transformer.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/storage/value", - importpath = "k8s.io/apiserver/pkg/storage/value", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel deleted file mode 100644 index 001f760b7ec..00000000000 --- a/vendor/k8s.io/apiserver/pkg/util/feature/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["feature_gate.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/util/feature", - importpath = "k8s.io/apiserver/pkg/util/feature", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/util/flushwriter/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/flushwriter/BUILD.bazel deleted file mode 100644 index 628520243ab..00000000000 --- a/vendor/k8s.io/apiserver/pkg/util/flushwriter/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "writer.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/util/flushwriter", - importpath = "k8s.io/apiserver/pkg/util/flushwriter", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/apiserver/pkg/util/trace/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/trace/BUILD.bazel deleted file mode 100644 index 6b3fd87f056..00000000000 --- a/vendor/k8s.io/apiserver/pkg/util/trace/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["trace.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/util/trace", - importpath = "k8s.io/apiserver/pkg/util/trace", - visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], -) diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD.bazel deleted file mode 100644 index 511cd4e76dd..00000000000 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["webhook.go"], - importmap = "installer/vendor/k8s.io/apiserver/pkg/util/webhook", - importpath = "k8s.io/apiserver/pkg/util/webhook", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", 
- "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD.bazel b/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD.bazel deleted file mode 100644 index 7abf8e3a57c..00000000000 --- a/vendor/k8s.io/apiserver/pkg/util/wsstream/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conn.go", - "doc.go", - "stream.go", - ], - importmap = "installer/vendor/k8s.io/apiserver/pkg/util/wsstream", - importpath = "k8s.io/apiserver/pkg/util/wsstream", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/websocket:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD.bazel b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD.bazel deleted file mode 100644 index f8592c87edf..00000000000 --- a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["webhook.go"], - importmap = "installer/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook", - importpath = "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/webhook:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", - ], -) diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD.bazel b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD.bazel deleted file mode 100644 index 6a36bb9885c..00000000000 --- a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["webhook.go"], - importmap = "installer/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook", - importpath = "k8s.io/apiserver/plugin/pkg/authorizer/webhook", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/authorization/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/k8s.io/apiserver/pkg/util/webhook:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/discovery/BUILD.bazel b/vendor/k8s.io/client-go/discovery/BUILD.bazel deleted file mode 100644 index 9c209647330..00000000000 --- a/vendor/k8s.io/client-go/discovery/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "discovery_client.go", - "helper.go", - "restmapper.go", - "unstructured.go", - ], - importmap = "installer/vendor/k8s.io/client-go/discovery", - importpath = "k8s.io/client-go/discovery", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/version:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/BUILD.bazel b/vendor/k8s.io/client-go/informers/BUILD.bazel deleted file mode 100644 index 1b4458b5fb9..00000000000 --- a/vendor/k8s.io/client-go/informers/BUILD.bazel +++ /dev/null @@ -1,58 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "factory.go", - "generic.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers", - importpath = "k8s.io/client-go/informers", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/api/batch/v1beta1:go_default_library", - "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/events/v1beta1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/api/networking/v1:go_default_library", - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - 
"//vendor/k8s.io/api/rbac/v1:go_default_library", - "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", - "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", - "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", - "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", - "//vendor/k8s.io/api/storage/v1:go_default_library", - "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", - "//vendor/k8s.io/api/storage/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/client-go/informers/admissionregistration:go_default_library", - "//vendor/k8s.io/client-go/informers/apps:go_default_library", - "//vendor/k8s.io/client-go/informers/autoscaling:go_default_library", - "//vendor/k8s.io/client-go/informers/batch:go_default_library", - "//vendor/k8s.io/client-go/informers/certificates:go_default_library", - "//vendor/k8s.io/client-go/informers/core:go_default_library", - "//vendor/k8s.io/client-go/informers/events:go_default_library", - "//vendor/k8s.io/client-go/informers/extensions:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/informers/networking:go_default_library", - "//vendor/k8s.io/client-go/informers/policy:go_default_library", - "//vendor/k8s.io/client-go/informers/rbac:go_default_library", - "//vendor/k8s.io/client-go/informers/scheduling:go_default_library", - "//vendor/k8s.io/client-go/informers/settings:go_default_library", - "//vendor/k8s.io/client-go/informers/storage:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/BUILD.bazel b/vendor/k8s.io/client-go/informers/admissionregistration/BUILD.bazel deleted file mode 100644 index 814a335bc9b..00000000000 --- a/vendor/k8s.io/client-go/informers/admissionregistration/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/admissionregistration", - importpath = "k8s.io/client-go/informers/admissionregistration", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/informers/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/BUILD.bazel deleted file mode 100644 index b5f0fcb3582..00000000000 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "initializerconfiguration.go", - "interface.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1", - importpath = "k8s.io/client-go/informers/admissionregistration/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - 
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/BUILD.bazel deleted file mode 100644 index e82e5ef391f..00000000000 --- a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "interface.go", - "mutatingwebhookconfiguration.go", - "validatingwebhookconfiguration.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1", - importpath = "k8s.io/client-go/informers/admissionregistration/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/apps/BUILD.bazel b/vendor/k8s.io/client-go/informers/apps/BUILD.bazel deleted file mode 100644 index 71cb14eaf27..00000000000 --- a/vendor/k8s.io/client-go/informers/apps/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/apps", - importpath = "k8s.io/client-go/informers/apps", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/apps/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/apps/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/informers/apps/v1beta2:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/apps/v1/BUILD.bazel b/vendor/k8s.io/client-go/informers/apps/v1/BUILD.bazel deleted file mode 100644 index 07ddaada643..00000000000 --- a/vendor/k8s.io/client-go/informers/apps/v1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "controllerrevision.go", - "daemonset.go", - "deployment.go", - "interface.go", - "replicaset.go", - "statefulset.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/apps/v1", - importpath = "k8s.io/client-go/informers/apps/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/apps/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/informers/apps/v1beta1/BUILD.bazel deleted file mode 100644 index 5d2f0eb1967..00000000000 --- a/vendor/k8s.io/client-go/informers/apps/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "controllerrevision.go", - "deployment.go", - "interface.go", - "statefulset.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/apps/v1beta1", - importpath = "k8s.io/client-go/informers/apps/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/apps/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/BUILD.bazel b/vendor/k8s.io/client-go/informers/apps/v1beta2/BUILD.bazel deleted file mode 100644 index fb8f58385ae..00000000000 --- a/vendor/k8s.io/client-go/informers/apps/v1beta2/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "controllerrevision.go", - "daemonset.go", - "deployment.go", - "interface.go", - "replicaset.go", - "statefulset.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/apps/v1beta2", - importpath = "k8s.io/client-go/informers/apps/v1beta2", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/apps/v1beta2:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/autoscaling/BUILD.bazel b/vendor/k8s.io/client-go/informers/autoscaling/BUILD.bazel deleted file mode 100644 index 02379088db9..00000000000 --- a/vendor/k8s.io/client-go/informers/autoscaling/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/autoscaling", - importpath = "k8s.io/client-go/informers/autoscaling", - visibility = ["//visibility:public"], - deps = [ - 
"//vendor/k8s.io/client-go/informers/autoscaling/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/autoscaling/v2beta1:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/BUILD.bazel b/vendor/k8s.io/client-go/informers/autoscaling/v1/BUILD.bazel deleted file mode 100644 index 9a080ebf0c5..00000000000 --- a/vendor/k8s.io/client-go/informers/autoscaling/v1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "horizontalpodautoscaler.go", - "interface.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/autoscaling/v1", - importpath = "k8s.io/client-go/informers/autoscaling/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/autoscaling/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/BUILD.bazel b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/BUILD.bazel deleted file mode 100644 index 6cac46af1ae..00000000000 --- a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "horizontalpodautoscaler.go", - "interface.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/autoscaling/v2beta1", - importpath = "k8s.io/client-go/informers/autoscaling/v2beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/autoscaling/v2beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/batch/BUILD.bazel b/vendor/k8s.io/client-go/informers/batch/BUILD.bazel deleted file mode 100644 index 378a0df3110..00000000000 --- a/vendor/k8s.io/client-go/informers/batch/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/batch", - importpath = "k8s.io/client-go/informers/batch", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/batch/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/batch/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/informers/batch/v2alpha1:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - ], -) diff --git 
a/vendor/k8s.io/client-go/informers/batch/v1/BUILD.bazel b/vendor/k8s.io/client-go/informers/batch/v1/BUILD.bazel deleted file mode 100644 index e8c56cd8ae8..00000000000 --- a/vendor/k8s.io/client-go/informers/batch/v1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "interface.go", - "job.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/batch/v1", - importpath = "k8s.io/client-go/informers/batch/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/batch/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/informers/batch/v1beta1/BUILD.bazel deleted file mode 100644 index 68502f078b5..00000000000 --- a/vendor/k8s.io/client-go/informers/batch/v1beta1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cronjob.go", - "interface.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/batch/v1beta1", - importpath = "k8s.io/client-go/informers/batch/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/batch/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/batch/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/batch/v2alpha1/BUILD.bazel b/vendor/k8s.io/client-go/informers/batch/v2alpha1/BUILD.bazel deleted file mode 100644 index c5e7e12a860..00000000000 --- a/vendor/k8s.io/client-go/informers/batch/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cronjob.go", - "interface.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/batch/v2alpha1", - importpath = "k8s.io/client-go/informers/batch/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/batch/v2alpha1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/certificates/BUILD.bazel 
b/vendor/k8s.io/client-go/informers/certificates/BUILD.bazel deleted file mode 100644 index 92a6712ef91..00000000000 --- a/vendor/k8s.io/client-go/informers/certificates/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/certificates", - importpath = "k8s.io/client-go/informers/certificates", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/informers/certificates/v1beta1/BUILD.bazel deleted file mode 100644 index b8eb1a27ccf..00000000000 --- a/vendor/k8s.io/client-go/informers/certificates/v1beta1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "certificatesigningrequest.go", - "interface.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/certificates/v1beta1", - importpath = "k8s.io/client-go/informers/certificates/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/core/BUILD.bazel b/vendor/k8s.io/client-go/informers/core/BUILD.bazel deleted file mode 100644 index 9ce979e0d53..00000000000 --- a/vendor/k8s.io/client-go/informers/core/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/core", - importpath = "k8s.io/client-go/informers/core", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/core/v1/BUILD.bazel b/vendor/k8s.io/client-go/informers/core/v1/BUILD.bazel deleted file mode 100644 index eec0f5b8a0b..00000000000 --- a/vendor/k8s.io/client-go/informers/core/v1/BUILD.bazel +++ /dev/null @@ -1,37 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "componentstatus.go", - "configmap.go", - "endpoints.go", - "event.go", - "interface.go", - "limitrange.go", - "namespace.go", - "node.go", - "persistentvolume.go", - "persistentvolumeclaim.go", - "pod.go", - "podtemplate.go", - "replicationcontroller.go", - "resourcequota.go", - "secret.go", - "service.go", - "serviceaccount.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/core/v1", - importpath = "k8s.io/client-go/informers/core/v1", - visibility = ["//visibility:public"], - deps = [ - 
"//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/events/BUILD.bazel b/vendor/k8s.io/client-go/informers/events/BUILD.bazel deleted file mode 100644 index d39735075bf..00000000000 --- a/vendor/k8s.io/client-go/informers/events/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/events", - importpath = "k8s.io/client-go/informers/events", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/events/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/informers/events/v1beta1/BUILD.bazel deleted file mode 100644 index df92ebacde4..00000000000 --- a/vendor/k8s.io/client-go/informers/events/v1beta1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "event.go", - "interface.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/events/v1beta1", - importpath = "k8s.io/client-go/informers/events/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/events/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/events/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/extensions/BUILD.bazel b/vendor/k8s.io/client-go/informers/extensions/BUILD.bazel deleted file mode 100644 index fd13a5b50a1..00000000000 --- a/vendor/k8s.io/client-go/informers/extensions/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/extensions", - importpath = "k8s.io/client-go/informers/extensions", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/informers/extensions/v1beta1/BUILD.bazel deleted file mode 100644 index 9db39b1a721..00000000000 --- a/vendor/k8s.io/client-go/informers/extensions/v1beta1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = 
"go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "ingress.go", - "interface.go", - "podsecuritypolicy.go", - "replicaset.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/extensions/v1beta1", - importpath = "k8s.io/client-go/informers/extensions/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/internalinterfaces/BUILD.bazel b/vendor/k8s.io/client-go/informers/internalinterfaces/BUILD.bazel deleted file mode 100644 index 568ec309db8..00000000000 --- a/vendor/k8s.io/client-go/informers/internalinterfaces/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["factory_interfaces.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/internalinterfaces", - importpath = "k8s.io/client-go/informers/internalinterfaces", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/networking/BUILD.bazel b/vendor/k8s.io/client-go/informers/networking/BUILD.bazel deleted file mode 100644 index 91ccf419922..00000000000 --- a/vendor/k8s.io/client-go/informers/networking/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/networking", - importpath = "k8s.io/client-go/informers/networking", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/informers/networking/v1:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/networking/v1/BUILD.bazel b/vendor/k8s.io/client-go/informers/networking/v1/BUILD.bazel deleted file mode 100644 index 1b1a479e01e..00000000000 --- a/vendor/k8s.io/client-go/informers/networking/v1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "interface.go", - "networkpolicy.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/networking/v1", - importpath = "k8s.io/client-go/informers/networking/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/networking/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - 
"//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/networking/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/policy/BUILD.bazel b/vendor/k8s.io/client-go/informers/policy/BUILD.bazel deleted file mode 100644 index 2892108bfe8..00000000000 --- a/vendor/k8s.io/client-go/informers/policy/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/policy", - importpath = "k8s.io/client-go/informers/policy", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/informers/policy/v1beta1:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/informers/policy/v1beta1/BUILD.bazel deleted file mode 100644 index a794af698e5..00000000000 --- a/vendor/k8s.io/client-go/informers/policy/v1beta1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "interface.go", - "poddisruptionbudget.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/policy/v1beta1", - importpath = "k8s.io/client-go/informers/policy/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/policy/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/rbac/BUILD.bazel b/vendor/k8s.io/client-go/informers/rbac/BUILD.bazel deleted file mode 100644 index 64dbb4e5402..00000000000 --- a/vendor/k8s.io/client-go/informers/rbac/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/rbac", - importpath = "k8s.io/client-go/informers/rbac", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/informers/rbac/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/rbac/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/informers/rbac/v1beta1:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/BUILD.bazel b/vendor/k8s.io/client-go/informers/rbac/v1/BUILD.bazel deleted file mode 100644 index 672f62b4186..00000000000 --- a/vendor/k8s.io/client-go/informers/rbac/v1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "interface.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/rbac/v1", - importpath = 
"k8s.io/client-go/informers/rbac/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/rbac/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/rbac/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/BUILD.bazel deleted file mode 100644 index d68f3023b4e..00000000000 --- a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "interface.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/rbac/v1alpha1", - importpath = "k8s.io/client-go/informers/rbac/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/rbac/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/informers/rbac/v1beta1/BUILD.bazel deleted file mode 100644 index 7b686c30715..00000000000 --- a/vendor/k8s.io/client-go/informers/rbac/v1beta1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "interface.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/rbac/v1beta1", - importpath = "k8s.io/client-go/informers/rbac/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/rbac/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/scheduling/BUILD.bazel b/vendor/k8s.io/client-go/informers/scheduling/BUILD.bazel deleted file mode 100644 index 108161a7ce1..00000000000 --- a/vendor/k8s.io/client-go/informers/scheduling/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/scheduling", - importpath = 
"k8s.io/client-go/informers/scheduling", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/informers/scheduling/v1alpha1:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/BUILD.bazel deleted file mode 100644 index c13111d5360..00000000000 --- a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "interface.go", - "priorityclass.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/scheduling/v1alpha1", - importpath = "k8s.io/client-go/informers/scheduling/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/scheduling/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/settings/BUILD.bazel b/vendor/k8s.io/client-go/informers/settings/BUILD.bazel deleted file mode 100644 index 56779281163..00000000000 --- a/vendor/k8s.io/client-go/informers/settings/BUILD.bazel +++ /dev/null @@ -1,13 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/settings", - importpath = "k8s.io/client-go/informers/settings", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/informers/settings/v1alpha1:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/informers/settings/v1alpha1/BUILD.bazel deleted file mode 100644 index 9e23d88c0b7..00000000000 --- a/vendor/k8s.io/client-go/informers/settings/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "interface.go", - "podpreset.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/settings/v1alpha1", - importpath = "k8s.io/client-go/informers/settings/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/settings/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/storage/BUILD.bazel b/vendor/k8s.io/client-go/informers/storage/BUILD.bazel deleted file mode 100644 index 
e52e360e724..00000000000 --- a/vendor/k8s.io/client-go/informers/storage/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["interface.go"], - importmap = "installer/vendor/k8s.io/client-go/informers/storage", - importpath = "k8s.io/client-go/informers/storage", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/informers/storage/v1:go_default_library", - "//vendor/k8s.io/client-go/informers/storage/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/informers/storage/v1beta1:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/storage/v1/BUILD.bazel b/vendor/k8s.io/client-go/informers/storage/v1/BUILD.bazel deleted file mode 100644 index 07c92dbeac8..00000000000 --- a/vendor/k8s.io/client-go/informers/storage/v1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "interface.go", - "storageclass.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/storage/v1", - importpath = "k8s.io/client-go/informers/storage/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/storage/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/storage/v1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/informers/storage/v1alpha1/BUILD.bazel deleted file mode 100644 index 39b8daaf5c2..00000000000 --- a/vendor/k8s.io/client-go/informers/storage/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "interface.go", - "volumeattachment.go", - ], - importmap = "installer/vendor/k8s.io/client-go/informers/storage/v1alpha1", - importpath = "k8s.io/client-go/informers/storage/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/storage/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/informers/storage/v1beta1/BUILD.bazel deleted file mode 100644 index 6505e78dbab..00000000000 --- a/vendor/k8s.io/client-go/informers/storage/v1beta1/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "interface.go", - "storageclass.go", - ], - importmap = 
"installer/vendor/k8s.io/client-go/informers/storage/v1beta1", - importpath = "k8s.io/client-go/informers/storage/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/storage/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/informers/internalinterfaces:go_default_library", - "//vendor/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/k8s.io/client-go/listers/storage/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/BUILD.bazel deleted file mode 100644 index c390328e2a5..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/BUILD.bazel +++ /dev/null @@ -1,47 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - "import.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes", - importpath = "k8s.io/client-go/kubernetes", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/networking/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1:go_default_library", - 
"//vendor/k8s.io/client-go/kubernetes/typed/storage/v1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel deleted file mode 100644 index ea565d970d5..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/scheme/BUILD.bazel +++ /dev/null @@ -1,46 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/scheme", - importpath = "k8s.io/client-go/kubernetes/scheme", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", - "//vendor/k8s.io/api/authentication/v1:go_default_library", - "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", - "//vendor/k8s.io/api/authorization/v1:go_default_library", - "//vendor/k8s.io/api/authorization/v1beta1:go_default_library", - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/api/batch/v1beta1:go_default_library", - "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/events/v1beta1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/api/networking/v1:go_default_library", - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - "//vendor/k8s.io/api/rbac/v1:go_default_library", - "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", - "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", - "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", - "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", - "//vendor/k8s.io/api/storage/v1:go_default_library", - "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", - "//vendor/k8s.io/api/storage/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD.bazel deleted file mode 100644 index 25efcb9049e..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "admissionregistration_client.go", - "doc.go", - "generated_expansion.go", - "initializerconfiguration.go", - ], - importmap = 
"installer/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/BUILD.bazel deleted file mode 100644 index 93b4926e4dc..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "admissionregistration_client.go", - "doc.go", - "generated_expansion.go", - "mutatingwebhookconfiguration.go", - "validatingwebhookconfiguration.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD.bazel deleted file mode 100644 index dee7d210875..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apps_client.go", - "controllerrevision.go", - "daemonset.go", - "deployment.go", - "doc.go", - "generated_expansion.go", - "replicaset.go", - "statefulset.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/apps/v1", - importpath = "k8s.io/client-go/kubernetes/typed/apps/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel deleted file mode 100644 index 7f0442249cb..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apps_client.go", - "controllerrevision.go", - "deployment.go", - "doc.go", - "generated_expansion.go", - "scale.go", - "statefulset.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/apps/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD.bazel deleted file mode 100644 index c6f932ab9a4..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "apps_client.go", - "controllerrevision.go", - "daemonset.go", - "deployment.go", - "doc.go", - "generated_expansion.go", - "replicaset.go", - "scale.go", - "statefulset.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2", - importpath = "k8s.io/client-go/kubernetes/typed/apps/v1beta2", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD.bazel deleted file mode 100644 index e7895d4eb48..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authentication_client.go", - "doc.go", - "generated_expansion.go", - "tokenreview.go", - "tokenreview_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1", - importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/authentication/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD.bazel deleted file mode 100644 index 9128497b3b7..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authentication_client.go", - "doc.go", - "generated_expansion.go", - "tokenreview.go", - "tokenreview_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/authentication/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD.bazel deleted file mode 100644 index 2d0297876e2..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authorization_client.go", - "doc.go", - "generated_expansion.go", - "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", - "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", - "selfsubjectrulesreview.go", - "selfsubjectrulesreview_expansion.go", - "subjectaccessreview.go", - "subjectaccessreview_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1", - importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/authorization/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD.bazel deleted file mode 100644 index 72a9a1fcb7e..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/BUILD.bazel +++ /dev/null @@ -1,27 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "authorization_client.go", - "doc.go", - "generated_expansion.go", - "localsubjectaccessreview.go", - "localsubjectaccessreview_expansion.go", - "selfsubjectaccessreview.go", - "selfsubjectaccessreview_expansion.go", - "selfsubjectrulesreview.go", - "selfsubjectrulesreview_expansion.go", - "subjectaccessreview.go", - "subjectaccessreview_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/authorization/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD.bazel deleted file mode 100644 index b477e15fc13..00000000000 --- 
a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - "horizontalpodautoscaler.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1", - importpath = "k8s.io/client-go/kubernetes/typed/autoscaling/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/BUILD.bazel deleted file mode 100644 index e733059e56b..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "autoscaling_client.go", - "doc.go", - "generated_expansion.go", - "horizontalpodautoscaler.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1", - importpath = "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD.bazel deleted file mode 100644 index 35e619a782a..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "doc.go", - "generated_expansion.go", - "job.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/batch/v1", - importpath = "k8s.io/client-go/kubernetes/typed/batch/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/BUILD.bazel deleted file mode 100644 index e1fa31d4d0e..00000000000 --- 
a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "cronjob.go", - "doc.go", - "generated_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/batch/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/batch/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD.bazel deleted file mode 100644 index 23c52e01097..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "batch_client.go", - "cronjob.go", - "doc.go", - "generated_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD.bazel deleted file mode 100644 index 5d197f1aa38..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "certificates_client.go", - "certificatesigningrequest.go", - "certificatesigningrequest_expansion.go", - "doc.go", - "generated_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel deleted file mode 
100644 index aac1a7563ff..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/BUILD.bazel +++ /dev/null @@ -1,49 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "componentstatus.go", - "configmap.go", - "core_client.go", - "doc.go", - "endpoints.go", - "event.go", - "event_expansion.go", - "generated_expansion.go", - "limitrange.go", - "namespace.go", - "namespace_expansion.go", - "node.go", - "node_expansion.go", - "persistentvolume.go", - "persistentvolumeclaim.go", - "pod.go", - "pod_expansion.go", - "podtemplate.go", - "replicationcontroller.go", - "resourcequota.go", - "secret.go", - "service.go", - "service_expansion.go", - "serviceaccount.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/core/v1", - importpath = "k8s.io/client-go/kubernetes/typed/core/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/tools/reference:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/BUILD.bazel deleted file mode 100644 index df9f25e3b3c..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "event.go", - "events_client.go", - "generated_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/events/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/events/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel deleted file mode 100644 index 8d53fc70bbf..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/BUILD.bazel +++ /dev/null @@ -1,32 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "deployment.go", - "deployment_expansion.go", - "doc.go", - "extensions_client.go", - "generated_expansion.go", - "ingress.go", - "podsecuritypolicy.go", - 
"replicaset.go", - "scale.go", - "scale_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD.bazel deleted file mode 100644 index 84998cadaa8..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "networking_client.go", - "networkpolicy.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/networking/v1", - importpath = "k8s.io/client-go/kubernetes/typed/networking/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/networking/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD.bazel deleted file mode 100644 index a7d4229d9e2..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "eviction.go", - "eviction_expansion.go", - "generated_expansion.go", - "poddisruptionbudget.go", - "policy_client.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/policy/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/BUILD.bazel deleted file mode 100644 index a9b1bad2a7d..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/BUILD.bazel +++ /dev/null @@ -1,26 
+0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "doc.go", - "generated_expansion.go", - "rbac_client.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1", - importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/rbac/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD.bazel deleted file mode 100644 index beb865bbbf1..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "doc.go", - "generated_expansion.go", - "rbac_client.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD.bazel deleted file mode 100644 index c6a3121235c..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "doc.go", - "generated_expansion.go", - "rbac_client.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/BUILD.bazel deleted file mode 100644 index 
33383a2a827..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "priorityclass.go", - "scheduling_client.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD.bazel deleted file mode 100644 index 43adbc9202b..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "podpreset.go", - "settings_client.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel deleted file mode 100644 index 517e9e7cf38..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "storage_client.go", - "storageclass.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/storage/v1", - importpath = "k8s.io/client-go/kubernetes/typed/storage/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/storage/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/BUILD.bazel deleted file mode 100644 index 
ca8796decc1..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "storage_client.go", - "volumeattachment.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1", - importpath = "k8s.io/client-go/kubernetes/typed/storage/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD.bazel deleted file mode 100644 index da42d037061..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "generated_expansion.go", - "storage_client.go", - "storageclass.go", - ], - importmap = "installer/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1", - importpath = "k8s.io/client-go/kubernetes/typed/storage/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/storage/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/BUILD.bazel deleted file mode 100644 index 2133f370892..00000000000 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "initializerconfiguration.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1", - importpath = "k8s.io/client-go/listers/admissionregistration/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/BUILD.bazel deleted file mode 100644 index 54e54b0f7bc..00000000000 --- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ 
-load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "mutatingwebhookconfiguration.go", - "validatingwebhookconfiguration.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1", - importpath = "k8s.io/client-go/listers/admissionregistration/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/apps/v1/BUILD.bazel b/vendor/k8s.io/client-go/listers/apps/v1/BUILD.bazel deleted file mode 100644 index 609efd3f9db..00000000000 --- a/vendor/k8s.io/client-go/listers/apps/v1/BUILD.bazel +++ /dev/null @@ -1,28 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "controllerrevision.go", - "daemonset.go", - "daemonset_expansion.go", - "deployment.go", - "deployment_expansion.go", - "expansion_generated.go", - "replicaset.go", - "replicaset_expansion.go", - "statefulset.go", - "statefulset_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/apps/v1", - importpath = "k8s.io/client-go/listers/apps/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD.bazel deleted file mode 100644 index 3dad6148012..00000000000 --- a/vendor/k8s.io/client-go/listers/apps/v1beta1/BUILD.bazel +++ /dev/null @@ -1,24 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "controllerrevision.go", - "deployment.go", - "expansion_generated.go", - "scale.go", - "statefulset.go", - "statefulset_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/apps/v1beta1", - importpath = "k8s.io/client-go/listers/apps/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD.bazel b/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD.bazel deleted file mode 100644 index 337d28c61fd..00000000000 --- a/vendor/k8s.io/client-go/listers/apps/v1beta2/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "controllerrevision.go", - "daemonset.go", - "daemonset_expansion.go", - "deployment.go", - "deployment_expansion.go", - "expansion_generated.go", - "replicaset.go", - 
"replicaset_expansion.go", - "scale.go", - "statefulset.go", - "statefulset_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/apps/v1beta2", - importpath = "k8s.io/client-go/listers/apps/v1beta2", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1beta2:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/BUILD.bazel b/vendor/k8s.io/client-go/listers/autoscaling/v1/BUILD.bazel deleted file mode 100644 index d660bd71cd5..00000000000 --- a/vendor/k8s.io/client-go/listers/autoscaling/v1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "horizontalpodautoscaler.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/autoscaling/v1", - importpath = "k8s.io/client-go/listers/autoscaling/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/autoscaling/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/BUILD.bazel deleted file mode 100644 index e80e83cc8f9..00000000000 --- a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "horizontalpodautoscaler.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/autoscaling/v2beta1", - importpath = "k8s.io/client-go/listers/autoscaling/v2beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/batch/v1/BUILD.bazel b/vendor/k8s.io/client-go/listers/batch/v1/BUILD.bazel deleted file mode 100644 index 19afe90108a..00000000000 --- a/vendor/k8s.io/client-go/listers/batch/v1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "job.go", - "job_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/batch/v1", - importpath = "k8s.io/client-go/listers/batch/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/batch/v1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git 
a/vendor/k8s.io/client-go/listers/batch/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/batch/v1beta1/BUILD.bazel deleted file mode 100644 index 9f07896e3ad..00000000000 --- a/vendor/k8s.io/client-go/listers/batch/v1beta1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cronjob.go", - "expansion_generated.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/batch/v1beta1", - importpath = "k8s.io/client-go/listers/batch/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/batch/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/batch/v2alpha1/BUILD.bazel b/vendor/k8s.io/client-go/listers/batch/v2alpha1/BUILD.bazel deleted file mode 100644 index a4bae44ad9c..00000000000 --- a/vendor/k8s.io/client-go/listers/batch/v2alpha1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cronjob.go", - "expansion_generated.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/batch/v2alpha1", - importpath = "k8s.io/client-go/listers/batch/v2alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/batch/v2alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/certificates/v1beta1/BUILD.bazel deleted file mode 100644 index 3d8a3377df3..00000000000 --- a/vendor/k8s.io/client-go/listers/certificates/v1beta1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "certificatesigningrequest.go", - "expansion_generated.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/certificates/v1beta1", - importpath = "k8s.io/client-go/listers/certificates/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/certificates/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/core/v1/BUILD.bazel b/vendor/k8s.io/client-go/listers/core/v1/BUILD.bazel deleted file mode 100644 index dd68c385950..00000000000 --- a/vendor/k8s.io/client-go/listers/core/v1/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "componentstatus.go", - "configmap.go", - "endpoints.go", - "event.go", - "expansion_generated.go", - "limitrange.go", - "namespace.go", - "node.go", - "node_expansion.go", - "persistentvolume.go", - "persistentvolumeclaim.go", - "pod.go", - "podtemplate.go", - "replicationcontroller.go", - "replicationcontroller_expansion.go", - "resourcequota.go", - "secret.go", - "service.go", - "service_expansion.go", - "serviceaccount.go", - ], - importmap = 
"installer/vendor/k8s.io/client-go/listers/core/v1", - importpath = "k8s.io/client-go/listers/core/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/events/v1beta1/BUILD.bazel deleted file mode 100644 index 476a95f8fd3..00000000000 --- a/vendor/k8s.io/client-go/listers/events/v1beta1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "event.go", - "expansion_generated.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/events/v1beta1", - importpath = "k8s.io/client-go/listers/events/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/events/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD.bazel deleted file mode 100644 index 1ab109de582..00000000000 --- a/vendor/k8s.io/client-go/listers/extensions/v1beta1/BUILD.bazel +++ /dev/null @@ -1,29 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "daemonset.go", - "daemonset_expansion.go", - "deployment.go", - "deployment_expansion.go", - "expansion_generated.go", - "ingress.go", - "podsecuritypolicy.go", - "replicaset.go", - "replicaset_expansion.go", - "scale.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/extensions/v1beta1", - importpath = "k8s.io/client-go/listers/extensions/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/apps/v1beta1:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/networking/v1/BUILD.bazel b/vendor/k8s.io/client-go/listers/networking/v1/BUILD.bazel deleted file mode 100644 index a2154516774..00000000000 --- a/vendor/k8s.io/client-go/listers/networking/v1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "networkpolicy.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/networking/v1", - importpath = "k8s.io/client-go/listers/networking/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/networking/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD.bazel 
b/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD.bazel deleted file mode 100644 index f3b5b5b5509..00000000000 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "eviction.go", - "expansion_generated.go", - "poddisruptionbudget.go", - "poddisruptionbudget_expansion.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/policy/v1beta1", - importpath = "k8s.io/client-go/listers/policy/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/api/policy/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/BUILD.bazel b/vendor/k8s.io/client-go/listers/rbac/v1/BUILD.bazel deleted file mode 100644 index b6a31bca6bd..00000000000 --- a/vendor/k8s.io/client-go/listers/rbac/v1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "expansion_generated.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/rbac/v1", - importpath = "k8s.io/client-go/listers/rbac/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/rbac/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/BUILD.bazel deleted file mode 100644 index 78a45d8892d..00000000000 --- a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "expansion_generated.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/rbac/v1alpha1", - importpath = "k8s.io/client-go/listers/rbac/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/rbac/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/rbac/v1beta1/BUILD.bazel deleted file mode 100644 index 188c15bd5a1..00000000000 --- a/vendor/k8s.io/client-go/listers/rbac/v1beta1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clusterrole.go", - "clusterrolebinding.go", - "expansion_generated.go", - "role.go", - "rolebinding.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/rbac/v1beta1", - importpath = "k8s.io/client-go/listers/rbac/v1beta1", - 
visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/rbac/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/BUILD.bazel deleted file mode 100644 index e469c7fab15..00000000000 --- a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "priorityclass.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/scheduling/v1alpha1", - importpath = "k8s.io/client-go/listers/scheduling/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/settings/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/listers/settings/v1alpha1/BUILD.bazel deleted file mode 100644 index 45d75e858fa..00000000000 --- a/vendor/k8s.io/client-go/listers/settings/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "podpreset.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/settings/v1alpha1", - importpath = "k8s.io/client-go/listers/settings/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/settings/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/storage/v1/BUILD.bazel b/vendor/k8s.io/client-go/listers/storage/v1/BUILD.bazel deleted file mode 100644 index 5e01eb8340e..00000000000 --- a/vendor/k8s.io/client-go/listers/storage/v1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "storageclass.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/storage/v1", - importpath = "k8s.io/client-go/listers/storage/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/storage/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/BUILD.bazel b/vendor/k8s.io/client-go/listers/storage/v1alpha1/BUILD.bazel deleted file mode 100644 index c227acb2ff0..00000000000 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "volumeattachment.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/storage/v1alpha1", - 
importpath = "k8s.io/client-go/listers/storage/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/storage/v1alpha1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/BUILD.bazel b/vendor/k8s.io/client-go/listers/storage/v1beta1/BUILD.bazel deleted file mode 100644 index 87508160af2..00000000000 --- a/vendor/k8s.io/client-go/listers/storage/v1beta1/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "expansion_generated.go", - "storageclass.go", - ], - importmap = "installer/vendor/k8s.io/client-go/listers/storage/v1beta1", - importpath = "k8s.io/client-go/listers/storage/v1beta1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/storage/v1beta1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/client-go/tools/cache:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/pkg/version/BUILD.bazel b/vendor/k8s.io/client-go/pkg/version/BUILD.bazel deleted file mode 100644 index 7055db1378f..00000000000 --- a/vendor/k8s.io/client-go/pkg/version/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "base.go", - "doc.go", - "version.go", - ], - importmap = "installer/vendor/k8s.io/client-go/pkg/version", - importpath = "k8s.io/client-go/pkg/version", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/apimachinery/pkg/version:go_default_library"], -) diff --git a/vendor/k8s.io/client-go/rest/BUILD.bazel b/vendor/k8s.io/client-go/rest/BUILD.bazel deleted file mode 100644 index 1737fb2e843..00000000000 --- a/vendor/k8s.io/client-go/rest/BUILD.bazel +++ /dev/null @@ -1,40 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "client.go", - "config.go", - "plugin.go", - "request.go", - "transport.go", - "url_utils.go", - "urlbackoff.go", - "versions.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/client-go/rest", - importpath = "k8s.io/client-go/rest", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/http2:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/pkg/version:go_default_library", - "//vendor/k8s.io/client-go/rest/watch:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", - 
"//vendor/k8s.io/client-go/tools/metrics:go_default_library", - "//vendor/k8s.io/client-go/transport:go_default_library", - "//vendor/k8s.io/client-go/util/cert:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/rest/watch/BUILD.bazel b/vendor/k8s.io/client-go/rest/watch/BUILD.bazel deleted file mode 100644 index db2a9ea74c2..00000000000 --- a/vendor/k8s.io/client-go/rest/watch/BUILD.bazel +++ /dev/null @@ -1,18 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "decoder.go", - "encoder.go", - ], - importmap = "installer/vendor/k8s.io/client-go/rest/watch", - importpath = "k8s.io/client-go/rest/watch", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/tools/auth/BUILD.bazel b/vendor/k8s.io/client-go/tools/auth/BUILD.bazel deleted file mode 100644 index ee749489820..00000000000 --- a/vendor/k8s.io/client-go/tools/auth/BUILD.bazel +++ /dev/null @@ -1,10 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["clientauth.go"], - importmap = "installer/vendor/k8s.io/client-go/tools/auth", - importpath = "k8s.io/client-go/tools/auth", - visibility = ["//visibility:public"], - deps = ["//vendor/k8s.io/client-go/rest:go_default_library"], -) diff --git a/vendor/k8s.io/client-go/tools/cache/BUILD.bazel b/vendor/k8s.io/client-go/tools/cache/BUILD.bazel deleted file mode 100644 index 25b63e5e38b..00000000000 --- a/vendor/k8s.io/client-go/tools/cache/BUILD.bazel +++ /dev/null @@ -1,50 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "controller.go", - "delta_fifo.go", - "doc.go", - "expiration_cache.go", - "expiration_cache_fakes.go", - "fake_custom_store.go", - "fifo.go", - "heap.go", - "index.go", - "listers.go", - "listwatch.go", - "mutation_cache.go", - "mutation_detector.go", - "reflector.go", - "reflector_metrics.go", - "shared_informer.go", - "store.go", - "thread_safe_store.go", - "undelta_store.go", - ], - importmap = "installer/vendor/k8s.io/client-go/tools/cache", - importpath = "k8s.io/client-go/tools/cache", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/tools/pager:go_default_library", - "//vendor/k8s.io/client-go/util/buffer:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel b/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel deleted file mode 100644 index 0194cea10b7..00000000000 --- a/vendor/k8s.io/client-go/tools/clientcmd/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "auth_loaders.go", - "client_config.go", - "config.go", - "doc.go", - "flag.go", - "helpers.go", - "loader.go", - "merged_client_builder.go", - "overrides.go", - "validation.go", - ], - importmap = "installer/vendor/k8s.io/client-go/tools/clientcmd", - importpath = "k8s.io/client-go/tools/clientcmd", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/howeyc/gopass:go_default_library", - "//vendor/github.com/imdario/mergo:go_default_library", - "//vendor/github.com/spf13/pflag:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/tools/auth:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd/api/latest:go_default_library", - "//vendor/k8s.io/client-go/util/homedir:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD.bazel b/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD.bazel deleted file mode 100644 index ff44f794ec3..00000000000 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "helpers.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/client-go/tools/clientcmd/api", - importpath = "k8s.io/client-go/tools/clientcmd/api", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD.bazel b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD.bazel deleted file mode 100644 index 61376f76c9a..00000000000 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["latest.go"], - importmap = "installer/vendor/k8s.io/client-go/tools/clientcmd/api/latest", - importpath = "k8s.io/client-go/tools/clientcmd/api/latest", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd/api/v1:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD.bazel b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD.bazel deleted file mode 100644 index 675d5e5427a..00000000000 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "conversion.go", - "doc.go", - "register.go", - "types.go", - "zz_generated.deepcopy.go", - ], - importmap = "installer/vendor/k8s.io/client-go/tools/clientcmd/api/v1", - importpath = "k8s.io/client-go/tools/clientcmd/api/v1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/tools/metrics/BUILD.bazel b/vendor/k8s.io/client-go/tools/metrics/BUILD.bazel deleted file mode 100644 index f11da9b3141..00000000000 --- a/vendor/k8s.io/client-go/tools/metrics/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["metrics.go"], - importmap = "installer/vendor/k8s.io/client-go/tools/metrics", - importpath = "k8s.io/client-go/tools/metrics", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/client-go/tools/pager/BUILD.bazel b/vendor/k8s.io/client-go/tools/pager/BUILD.bazel deleted file mode 100644 index 21e2a4a437c..00000000000 --- a/vendor/k8s.io/client-go/tools/pager/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["pager.go"], - importmap = "installer/vendor/k8s.io/client-go/tools/pager", - importpath = "k8s.io/client-go/tools/pager", - visibility = ["//visibility:public"], - deps = [ - "//vendor/golang.org/x/net/context:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/tools/reference/BUILD.bazel b/vendor/k8s.io/client-go/tools/reference/BUILD.bazel deleted file mode 100644 index 6c85e4d4660..00000000000 --- a/vendor/k8s.io/client-go/tools/reference/BUILD.bazel +++ /dev/null @@ -1,15 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["ref.go"], - importmap = "installer/vendor/k8s.io/client-go/tools/reference", - importpath = "k8s.io/client-go/tools/reference", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/transport/BUILD.bazel b/vendor/k8s.io/client-go/transport/BUILD.bazel deleted file mode 100644 index 077c15e4c82..00000000000 --- a/vendor/k8s.io/client-go/transport/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cache.go", - "config.go", - "round_trippers.go", - "transport.go", - ], - importmap = "installer/vendor/k8s.io/client-go/transport", - importpath = "k8s.io/client-go/transport", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/gregjones/httpcache:go_default_library", - "//vendor/github.com/gregjones/httpcache/diskcache:go_default_library", - "//vendor/github.com/peterbourgon/diskv:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/util/buffer/BUILD.bazel b/vendor/k8s.io/client-go/util/buffer/BUILD.bazel deleted file mode 100644 index 1896998f5dd..00000000000 --- a/vendor/k8s.io/client-go/util/buffer/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["ring_growing.go"], - importmap = "installer/vendor/k8s.io/client-go/util/buffer", - importpath = "k8s.io/client-go/util/buffer", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/client-go/util/cert/BUILD.bazel b/vendor/k8s.io/client-go/util/cert/BUILD.bazel deleted file mode 100644 index f516d426263..00000000000 --- a/vendor/k8s.io/client-go/util/cert/BUILD.bazel +++ /dev/null @@ -1,14 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cert.go", - "csr.go", - "io.go", - "pem.go", - ], - importmap = "installer/vendor/k8s.io/client-go/util/cert", - importpath = "k8s.io/client-go/util/cert", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/client-go/util/flowcontrol/BUILD.bazel b/vendor/k8s.io/client-go/util/flowcontrol/BUILD.bazel deleted file mode 100644 index eb726b2d180..00000000000 --- a/vendor/k8s.io/client-go/util/flowcontrol/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "backoff.go", - "throttle.go", - ], - importmap = "installer/vendor/k8s.io/client-go/util/flowcontrol", - importpath = "k8s.io/client-go/util/flowcontrol", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/juju/ratelimit:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library", - "//vendor/k8s.io/client-go/util/integer:go_default_library", - ], -) diff --git a/vendor/k8s.io/client-go/util/homedir/BUILD.bazel b/vendor/k8s.io/client-go/util/homedir/BUILD.bazel deleted file mode 100644 index 27ce3a1f8a9..00000000000 --- a/vendor/k8s.io/client-go/util/homedir/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["homedir.go"], - importmap = "installer/vendor/k8s.io/client-go/util/homedir", - importpath = "k8s.io/client-go/util/homedir", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/client-go/util/integer/BUILD.bazel b/vendor/k8s.io/client-go/util/integer/BUILD.bazel deleted file mode 100644 index 
489b632bccc..00000000000 --- a/vendor/k8s.io/client-go/util/integer/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["integer.go"], - importmap = "installer/vendor/k8s.io/client-go/util/integer", - importpath = "k8s.io/client-go/util/integer", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/BUILD.bazel b/vendor/k8s.io/kube-openapi/pkg/builder/BUILD.bazel deleted file mode 100644 index 64b0343c0f7..00000000000 --- a/vendor/k8s.io/kube-openapi/pkg/builder/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "openapi.go", - "util.go", - ], - importmap = "installer/vendor/k8s.io/kube-openapi/pkg/builder", - importpath = "k8s.io/kube-openapi/pkg/builder", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/util:go_default_library", - ], -) diff --git a/vendor/k8s.io/kube-openapi/pkg/common/BUILD.bazel b/vendor/k8s.io/kube-openapi/pkg/common/BUILD.bazel deleted file mode 100644 index 9a462439b39..00000000000 --- a/vendor/k8s.io/kube-openapi/pkg/common/BUILD.bazel +++ /dev/null @@ -1,16 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "common.go", - "doc.go", - ], - importmap = "installer/vendor/k8s.io/kube-openapi/pkg/common", - importpath = "k8s.io/kube-openapi/pkg/common", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/go-openapi/spec:go_default_library", - ], -) diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/BUILD.bazel b/vendor/k8s.io/kube-openapi/pkg/handler/BUILD.bazel deleted file mode 100644 index 97fd2a36c67..00000000000 --- a/vendor/k8s.io/kube-openapi/pkg/handler/BUILD.bazel +++ /dev/null @@ -1,20 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["handler.go"], - importmap = "installer/vendor/k8s.io/kube-openapi/pkg/handler", - importpath = "k8s.io/kube-openapi/pkg/handler", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/NYTimes/gziphandler:go_default_library", - "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/golang/protobuf/proto:go_default_library", - "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", - "//vendor/github.com/googleapis/gnostic/compiler:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/builder:go_default_library", - "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", - ], -) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/BUILD.bazel b/vendor/k8s.io/kube-openapi/pkg/util/BUILD.bazel deleted file mode 100644 index 3508b2fdebe..00000000000 --- a/vendor/k8s.io/kube-openapi/pkg/util/BUILD.bazel +++ /dev/null @@ -1,12 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "trie.go", - "util.go", - ], - importmap = "installer/vendor/k8s.io/kube-openapi/pkg/util", - importpath = 
"k8s.io/kube-openapi/pkg/util", - visibility = ["//visibility:public"], -) diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/BUILD.bazel b/vendor/k8s.io/kube-openapi/pkg/util/proto/BUILD.bazel deleted file mode 100644 index 9ec89876623..00000000000 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "document.go", - "openapi.go", - ], - importmap = "installer/vendor/k8s.io/kube-openapi/pkg/util/proto", - importpath = "k8s.io/kube-openapi/pkg/util/proto", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", - "//vendor/gopkg.in/yaml.v2:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/BUILD.bazel deleted file mode 100644 index 845a67cc651..00000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "zz_generated.api.register.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importmap = "installer/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster", - importpath = "sigs.k8s.io/cluster-api/pkg/apis/cluster", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", - "//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/BUILD.bazel deleted file mode 100644 index 5ec8f22c70a..00000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/BUILD.bazel +++ /dev/null @@ -1,9 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["consts.go"], - importmap = "installer/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common", - importpath = "sigs.k8s.io/cluster-api/pkg/apis/cluster/common", - visibility = ["//visibility:public"], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/BUILD.bazel deleted file mode 100644 index 91ae9bcfc83..00000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,36 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster_types.go", - "common_types.go", - "doc.go", - "machine_types.go", - "machinedeployment_types.go", - "machineset_types.go", - "zz_generated.api.register.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], 
- importmap = "installer/vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1", - importpath = "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", - "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", - "//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster:go_default_library", - "//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel deleted file mode 100644 index fa1f1f144b5..00000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "clientset.go", - "doc.go", - ], - importmap = "installer/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset", - importpath = "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset", - visibility = ["//visibility:public"], - deps = [ - "//vendor/github.com/golang/glog:go_default_library", - "//vendor/k8s.io/client-go/discovery:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1:go_default_library", - ], -) diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel deleted file mode 100644 index 5b5ced8da00..00000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/BUILD.bazel +++ /dev/null @@ -1,19 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "register.go", - ], - importmap = "installer/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme", - importpath = "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1:go_default_library", - ], -) diff --git 
a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel b/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel deleted file mode 100644 index a7f41823e6a..00000000000 --- a/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/BUILD.bazel +++ /dev/null @@ -1,26 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = [ - "cluster.go", - "cluster_client.go", - "doc.go", - "generated_expansion.go", - "machine.go", - "machinedeployment.go", - "machineset.go", - ], - importmap = "installer/vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1", - importpath = "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1", - visibility = ["//visibility:public"], - deps = [ - "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library", - "//vendor/k8s.io/client-go/rest:go_default_library", - "//vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1:go_default_library", - "//vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme:go_default_library", - ], -) From c90a692f0b28da67187ee15a45005464dfad24fa Mon Sep 17 00:00:00 2001 From: Alex Crawford Date: Wed, 26 Sep 2018 14:30:29 -0700 Subject: [PATCH 2/7] types/config: remove unused members The installer no longer reads Ignition files or the CA from disk, so these aren't needed. --- pkg/types/config/cluster.go | 4 -- pkg/types/config/types.go | 12 +--- pkg/types/config/validate.go | 94 ------------------------------- pkg/types/config/validate_test.go | 38 ------------- 4 files changed, 2 insertions(+), 146 deletions(-) diff --git a/pkg/types/config/cluster.go b/pkg/types/config/cluster.go index 75c2e29c9d8..aa7ac6d3341 100644 --- a/pkg/types/config/cluster.go +++ b/pkg/types/config/cluster.go @@ -50,9 +50,6 @@ var defaultCluster = Cluster{ Region: aws.DefaultRegion, VPCCIDRBlock: aws.DefaultVPCCIDRBlock, }, - CA: CA{ - RootCAKeyAlg: "RSA", - }, Libvirt: libvirt.Libvirt{ Network: libvirt.Network{ IfName: libvirt.DefaultIfName, @@ -71,7 +68,6 @@ type Cluster struct { Admin `json:",inline" yaml:"admin,omitempty"` aws.AWS `json:",inline" yaml:"aws,omitempty"` BaseDomain string `json:"tectonic_base_domain,omitempty" yaml:"baseDomain,omitempty"` - CA `json:",inline" yaml:"CA,omitempty"` IgnitionBootstrap string `json:"ignition_bootstrap,omitempty" yaml:"-"` IgnitionMasters []string `json:"ignition_masters,omitempty" yaml:"-"` diff --git a/pkg/types/config/types.go b/pkg/types/config/types.go index 6e64649585c..70f64d19f89 100644 --- a/pkg/types/config/types.go +++ b/pkg/types/config/types.go @@ -9,18 +9,10 @@ type Admin struct { SSHKey string `json:"tectonic_admin_ssh_key,omitempty" yaml:"sshKey,omitempty"` } -// CA related config -type CA struct { - RootCACertPath string `json:"-" yaml:"rootCACertPath,omitempty"` - RootCAKeyPath string `json:"-" yaml:"rootCAKeyPath,omitempty"` - RootCAKeyAlg string `json:"-" yaml:"rootCAKeyAlg,omitempty"` -} - // NodePool converts node pool related config. 
type NodePool struct { - Count int `json:"-" yaml:"count"` - Name string `json:"-" yaml:"name"` - IgnitionFile string `json:"-" yaml:"ignitionFile"` + Count int `json:"-" yaml:"count"` + Name string `json:"-" yaml:"name"` } // NodePools converts node pools related config. diff --git a/pkg/types/config/validate.go b/pkg/types/config/validate.go index 440513d3a1c..2673d5c13c5 100644 --- a/pkg/types/config/validate.go +++ b/pkg/types/config/validate.go @@ -3,14 +3,12 @@ package config import ( "errors" "fmt" - "io/ioutil" "regexp" "strings" "github.com/openshift/installer/installer/pkg/validate" "github.com/openshift/installer/pkg/types/config/aws" - ignconfig "github.com/coreos/ignition/config/v2_2" "github.com/coreos/tectonic-config/config/tectonic-network" log "github.com/sirupsen/logrus" ) @@ -75,12 +73,10 @@ func (e *ErrInvalidIgnConfig) Error() string { func (c *Cluster) Validate() []error { var errs []error errs = append(errs, c.validateNodePools()...) - errs = append(errs, c.validateIgnitionFiles()...) errs = append(errs, c.validateNetworking()...) errs = append(errs, c.validateAWS()...) errs = append(errs, c.validatePullSecret()...) errs = append(errs, c.validateLibvirt()...) - errs = append(errs, c.validateCA()...) if err := validate.PrefixError("cluster name", validate.ClusterName(c.Name)); err != nil { errs = append(errs, err) } @@ -258,41 +254,6 @@ func (c *Cluster) validatePullSecret() []error { return errs } -func (c *Cluster) validateIgnitionFiles() []error { - var errs []error - for _, n := range c.NodePools { - if n.IgnitionFile == "" { - continue - } - - if err := validate.FileExists(n.IgnitionFile); err != nil { - errs = append(errs, err) - continue - } - - if err := validateIgnitionConfig(n.IgnitionFile); err != nil { - errs = append(errs, err) - } - } - return errs -} - -func validateIgnitionConfig(filePath string) error { - blob, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - - _, rpt, _ := ignconfig.Parse(blob) - if len(rpt.Entries) > 0 { - return &ErrInvalidIgnConfig{ - filePath, - rpt.String(), - } - } - return nil -} - func (c *Cluster) validateNodePools() []error { var errs []error n := c.NodePools.Map() @@ -352,58 +313,3 @@ func (c *Cluster) validateNoSharedNodePools() []error { } return errs } - -func (c *Cluster) validateCA() []error { - var errs []error - - switch { - case (c.CA.RootCACertPath == "") != (c.CA.RootCAKeyPath == ""): - errs = append(errs, fmt.Errorf("rootCACertPath and rootCAKeyPath must both be set or empty")) - case c.CA.RootCAKeyPath != "": - if err := validate.FileExists(c.CA.RootCAKeyPath); err != nil { - errs = append(errs, err) - } - if err := validateCAKey(c.CA.RootCAKeyPath); err != nil { - errs = append(errs, err) - } - fallthrough - case c.CA.RootCACertPath != "": - if err := validate.FileExists(c.CA.RootCACertPath); err != nil { - errs = append(errs, err) - } - if err := validateCACert(c.CA.RootCACertPath); err != nil { - errs = append(errs, err) - } - } - return errs -} - -// validateCAKey validates ֿthe content of the private key file -func validateCAKey(path string) error { - data, err := ioutil.ReadFile(path) - if err != nil { - return fmt.Errorf("failed to read private key file: %v", err) - } - pem := string(data) - - // Validate that file content is a valid Key - if err := validate.PrivateKey(pem); err != nil { - return fmt.Errorf("invalid private key (%s): %v", path, err) - } - return nil -} - -// validateCACert validates the content of the certificate file -func validateCACert(path string) error { - data, 
err := ioutil.ReadFile(path) - if err != nil { - return fmt.Errorf("failed to read certificate file: %v", err) - } - pem := string(data) - - // Validate that file content is a valid certificate - if err := validate.Certificate(pem); err != nil { - return fmt.Errorf("invalid certificate (%s): %v", path, err) - } - return nil -} diff --git a/pkg/types/config/validate_test.go b/pkg/types/config/validate_test.go index 36836f6df8c..1e69795feee 100644 --- a/pkg/types/config/validate_test.go +++ b/pkg/types/config/validate_test.go @@ -1,7 +1,6 @@ package config import ( - "os" "testing" "github.com/openshift/installer/pkg/types/config/aws" @@ -405,43 +404,6 @@ func TestS3BucketNames(t *testing.T) { } } -func TestValidateIgnitionFiles(t *testing.T) { - c := Cluster{ - NodePools: NodePools{ - { - Name: "error: invalid path", - IgnitionFile: "do-not-exist.ign", - }, - { - Name: "error: invalid config", - IgnitionFile: "fixtures/invalid-ign.ign", - }, - { - Name: "ok: no field", - }, - { - Name: "ok: empty field", - IgnitionFile: "", - }, - { - Name: "ok: valid config", - IgnitionFile: "fixtures/ign.ign", - }, - }, - } - - errs := c.validateIgnitionFiles() - if len(errs) != 2 { - t.Errorf("expected: %d ignition errors, got: %d", 2, len(errs)) - } - if !os.IsNotExist(errs[0]) { - t.Errorf("expected: notExistError, got: %v", errs[0]) - } - if _, ok := errs[1].(*ErrInvalidIgnConfig); !ok { - t.Errorf("expected: ErrInvalidIgnConfig, got: %v", errs[1]) - } -} - func TestValidateLibvirt(t *testing.T) { cases := []struct { cluster Cluster From 496c5ab7a938a7ce4d1d13109693f35ec76f211f Mon Sep 17 00:00:00 2001 From: Alex Crawford Date: Wed, 26 Sep 2018 15:10:15 -0700 Subject: [PATCH 3/7] *: copy validate from installer package The installer package is going to be removed. This moves the remaining validation functionality. --- pkg/asset/installconfig/ssh.go | 42 +++- pkg/asset/installconfig/ssh_test.go | 38 ++++ pkg/asset/installconfig/stock.go | 10 +- pkg/types/config/validate.go | 291 ++++++++++++++++++++++++-- pkg/types/config/validate_test.go | 305 ++++++++++++++++++++++++++++ 5 files changed, 658 insertions(+), 28 deletions(-) create mode 100644 pkg/asset/installconfig/ssh_test.go diff --git a/pkg/asset/installconfig/ssh.go b/pkg/asset/installconfig/ssh.go index 0d57a1a4943..cbbb2882a56 100644 --- a/pkg/asset/installconfig/ssh.go +++ b/pkg/asset/installconfig/ssh.go @@ -1,15 +1,17 @@ package installconfig import ( + "errors" "fmt" "io/ioutil" "os" "path/filepath" + "regexp" "sort" + "strings" "github.com/AlecAivazis/survey" - "github.com/openshift/installer/installer/pkg/validate" "github.com/openshift/installer/pkg/asset" ) @@ -30,7 +32,7 @@ func readSSHKey(path string) (key []byte, err error) { return key, err } - err = validate.OpenSSHPublicKey(string(key)) + err = validateOpenSSHPublicKey(string(key)) if err != nil { return key, err } @@ -42,7 +44,7 @@ func readSSHKey(path string) (key []byte, err error) { func (a *sshPublicKey) Generate(map[asset.Asset]*asset.State) (state *asset.State, err error) { if value, ok := os.LookupEnv("OPENSHIFT_INSTALL_SSH_PUB_KEY"); ok { if value != "" { - if err := validate.OpenSSHPublicKey(value); err != nil { + if err := validateOpenSSHPublicKey(value); err != nil { return nil, err } } @@ -118,3 +120,37 @@ func (a *sshPublicKey) Generate(map[asset.Asset]*asset.State) (state *asset.Stat func (a *sshPublicKey) Name() string { return "SSH Key" } + +// validateOpenSSHPublicKey checks if the given string is a valid OpenSSH public key and returns an error if not. 
+// Ignores leading and trailing whitespace. +func validateOpenSSHPublicKey(v string) error { + trimmed := strings.TrimSpace(v) + + // Don't let users hang themselves + if isMatch(`-BEGIN [\w-]+ PRIVATE KEY-`, trimmed) { + return errors.New("invalid SSH public key (appears to be a private key)") + } + + if strings.Contains(trimmed, "\n") { + return errors.New("invalid SSH public key (should not contain any newline characters)") + } + + invalidError := errors.New("invalid SSH public key") + + keyParts := regexp.MustCompile(`\s+`).Split(trimmed, -1) + if len(keyParts) < 2 { + return invalidError + } + + keyType := keyParts[0] + keyBase64 := keyParts[1] + if !isMatch(`^[\w-]+$`, keyType) || !isMatch(`^[A-Za-z0-9+\/]+={0,2}$`, keyBase64) { + return invalidError + } + + return nil +} + +func isMatch(re string, v string) bool { + return regexp.MustCompile(re).MatchString(v) +} diff --git a/pkg/asset/installconfig/ssh_test.go b/pkg/asset/installconfig/ssh_test.go new file mode 100644 index 00000000000..acb89747a4b --- /dev/null +++ b/pkg/asset/installconfig/ssh_test.go @@ -0,0 +1,38 @@ +package installconfig + +import ( + "testing" +) + +func TestOpenSSHPublicKey(t *testing.T) { + const invalidMsg = "invalid SSH public key" + const multiLineMsg = "invalid SSH public key (should not contain any newline characters)" + const privateKeyMsg = "invalid SSH public key (appears to be a private key)" + tests := []struct { + in string + expected string + }{ + {"a", invalidMsg}, + {".", invalidMsg}, + {"日本語", invalidMsg}, + {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", ""}, + {"ssh-rsa \t AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", ""}, + {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com", ""}, + {"\nssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com", ""}, + {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com\n", ""}, + {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL\nssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", multiLineMsg}, + {"ssh-rsa\nAAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com", multiLineMsg}, + {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL\nyou@example.com", multiLineMsg}, + {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", ""}, + {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCt3BebCHqnSsgpLjo4kVvyfY/z2BS8t27r/7du+O2pb4xYkr7n+KFpbOz523vMTpQ+o1jY4u4TgexglyT9nqasWgLOvo1qjD1agHme8LlTPQSk07rXqOB85Uq5p7ig2zoOejF6qXhcc3n1c7+HkxHrgpBENjLVHOBpzPBIAHkAGaZcl07OCqbsG5yxqEmSGiAlh/IiUVOZgdDMaGjCRFy0wk0mQaGD66DmnFc1H5CzcPjsxr0qO65e7lTGsE930KkO1Vc+RHCVwvhdXs+c2NhJ2/3740Kpes9n1/YullaWZUzlCPDXtRuy6JRbFbvy39JUgHWGWzB3d+3f8oJ/N4qZ cardno:000603633110", ""}, + {"-----BEGIN CERTIFICATE-----abcd-----END CERTIFICATE-----", invalidMsg}, + {"-----BEGIN RSA PRIVATE KEY-----\nabc\n-----END RSA PRIVATE KEY-----", privateKeyMsg}, + } + + for _, test := range tests { + err := validateOpenSSHPublicKey(test.in) + if (err == nil && test.expected != "") || (err != nil && err.Error() != test.expected) { + t.Errorf("For %q, expected %q, got %q", test.in, test.expected, err) + } + } +} diff --git a/pkg/asset/installconfig/stock.go b/pkg/asset/installconfig/stock.go index c214a9f7a4c..3339131dbff 100644 --- a/pkg/asset/installconfig/stock.go +++ b/pkg/asset/installconfig/stock.go @@ -3,8 +3,8 @@ package installconfig import ( "github.com/AlecAivazis/survey" - "github.com/openshift/installer/installer/pkg/validate" "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/types/config" ) // Stock is the stock of InstallConfig assets that can be generated. 
@@ -58,7 +58,7 @@ func (s *StockImpl) EstablishStock() { Help: "The email address of the cluster administrator. This will be used to log in to the console.", }, Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - return validate.Email(ans.(string)) + return config.ValidateEmail(ans.(string)) }), }, EnvVarName: "OPENSHIFT_INSTALL_EMAIL_ADDRESS", @@ -81,7 +81,7 @@ func (s *StockImpl) EstablishStock() { Help: "The base domain of the cluster. All DNS records will be sub-domains of this base.", }, Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - return validate.DomainName(ans.(string)) + return config.ValidateDomainName(ans.(string)) }), }, EnvVarName: "OPENSHIFT_INSTALL_BASE_DOMAIN", @@ -94,7 +94,7 @@ func (s *StockImpl) EstablishStock() { Help: "The name of the cluster. This will be used when generating sub-domains.", }, Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - return validate.DomainName(ans.(string)) + return config.ValidateDomainName(ans.(string)) }), }, EnvVarName: "OPENSHIFT_INSTALL_CLUSTER_NAME", @@ -107,7 +107,7 @@ func (s *StockImpl) EstablishStock() { Help: "The container registry pull secret for this cluster.", }, Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - return validate.JSON([]byte(ans.(string))) + return config.ValidateJSON([]byte(ans.(string))) }), }, EnvVarName: "OPENSHIFT_INSTALL_PULL_SECRET", diff --git a/pkg/types/config/validate.go b/pkg/types/config/validate.go index 2673d5c13c5..e2843dfe17c 100644 --- a/pkg/types/config/validate.go +++ b/pkg/types/config/validate.go @@ -1,12 +1,15 @@ package config import ( + "encoding/json" "errors" "fmt" + "net" "regexp" + "strconv" "strings" + "unicode/utf8" - "github.com/openshift/installer/installer/pkg/validate" "github.com/openshift/installer/pkg/types/config/aws" "github.com/coreos/tectonic-config/config/tectonic-network" @@ -77,16 +80,16 @@ func (c *Cluster) Validate() []error { errs = append(errs, c.validateAWS()...) errs = append(errs, c.validatePullSecret()...) errs = append(errs, c.validateLibvirt()...) - if err := validate.PrefixError("cluster name", validate.ClusterName(c.Name)); err != nil { + if err := prefixError("cluster name", validateClusterName(c.Name)); err != nil { errs = append(errs, err) } - if err := validate.PrefixError("base domain", validate.DomainName(c.BaseDomain)); err != nil { + if err := prefixError("base domain", ValidateDomainName(c.BaseDomain)); err != nil { errs = append(errs, err) } - if err := validate.PrefixError("admin password", validate.NonEmpty(c.Admin.Password)); err != nil { + if err := prefixError("admin password", validateNonEmpty(c.Admin.Password)); err != nil { errs = append(errs, err) } - if err := validate.PrefixError("admin email", validate.Email(c.Admin.Email)); err != nil { + if err := prefixError("admin email", ValidateEmail(c.Admin.Email)); err != nil { errs = append(errs, err) } return errs @@ -104,14 +107,14 @@ func (c *Cluster) validateAWS() []error { if err := c.validateS3Bucket(); err != nil { errs = append(errs, err) } - if err := validate.PrefixError("aws vpcCIDRBlock", validate.SubnetCIDR(c.AWS.VPCCIDRBlock)); err != nil { + if err := prefixError("aws vpcCIDRBlock", validateSubnetCIDR(c.AWS.VPCCIDRBlock)); err != nil { errs = append(errs, err) } errs = append(errs, c.validateOverlapWithPodOrServiceCIDR(c.AWS.VPCCIDRBlock, "aws vpcCIDRBlock")...) 
- if err := validate.PrefixError("aws profile", validate.NonEmpty(c.AWS.Profile)); err != nil { + if err := prefixError("aws profile", validateNonEmpty(c.AWS.Profile)); err != nil { errs = append(errs, err) } - if err := validate.PrefixError("aws region", validate.NonEmpty(c.AWS.Region)); err != nil { + if err := prefixError("aws region", validateNonEmpty(c.AWS.Region)); err != nil { errs = append(errs, err) } return errs @@ -121,10 +124,10 @@ func (c *Cluster) validateAWS() []error { // overlap with the pod or service CIDRs of the cluster config. func (c *Cluster) validateOverlapWithPodOrServiceCIDR(cidr, name string) []error { var errs []error - if err := validate.PrefixError(fmt.Sprintf("%s and podCIDR", name), validate.CIDRsDontOverlap(cidr, c.Networking.PodCIDR)); err != nil { + if err := prefixError(fmt.Sprintf("%s and podCIDR", name), validateCIDRsDontOverlap(cidr, c.Networking.PodCIDR)); err != nil { errs = append(errs, err) } - if err := validate.PrefixError(fmt.Sprintf("%s and serviceCIDR", name), validate.CIDRsDontOverlap(cidr, c.Networking.ServiceCIDR)); err != nil { + if err := prefixError(fmt.Sprintf("%s and serviceCIDR", name), validateCIDRsDontOverlap(cidr, c.Networking.ServiceCIDR)); err != nil { errs = append(errs, err) } return errs @@ -136,7 +139,7 @@ func (c *Cluster) validateLibvirt() []error { if c.Platform != PlatformLibvirt { return errs } - if err := validate.PrefixError("libvirt network ipRange", validate.SubnetCIDR(c.Libvirt.Network.IPRange)); err != nil { + if err := prefixError("libvirt network ipRange", validateSubnetCIDR(c.Libvirt.Network.IPRange)); err != nil { errs = append(errs, err) } if len(c.Libvirt.MasterIPs) > 0 { @@ -144,18 +147,18 @@ func (c *Cluster) validateLibvirt() []error { errs = append(errs, fmt.Errorf("length of masterIPs does't match master count")) } for i, ip := range c.Libvirt.MasterIPs { - if err := validate.PrefixError(fmt.Sprintf("libvirt masterIPs[%d] %q", i, ip), validate.IPv4(ip)); err != nil { + if err := prefixError(fmt.Sprintf("libvirt masterIPs[%d] %q", i, ip), validateIPv4(ip)); err != nil { errs = append(errs, err) } } } - if err := validate.PrefixError("libvirt uri", validate.NonEmpty(c.Libvirt.URI)); err != nil { + if err := prefixError("libvirt uri", validateNonEmpty(c.Libvirt.URI)); err != nil { errs = append(errs, err) } - if err := validate.PrefixError("libvirt network name", validate.NonEmpty(c.Libvirt.Network.Name)); err != nil { + if err := prefixError("libvirt network name", validateNonEmpty(c.Libvirt.Network.Name)); err != nil { errs = append(errs, err) } - if err := validate.PrefixError("libvirt network ifName", validate.NonEmpty(c.Libvirt.Network.IfName)); err != nil { + if err := prefixError("libvirt network ifName", validateNonEmpty(c.Libvirt.Network.IfName)); err != nil { errs = append(errs, err) } errs = append(errs, c.validateOverlapWithPodOrServiceCIDR(c.Libvirt.Network.IPRange, "libvirt ipRange")...) 
@@ -165,19 +168,19 @@ func (c *Cluster) validateLibvirt() []error { func (c *Cluster) validateNetworking() []error { var errs []error // https://en.wikipedia.org/wiki/Maximum_transmission_unit#MTUs_for_common_media - if err := validate.PrefixError("mtu", validate.IntRange(c.Networking.MTU, 68, 64*1024)); err != nil { + if err := prefixError("mtu", validateIntRange(c.Networking.MTU, 68, 64*1024)); err != nil { errs = append(errs, err) } - if err := validate.PrefixError("podCIDR", validate.SubnetCIDR(c.Networking.PodCIDR)); err != nil { + if err := prefixError("podCIDR", validateSubnetCIDR(c.Networking.PodCIDR)); err != nil { errs = append(errs, err) } - if err := validate.PrefixError("serviceCIDR", validate.SubnetCIDR(c.Networking.ServiceCIDR)); err != nil { + if err := prefixError("serviceCIDR", validateSubnetCIDR(c.Networking.ServiceCIDR)); err != nil { errs = append(errs, err) } if err := c.validateNetworkType(); err != nil { errs = append(errs, err) } - if err := validate.PrefixError("pod and service CIDRs", validate.CIDRsDontOverlap(c.Networking.PodCIDR, c.Networking.ServiceCIDR)); err != nil { + if err := prefixError("pod and service CIDRs", validateCIDRsDontOverlap(c.Networking.PodCIDR, c.Networking.ServiceCIDR)); err != nil { errs = append(errs, err) } return errs @@ -248,7 +251,7 @@ func (c *Cluster) validateS3Bucket() error { func (c *Cluster) validatePullSecret() []error { var errs []error - if err := validate.JSON([]byte(c.PullSecret)); err != nil { + if err := ValidateJSON([]byte(c.PullSecret)); err != nil { errs = append(errs, err) } return errs @@ -313,3 +316,251 @@ func (c *Cluster) validateNoSharedNodePools() []error { } return errs } + +// ValidateDomainName checks if the given string is a valid domain name and returns an error if not. +func ValidateDomainName(v string) error { + if err := validateNonEmpty(v); err != nil { + return err + } + + split := strings.Split(v, ".") + for i, segment := range split { + // Trailing dot is OK + if len(segment) == 0 && i == len(split)-1 { + continue + } + if !isMatch("^[a-zA-Z0-9-]{1,63}$", segment) { + return errors.New("invalid domain name") + } + } + return nil +} + +// ValidateEmail checks if the given string is a valid email address and returns an error if not. +func ValidateEmail(v string) error { + if err := validateNonEmpty(v); err != nil { + return err + } + + invalidError := errors.New("invalid email address") + + split := strings.Split(v, "@") + if len(split) != 2 { + return invalidError + } + localPart := split[0] + domain := split[1] + + if validateNonEmpty(localPart) != nil { + return invalidError + } + + // No whitespace allowed in local-part + if isMatch(`\s`, localPart) { + return invalidError + } + + return ValidateDomainName(domain) +} + +// ValidateJSON validates that the given data is valid JSON. +func ValidateJSON(data []byte) error { + var dummy interface{} + return json.Unmarshal(data, &dummy) +} + +// prefixError wraps an error with a prefix or returns nil if there was no error. +// This is useful for wrapping errors returned by generic error funcs like `validateNonEmpty` so that the error includes the offending field name. +func prefixError(prefix string, err error) error { + if err != nil { + return fmt.Errorf("%s: %v", prefix, err) + } + return nil +} + +func isMatch(re string, v string) bool { + return regexp.MustCompile(re).MatchString(v) +} + +// validateClusterName checks if the given string is a valid name for a cluster and returns an error if not. 
+func validateClusterName(v string) error { + if err := validateNonEmpty(v); err != nil { + return err + } + + if length := utf8.RuneCountInString(v); length < 1 || length > 253 { + return errors.New("must be between 1 and 253 characters") + } + + if strings.ToLower(v) != v { + return errors.New("must be lower case") + } + + if !isMatch("^[a-z0-9-.]*$", v) { + return errors.New("only lower case alphanumeric [a-z0-9], dashes and dots are allowed") + } + + isAlphaNum := regexp.MustCompile("^[a-z0-9]$").MatchString + + // If we got this far, we know the string is ASCII and has at least one character + if !isAlphaNum(v[:1]) || !isAlphaNum(v[len(v)-1:]) { + return errors.New("must start and end with a lower case alphanumeric character [a-z0-9]") + } + + for _, segment := range strings.Split(v, ".") { + // Each segment can have up to 63 characters + if utf8.RuneCountInString(segment) > 63 { + return errors.New("no segment between dots can be more than 63 characters") + } + if !isAlphaNum(segment[:1]) || !isAlphaNum(segment[len(segment)-1:]) { + return errors.New("segments between dots must start and end with a lower case alphanumeric character [a-z0-9]") + } + } + + return nil +} + +// validateNonEmpty checks if the given string contains at least one non-whitespace character and returns an error if not. +func validateNonEmpty(v string) error { + if utf8.RuneCountInString(strings.TrimSpace(v)) == 0 { + return errors.New("cannot be empty") + } + return nil +} + +// validateSubnetCIDR checks if the given string is a valid CIDR for a master nodes or worker nodes subnet and returns an error if not. +func validateSubnetCIDR(v string) error { + if err := validateNonEmpty(v); err != nil { + return err + } + + split := strings.Split(v, "/") + + if len(split) == 1 { + return errors.New("must provide a CIDR netmask (eg, /24)") + } + + if len(split) != 2 { + return errors.New("invalid IPv4 address") + } + + ip := split[0] + + if err := validateIPv4(ip); err != nil { + return errors.New("invalid IPv4 address") + } + + if mask, err := strconv.Atoi(split[1]); err != nil || mask < 0 || mask > 32 { + return errors.New("invalid netmask size (must be between 0 and 32)") + } + + // Catch any invalid CIDRs not caught by the checks above + if _, _, err := net.ParseCIDR(v); err != nil { + return errors.New("invalid CIDR") + } + + if strings.HasPrefix(ip, "172.17.") { + return errors.New("overlaps with default Docker Bridge subnet (172.17.0.0/16)") + } + + return nil +} + +// validateCIDRsDontOverlap ensures two given CIDRs don't overlap +// with one another. CIDR starting IPs are canonicalized +// before being compared. +func validateCIDRsDontOverlap(acidr, bcidr string) error { + _, a, err := net.ParseCIDR(acidr) + if err != nil { + return fmt.Errorf("invalid CIDR %q: %v", acidr, err) + } + if err := canonicalizeIP(&a.IP); err != nil { + return fmt.Errorf("invalid CIDR %q: %v", acidr, err) + } + _, b, err := net.ParseCIDR(bcidr) + if err != nil { + return fmt.Errorf("invalid CIDR %q: %v", bcidr, err) + } + if err := canonicalizeIP(&b.IP); err != nil { + return fmt.Errorf("invalid CIDR %q: %v", bcidr, err) + } + err = fmt.Errorf("%q and %q overlap", acidr, bcidr) + // IPs are of different families. 
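+ // After canonicalization, a 4-byte and a 16-byte IP belong to different
+ // address families, so such CIDRs are treated as non-overlapping by design.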
+ if len(a.IP) != len(b.IP) { + return nil + } + if a.Contains(b.IP) { + return err + } + if a.Contains(lastIP(b)) { + return err + } + if b.Contains(a.IP) { + return err + } + if b.Contains(lastIP(a)) { + return err + } + return nil +} + +// validateIPv4 checks if the given string is a valid IP v4 address and returns an error if not. +// Based on net.ParseIP. +func validateIPv4(v string) error { + if err := validateNonEmpty(v); err != nil { + return err + } + if ip := net.ParseIP(v); ip == nil || !strings.Contains(v, ".") { + return errors.New("invalid IPv4 address") + } + return nil +} + +// validateIntRange checks if the given string is a valid integer between `min` and `max` and returns an error if not. +func validateIntRange(v string, min int, max int) error { + i, err := strconv.Atoi(v) + if err != nil { + return validateInt(v) + } + if i < min { + return fmt.Errorf("cannot be less than %v", min) + } + if i > max { + return fmt.Errorf("cannot be greater than %v", max) + } + return nil +} + +// validateInt checks if the given string is a valid integer and returns an error if not. +func validateInt(v string) error { + if err := validateNonEmpty(v); err != nil { + return err + } + + if _, err := strconv.Atoi(v); err != nil { + return errors.New("invalid integer") + } + return nil +} + +// canonicalizeIP ensures that the given IP is in standard form +// and returns an error otherwise. +func canonicalizeIP(ip *net.IP) error { + if ip.To4() != nil { + *ip = ip.To4() + return nil + } + if ip.To16() != nil { + *ip = ip.To16() + return nil + } + return fmt.Errorf("IP %q is of unknown type", ip) +} + +func lastIP(cidr *net.IPNet) net.IP { + var last net.IP + for i := 0; i < len(cidr.IP); i++ { + last = append(last, cidr.IP[i]|^cidr.Mask[i]) + } + return last +} diff --git a/pkg/types/config/validate_test.go b/pkg/types/config/validate_test.go index 1e69795feee..e63194a4729 100644 --- a/pkg/types/config/validate_test.go +++ b/pkg/types/config/validate_test.go @@ -1,6 +1,8 @@ package config import ( + "net" + "strings" "testing" "github.com/openshift/installer/pkg/types/config/aws" @@ -611,3 +613,306 @@ func TestValidateOverlapWithPodOrServiceCIDR(t *testing.T) { } } } + +func TestLastIP(t *testing.T) { + cases := []struct { + in net.IPNet + out net.IP + }{ + { + in: net.IPNet{ + IP: net.ParseIP("192.168.0.0").To4(), + Mask: net.CIDRMask(24, 32), + }, + out: net.ParseIP("192.168.0.255"), + }, + { + in: net.IPNet{ + IP: net.ParseIP("192.168.0.0").To4(), + Mask: net.CIDRMask(22, 32), + }, + out: net.ParseIP("192.168.3.255"), + }, + { + in: net.IPNet{ + IP: net.ParseIP("192.168.0.0").To4(), + Mask: net.CIDRMask(32, 32), + }, + out: net.ParseIP("192.168.0.0"), + }, + { + in: net.IPNet{ + IP: net.ParseIP("0.0.0.0").To4(), + Mask: net.CIDRMask(0, 32), + }, + out: net.ParseIP("255.255.255.255"), + }, + } + + var out net.IP + for i, c := range cases { + if out = lastIP(&c.in); out.String() != c.out.String() { + t.Errorf("test case %d: expected %s but got %s", i, c.out, out) + } + } +} + +const caseMsg = "must be lower case" +const emptyMsg = "cannot be empty" +const invalidDomainMsg = "invalid domain name" +const invalidHostMsg = "invalid host (must be a domain name or IP address)" +const invalidIPMsg = "invalid IPv4 address" +const invalidIntMsg = "invalid integer" +const invalidPortMsg = "invalid port number" +const noCIDRNetmaskMsg = "must provide a CIDR netmask (eg, /24)" + +type test struct { + in string + expected string +} + +type validator func(string) error + +func runTests(t *testing.T, 
funcName string, fn validator, tests []test) { + for _, test := range tests { + err := fn(test.in) + if (err == nil && test.expected != "") || (err != nil && err.Error() != test.expected) { + t.Errorf("For %s(%q), expected %q, got %q", funcName, test.in, test.expected, err) + } + } +} + +func TestNonEmpty(t *testing.T) { + tests := []test{ + {"", emptyMsg}, + {" ", emptyMsg}, + {"a", ""}, + {".", ""}, + {"日本語", ""}, + } + runTests(t, "NonEmpty", validateNonEmpty, tests) +} + +func TestInt(t *testing.T) { + tests := []test{ + {"", emptyMsg}, + {" ", emptyMsg}, + {"2 3", invalidIntMsg}, + {"1.1", invalidIntMsg}, + {"abc", invalidIntMsg}, + {"日本語", invalidIntMsg}, + {"1 abc", invalidIntMsg}, + {"日本語2", invalidIntMsg}, + {"0", ""}, + {"1", ""}, + {"999999", ""}, + {"-1", ""}, + } + runTests(t, "Int", validateInt, tests) +} + +func TestIntRange(t *testing.T) { + tests := []struct { + in string + min int + max int + expected string + }{ + {"", 4, 6, emptyMsg}, + {" ", 4, 6, emptyMsg}, + {"2 3", 1, 2, invalidIntMsg}, + {"1.1", 0, 0, invalidIntMsg}, + {"abc", -2, -1, invalidIntMsg}, + {"日本語", 99, 100, invalidIntMsg}, + {"5", 4, 6, ""}, + {"5", 5, 5, ""}, + {"5", 6, 8, "cannot be less than 6"}, + {"5", 6, 4, "cannot be less than 6"}, + {"5", 2, 4, "cannot be greater than 4"}, + } + + for _, test := range tests { + err := validateIntRange(test.in, test.min, test.max) + if (err == nil && test.expected != "") || (err != nil && err.Error() != test.expected) { + t.Errorf("For IntRange(%q, %v, %v), expected %q, got %q", test.in, test.min, test.max, test.expected, err) + } + } +} + +func TestClusterName(t *testing.T) { + const charsMsg = "only lower case alphanumeric [a-z0-9], dashes and dots are allowed" + const lengthMsg = "must be between 1 and 253 characters" + const segmentLengthMsg = "no segment between dots can be more than 63 characters" + const startEndCharMsg = "must start and end with a lower case alphanumeric character [a-z0-9]" + const segmentStartEndCharMsg = "segments between dots must start and end with a lower case alphanumeric character [a-z0-9]" + + maxSizeName := strings.Repeat("123456789.", 25) + "123" + maxSizeSegment := strings.Repeat("1234567890", 6) + "123" + + tests := []test{ + {"", emptyMsg}, + {" ", emptyMsg}, + {"a", ""}, + {"A", caseMsg}, + {"abc D", caseMsg}, + {"1", ""}, + {".", startEndCharMsg}, + {"a.", startEndCharMsg}, + {".a", startEndCharMsg}, + {"a.a", ""}, + {"-a", startEndCharMsg}, + {"a-", startEndCharMsg}, + {"a.-a", segmentStartEndCharMsg}, + {"a-.a", segmentStartEndCharMsg}, + {"a%a", charsMsg}, + {"日本語", charsMsg}, + {"a日本語a", charsMsg}, + {maxSizeName, ""}, + {maxSizeName + "a", lengthMsg}, + {maxSizeSegment + ".abc", ""}, + {maxSizeSegment + "a.abc", segmentLengthMsg}, + } + runTests(t, "ClusterName", validateClusterName, tests) +} + +func TestIPv4(t *testing.T) { + tests := []test{ + {"", emptyMsg}, + {" ", emptyMsg}, + {"0.0.0.0", ""}, + {"1.2.3.4", ""}, + {"1.2.3.", invalidIPMsg}, + {"1.2.3.4.", invalidIPMsg}, + {"1.2.3.a", invalidIPMsg}, + {"255.255.255.255", ""}, + } + runTests(t, "IPv4", validateIPv4, tests) +} + +func TestSubnetCIDR(t *testing.T) { + const netmaskSizeMsg = "invalid netmask size (must be between 0 and 32)" + + tests := []test{ + {"", emptyMsg}, + {" ", emptyMsg}, + {"/16", invalidIPMsg}, + {"0.0.0.0/0", ""}, + {"0.0.0.0/32", ""}, + {"1.2.3.4", noCIDRNetmaskMsg}, + {"1.2.3.", noCIDRNetmaskMsg}, + {"1.2.3.4.", noCIDRNetmaskMsg}, + {"1.2.3.4/0", ""}, + {"1.2.3.4/1", ""}, + {"1.2.3.4/31", ""}, + {"1.2.3.4/32", ""}, + {"1.2.3./16", 
invalidIPMsg}, + {"1.2.3.4./16", invalidIPMsg}, + {"1.2.3.4/33", netmaskSizeMsg}, + {"1.2.3.4/-1", netmaskSizeMsg}, + {"1.2.3.4/abc", netmaskSizeMsg}, + {"172.17.1.2", noCIDRNetmaskMsg}, + {"172.17.1.2/", netmaskSizeMsg}, + {"172.17.1.2/33", netmaskSizeMsg}, + {"172.17.1.2/20", "overlaps with default Docker Bridge subnet (172.17.0.0/16)"}, + {"255.255.255.255/1", ""}, + {"255.255.255.255/32", ""}, + } + runTests(t, "SubnetCIDR", validateSubnetCIDR, tests) +} + +func TestDomainName(t *testing.T) { + tests := []test{ + {"", emptyMsg}, + {" ", emptyMsg}, + {"a", ""}, + {".", invalidDomainMsg}, + {"日本語", invalidDomainMsg}, + {"日本語.com", invalidDomainMsg}, + {"abc.日本語.com", invalidDomainMsg}, + {"a日本語a.com", invalidDomainMsg}, + {"abc", ""}, + {"ABC", ""}, + {"ABC123", ""}, + {"ABC123.COM123", ""}, + {"1", ""}, + {"0.0", ""}, + {"1.2.3.4", ""}, + {"1.2.3.4.", ""}, + {"abc.", ""}, + {"abc.com", ""}, + {"abc.com.", ""}, + {"a.b.c.d.e.f", ""}, + {".abc", invalidDomainMsg}, + {".abc.com", invalidDomainMsg}, + {".abc.com", invalidDomainMsg}, + } + runTests(t, "DomainName", ValidateDomainName, tests) +} + +func TestEmail(t *testing.T) { + const invalidMsg = "invalid email address" + tests := []test{ + {"", emptyMsg}, + {" ", emptyMsg}, + {"a", invalidMsg}, + {".", invalidMsg}, + {"日本語", invalidMsg}, + {"a@abc.com", ""}, + {"A@abc.com", ""}, + {"1@abc.com", ""}, + {"a.B.1.あ@abc.com", ""}, + {"ア@abc.com", ""}, + {"中文@abc.com", ""}, + {"a@abc.com", ""}, + {"a@ABC.com", ""}, + {"a@123.com", ""}, + {"a@日本語.com", invalidDomainMsg}, + {"a@.com", invalidDomainMsg}, + {"@abc.com", invalidMsg}, + } + runTests(t, "Email", ValidateEmail, tests) +} + +func TestCIDRsDontOverlap(t *testing.T) { + cases := []struct { + a string + b string + err bool + }{ + { + a: "192.168.0.0/24", + b: "192.168.0.0/24", + err: true, + }, + { + a: "192.168.0.0/24", + b: "192.168.0.3/24", + err: true, + }, + { + a: "192.168.0.0/30", + b: "192.168.0.3/30", + err: true, + }, + { + a: "192.168.0.0/30", + b: "192.168.0.4/30", + err: false, + }, + { + a: "0.0.0.0/0", + b: "192.168.0.0/24", + err: true, + }, + } + + for i, c := range cases { + if err := validateCIDRsDontOverlap(c.a, c.b); (err != nil) != c.err { + no := "no" + if c.err { + no = "an" + } + t.Errorf("test case %d: expected %s error, got %v", i, no, err) + } + } +} From b6c0d8c1e74d74ea007e11a4049d23b96d67bdb8 Mon Sep 17 00:00:00 2001 From: Alex Crawford Date: Wed, 26 Sep 2018 15:11:46 -0700 Subject: [PATCH 4/7] installer: remove package This is the last of the old installer code. It's no longer used and can be removed. 
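The removed installer/pkg/validate helpers are superseded by the validators added to pkg/types/config above. For reference, a minimal sketch of how the exported ValidateDomainName, ValidateEmail, and ValidateJSON compose with the unexported prefixError; the validateAdmin wrapper below is purely illustrative and is not added by any patch in this series, though the Admin.Email, BaseDomain, and PullSecret fields it reads do appear elsewhere in these diffs:

package config

// validateAdmin (illustrative only) wraps field-level failures with their field
// names, mirroring the pattern used by validateAWS and validateLibvirt above.
func (c *Cluster) validateAdmin() []error {
	var errs []error
	if err := prefixError("admin email", ValidateEmail(c.Admin.Email)); err != nil {
		errs = append(errs, err)
	}
	if err := prefixError("baseDomain", ValidateDomainName(c.BaseDomain)); err != nil {
		errs = append(errs, err)
	}
	if err := prefixError("pullSecret", ValidateJSON([]byte(c.PullSecret))); err != nil {
		errs = append(errs, err)
	}
	return errs
}

Each call returns either nil or an error already prefixed with the offending field name, so callers can simply append the results, as the validate* methods above do.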
--- .gitignore | 43 -- CONTRIBUTING.md | 1 - examples/aws.yaml | 234 ------- examples/libvirt.yaml | 103 --- hack/test-bazel-build-tarball.sh | 12 - installer/cmd/tectonic/main.go | 54 -- .../fixtures/generated/tls/aggregator-ca.crt | 20 - .../fixtures/kube-system.yaml | 109 ---- .../config-generator/fixtures/test-aws.yaml | 40 -- .../pkg/config-generator/fixtures/test.yaml | 11 - installer/pkg/config-generator/generator.go | 521 --------------- .../pkg/config-generator/generator_test.go | 171 ----- installer/pkg/config-generator/ignition.go | 146 ----- installer/pkg/config-generator/tls.go | 406 ------------ installer/pkg/copy/copy.go | 26 - installer/pkg/copy/copy_test.go | 38 -- installer/pkg/validate/fixtures/exists | 0 installer/pkg/validate/last_ip_test.go | 49 -- installer/pkg/validate/validate.go | 470 -------------- installer/pkg/validate/validate_test.go | 612 ------------------ installer/pkg/workflow/convert.go | 43 -- installer/pkg/workflow/destroy.go | 160 ----- .../pkg/workflow/fixtures/aws.basic.yaml | 39 -- .../pkg/workflow/fixtures/terraform.tfvars | 27 - installer/pkg/workflow/init.go | 122 ---- installer/pkg/workflow/init_test.go | 126 ---- installer/pkg/workflow/install.go | 92 --- installer/pkg/workflow/utils.go | 102 --- installer/pkg/workflow/workflow.go | 49 -- installer/pkg/workflow/workflow_test.go | 53 -- 30 files changed, 3879 deletions(-) delete mode 100644 examples/aws.yaml delete mode 100644 examples/libvirt.yaml delete mode 100755 hack/test-bazel-build-tarball.sh delete mode 100644 installer/cmd/tectonic/main.go delete mode 100644 installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt delete mode 100644 installer/pkg/config-generator/fixtures/kube-system.yaml delete mode 100644 installer/pkg/config-generator/fixtures/test-aws.yaml delete mode 100644 installer/pkg/config-generator/fixtures/test.yaml delete mode 100644 installer/pkg/config-generator/generator.go delete mode 100644 installer/pkg/config-generator/generator_test.go delete mode 100644 installer/pkg/config-generator/ignition.go delete mode 100644 installer/pkg/config-generator/tls.go delete mode 100644 installer/pkg/copy/copy.go delete mode 100644 installer/pkg/copy/copy_test.go delete mode 100644 installer/pkg/validate/fixtures/exists delete mode 100644 installer/pkg/validate/last_ip_test.go delete mode 100644 installer/pkg/validate/validate.go delete mode 100644 installer/pkg/validate/validate_test.go delete mode 100644 installer/pkg/workflow/convert.go delete mode 100644 installer/pkg/workflow/destroy.go delete mode 100644 installer/pkg/workflow/fixtures/aws.basic.yaml delete mode 100644 installer/pkg/workflow/fixtures/terraform.tfvars delete mode 100644 installer/pkg/workflow/init.go delete mode 100644 installer/pkg/workflow/init_test.go delete mode 100644 installer/pkg/workflow/install.go delete mode 100644 installer/pkg/workflow/utils.go delete mode 100644 installer/pkg/workflow/workflow.go delete mode 100644 installer/pkg/workflow/workflow_test.go diff --git a/.gitignore b/.gitignore index d719d0491d6..6c47db76625 100644 --- a/.gitignore +++ b/.gitignore @@ -16,46 +16,3 @@ bin_test/ matchbox/ /contrib/govcloud/vpn.conf tectonic-dev - -# non-default Bazel stuff -.build/ -.cache - -# Created by https://www.gitignore.io/api/go,bazel,terraform -# HOWEVER, I had to remove the ignore for `vendor/` - -### Bazel ### -/bazel-* - -### Go ### -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go 
coverage tool, specifically when used with LiteIDE -*.out - -### Terraform ### -# Local .terraform directories -**/.terraform/* - -# .tfstate files -*.tfstate -*.tfstate.* - -# Crash log files -crash.log - -# Ignore any .tfvars files that are generated automatically for each Terraform run. Most -# .tfvars files are managed as part of configuration and so should be included in -# version control. -# -# example.tfvars - -# End of https://www.gitignore.io/api/go,bazel,terraform diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 24ee0b39fc6..e9e9029e5a5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -40,7 +40,6 @@ For contributors who want to work up pull requests, the workflow is roughly: hack/go-lint.sh $(go list -f '{{ .ImportPath }}' ./...) hack/go-vet.sh ./... hack/shellcheck.sh - hack/test-bazel-build-tarball.sh hack/tf-fmt.sh -list -check hack/tf-lint.sh hack/yaml-lint.sh diff --git a/examples/aws.yaml b/examples/aws.yaml deleted file mode 100644 index 676d7f52dec..00000000000 --- a/examples/aws.yaml +++ /dev/null @@ -1,234 +0,0 @@ -admin: - email: "a@b.c" - password: "verysecure" - sshKey: "ssh-ed25519 AAAA..." -aws: - # (optional) AMI override for all nodes. Example: `ami-foobar123`. - # ec2AMIOverride: - - external: - # (optional) List of subnet IDs within an existing VPC to deploy master nodes into. - # Required to use an existing VPC and the list must match the AZ count. - # - # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]` - # masterSubnetIDs: - - # (optional) If set, the given Route53 zone ID will be used as the internal (private) zone. - # This zone will be used to create etcd DNS records as well as internal API and internal Ingress records. - # If set, no additional private zone will be created. - # - # Example: `"Z1ILINNUJGTAO1"` - # privateZone: - - # (optional) ID of an existing VPC to launch nodes into. - # If unset a new VPC is created. - # - # Example: `vpc-123456` - # vpcID: - - # (optional) List of subnet IDs within an existing VPC to deploy worker nodes into. - # Required to use an existing VPC and the list must match the AZ count. - # - # Example: `["subnet-111111", "subnet-222222", "subnet-333333"]` - # workerSubnetIDs: - - # (optional) Extra AWS tags to be applied to created resources. - # - # Example: `{ "key" = "value", "foo" = "bar" }` - # extraTags: - - # (optional) Name of IAM role to use to access AWS in order to deploy the Tectonic Cluster. - # The name is also the full role's ARN. - # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # installerRole: - - master: - # (optional) This configures master availability zones and their corresponding subnet CIDRs directly. - # - # Example: - # `{ eu-west-1a = "10.0.0.0/20", eu-west-1b = "10.0.16.0/20" }` - # customSubnets: - - # Instance size for the master node(s). Example: `t2.medium`. - ec2Type: t2.medium - - # (optional) List of additional security group IDs for master nodes. - # - # Example: `["sg-51530134", "sg-b253d7cc"]` - # extraSGIDs: - - # (optional) Name of IAM role to use for the instance profiles of master nodes. - # The name is also the last part of a role's ARN. - # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # * Role Name = tectonic-installer - # iamRoleName: - - rootVolume: - # The amount of provisioned IOPS for the root block device of master nodes. - # Ignored if the volume type is not io1. - iops: 100 - - # The size of the volume in gigabytes for the root block device of master nodes. 
- size: 30 - - # The type of volume for the root block device of master nodes. - type: gp2 - - # (optional) If set to true, create private-facing ingress resources (ELB, A-records). - # If set to false, no private-facing ingress resources will be provisioned and all DNS records will be created in the public Route53 zone. - # privateEndpoints: true - - # (optional) This declares the AWS credentials profile to use. - # profile: default - - # (optional) If set to true, create public-facing ingress resources (ELB, A-records). - # If set to false, no public-facing ingress resources will be created. - # publicEndpoints: true - - # The target AWS region for the cluster. - region: us-east-1 - - # Block of IP addresses used by the VPC. - # This should not overlap with any other networks, such as a private datacenter connected via Direct Connect. - vpcCIDRBlock: 10.0.0.0/16 - - worker: - # (optional) This configures worker availability zones and their corresponding subnet CIDRs directly. - # - # Example: `{ eu-west-1a = "10.0.64.0/20", eu-west-1b = "10.0.80.0/20" }` - # customSubnets: - - # Instance size for the worker node(s). Example: `t2.medium`. - ec2Type: t2.medium - - # (optional) List of additional security group IDs for worker nodes. - # - # Example: `["sg-51530134", "sg-b253d7cc"]` - # extraSGIDs: - - # (optional) Name of IAM role to use for the instance profiles of worker nodes. - # The name is also the last part of a role's ARN. - # - # Example: - # * Role ARN = arn:aws:iam::123456789012:role/tectonic-installer - # * Role Name = tectonic-installer - # iamRoleName: - - # (optional) List of ELBs to attach all worker instances to. - # This is useful for exposing NodePort services via load-balancers managed separately from the cluster. - # - # Example: - # * `["ingress-nginx"]` - # loadBalancers: - - rootVolume: - # The amount of provisioned IOPS for the root block device of worker nodes. - # Ignored if the volume type is not io1. - iops: 100 - - # The size of the volume in gigabytes for the root block device of worker nodes. - size: 30 - - # The type of volume for the root block device of worker nodes. - type: gp2 - -# The base DNS domain of the cluster. It must NOT contain a trailing period. Some -# DNS providers will automatically add this if necessary. -# -# Example: `openshift.example.com`. -# -# Note: This field MUST be set manually prior to creating the cluster. -# This applies only to cloud platforms. -# -# For AWS, this must be a previously-existing public Route 53 zone. -# You can check for any already in your account with: -# -# $ aws route53 list-hosted-zones --query 'HostedZones[? !(Config.PrivateZone)].Name' --output text -baseDomain: - -ca: - # (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate. - # If left blank, a CA certificate will be automatically generated. - # cert: - - # (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate. - # This field is mandatory if `ca_cert` is set. - # key: - - # (optional) The algorithm used to generate ca_key. - # The default value is currently recommended. - # This field is mandatory if `ca_cert` is set. - # keyAlg: RSA - -iscsi: - # (optional) Start iscsid.service to enable iscsi volume attachment. - # enabled: false - -master: - # The name of the node pool(s) to use for master nodes - nodePools: - - master - -# The name of the cluster. 
-# If used in a cloud-environment, this will be prepended to `baseDomain` resulting in the URL to the Tectonic console. -# -# Note: This field MUST be set manually prior to creating the cluster. -# Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints. -name: - -networking: - # (optional) This declares the MTU used by Calico. - # mtu: - - # This declares the IP range to assign Kubernetes pod IPs in CIDR notation. - podCIDR: 10.2.0.0/16 - - # This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation. - # The maximum size of this IP range is /12 - serviceCIDR: 10.3.0.0/16 - - # (optional) Configures the network to be used in Tectonic. One of the following values can be used: - # - # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN. - # - # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico. - # - # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only. - # - # - "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services. - # type: flannel - -nodePools: - # The number of master nodes to be created. - # This applies only to cloud platforms. - - count: 1 - name: master - - # The number of worker nodes to be created. - # This applies only to cloud platforms. - - count: 3 - name: worker - -# The platform used for deploying. -platform: aws - -# The pull secret in JSON format. -# This is known to be a "Docker pull secret" as produced by the docker login [1] command. -# A sample JSON content is shown in [2]. -# You can download the pull secret from your Account overview page at [3]. -# -# [1] https://docs.docker.com/engine/reference/commandline/login/ -# -# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup -# -# [3] https://account.coreos.com/overview -pullSecret: '{"auths": {}}' - -worker: - # The name of the node pool(s) to use for workers - nodePools: - - worker diff --git a/examples/libvirt.yaml b/examples/libvirt.yaml deleted file mode 100644 index 7065c8ad9ae..00000000000 --- a/examples/libvirt.yaml +++ /dev/null @@ -1,103 +0,0 @@ -admin: - email: a@b.c - password: verysecure - sshKey: "ssh-ed25519 AAAA..." -# The base DNS domain of the cluster. It must NOT contain a trailing period. Some -# DNS providers will automatically add this if necessary. -# -# Example: `openshift.example.com`. -# -# Note: This field MUST be set manually prior to creating the cluster. -baseDomain: - -libvirt: - # You must specify an IP address here that libvirtd is listening on, - # and that the cluster-api controller pod will be able to connect - # to. Often 192.168.122.1 is the default for the virbr0 interface. - uri: qemu+tcp://192.168.122.1/system - network: - name: tectonic - ifName: tt0 - ipRange: 192.168.124.0/24 - image: http://aos-ostree.rhev-ci-vms.eng.rdu2.redhat.com/rhcos/images/cloud/latest/rhcos-qemu.qcow2.gz - -ca: - # (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate. - # If left blank, a CA certificate will be automatically generated. 
- # cert: - - # (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate. - # This field is mandatory if `ca_cert` is set. - # key: - - # (optional) The algorithm used to generate ca_key. - # The default value is currently recommended. - # This field is mandatory if `ca_cert` is set. - # keyAlg: RSA - -iscsi: - # (optional) Start iscsid.service to enable iscsi volume attachment. - # enabled: false - -master: - nodePools: - - master - -# The name of the cluster. -# If used in a cloud-environment, this will be prepended to `baseDomain` resulting in the URL to the Tectonic console. -# -# Note: This field MUST be set manually prior to creating the cluster. -# Warning: Special characters in the name like '.' may cause errors on OpenStack platforms due to resource name constraints. -name: - -networking: - # (optional) This declares the MTU used by Calico. - # mtu: - - # (optional) This declares the IP range to assign Kubernetes pod IPs in CIDR notation. - podCIDR: 10.2.0.0/16 - - # (optional) This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation. - # The maximum size of this IP range is /12 - serviceCIDR: 10.3.0.0/16 - - # (optional) Configures the network to be used in Tectonic. One of the following values can be used: - # - # - "flannel": enables overlay networking only. This is implemented by flannel using VXLAN. - # - # - "canal": enables overlay networking including network policy. Overlay is implemented by flannel using VXLAN. Network policy is implemented by Calico. - # - # - "calico-ipip": [ALPHA] enables BGP based networking. Routing and network policy is implemented by Calico. Note this has been tested on baremetal installations only. - # - # - "none": disables the installation of any Pod level networking layer provided by Tectonic. By setting this value, users are expected to deploy their own solution to enable network connectivity for Pods and Services. - # type: flannel - -nodePools: - # The number of master nodes to be created. - # This applies only to cloud platforms. - - count: 1 - name: master - - # The number of worker nodes to be created. - # This applies only to cloud platforms. - - count: 2 - name: worker - -# The platform used for deploying. -platform: libvirt - -# The pull secret in JSON format. -# This is known to be a "Docker pull secret" as produced by the docker login [1] command. -# A sample JSON content is shown in [2]. -# You can download the pull secret from your Account overview page at [3]. 
-# -# [1] https://docs.docker.com/engine/reference/commandline/login/ -# -# [2] https://coreos.com/os/docs/latest/registry-authentication.html#manual-registry-auth-setup -# -# [3] https://account.coreos.com/overview -pullSecret: '{"auths": {}}' - -worker: - nodePools: - - worker diff --git a/hack/test-bazel-build-tarball.sh b/hack/test-bazel-build-tarball.sh deleted file mode 100755 index 763cb41ebe1..00000000000 --- a/hack/test-bazel-build-tarball.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh -if [ "$IS_CONTAINER" != "" ]; then - set -x - bazel --output_base=/tmp build "$@" tarball -else - podman run --rm \ - --env IS_CONTAINER=TRUE \ - --volume "${PWD}:${PWD}:z" \ - --workdir "${PWD}" \ - quay.io/coreos/tectonic-builder:bazel-v0.3 \ - ./hack/test-bazel-build-tarball.sh -fi diff --git a/installer/cmd/tectonic/main.go b/installer/cmd/tectonic/main.go deleted file mode 100644 index b880720c0eb..00000000000 --- a/installer/cmd/tectonic/main.go +++ /dev/null @@ -1,54 +0,0 @@ -package main - -import ( - "os" - - log "github.com/sirupsen/logrus" - "gopkg.in/alecthomas/kingpin.v2" - - "github.com/openshift/installer/installer/pkg/workflow" -) - -var ( - clusterInitCommand = kingpin.Command("init", "Initialize a new Tectonic cluster") - clusterInitConfigFlag = clusterInitCommand.Flag("config", "Cluster specification file").Required().ExistingFile() - - clusterInstallCommand = kingpin.Command("install", "Create a new Tectonic cluster") - clusterInstallDirFlag = clusterInstallCommand.Flag("dir", "Cluster directory").Default(".").ExistingDir() - - clusterDestroyCommand = kingpin.Command("destroy", "Destroy an existing Tectonic cluster") - clusterDestroyDirFlag = clusterDestroyCommand.Flag("dir", "Cluster directory").Default(".").ExistingDir() - clusterDestroyContOnErr = clusterDestroyCommand.Flag("continue-on-error", "Log errors, but attempt to continue cleaning up the cluster. THIS MAY LEAK RESOURCES, because you may not have enough state left after a partial removal to be able to perform a second destroy.").Default("false").Bool() - - convertCommand = kingpin.Command("convert", "Convert a tfvars.json to a Tectonic config.yaml") - convertConfigFlag = convertCommand.Flag("config", "tfvars.json file").Required().ExistingFile() - - logLevel = kingpin.Flag("log-level", "log level (e.g. \"debug\")").Default("info").Enum("debug", "info", "warn", "error", "fatal", "panic") -) - -func main() { - var w workflow.Workflow - - switch kingpin.Parse() { - case clusterInitCommand.FullCommand(): - w = workflow.InitWorkflow(*clusterInitConfigFlag) - case clusterInstallCommand.FullCommand(): - w = workflow.InstallWorkflow(*clusterInstallDirFlag) - case clusterDestroyCommand.FullCommand(): - w = workflow.DestroyWorkflow(*clusterDestroyDirFlag, *clusterDestroyContOnErr) - case convertCommand.FullCommand(): - w = workflow.ConvertWorkflow(*convertConfigFlag) - } - - l, err := log.ParseLevel(*logLevel) - if err != nil { - // By definition we should never enter this condition since kingpin should be guarding against incorrect values. 
- log.Fatalf("invalid log-level: %v", err) - } - log.SetLevel(l) - - if err := w.Execute(); err != nil { - log.Fatal(err) - os.Exit(1) - } -} diff --git a/installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt b/installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt deleted file mode 100644 index 27630f2cc4b..00000000000 --- a/installer/pkg/config-generator/fixtures/generated/tls/aggregator-ca.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDMzCCAhugAwIBAgIISI9lAF1J/fMwDQYJKoZIhvcNAQELBQAwJjESMBAGA1UE -CxMJb3BlbnNoaWZ0MRAwDgYDVQQDEwdyb290LWNhMB4XDTE4MDkwNzIxNTIyMVoX -DTI4MDkwNDIxNTIyMlowKDERMA8GA1UECxMIYm9vdGt1YmUxEzARBgNVBAMTCmFn -Z3JlZ2F0b3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNm3Hre4oy -+J31X3i4dxy8hejzBISY8TSeoXMP9AZczQM1oyekSgf5EMSfh9DrDcKmPuTMhl6a -w6ZSjPDbrFlKrF2oIpq04Zn160i95QLC7zI8WzU/cNDS22J9pk8k9K47c/hZKRrd -AjT0rNI4qpVsDv43O1H2s5M6HiXB62rwakEALoATeufaPJEcAgP31nH9FhHft3uO -QkOur6iuXBDtv/FtPEFhmR5rDBYvUxaKXfB5c+TCevTZtjmP2bdRrvyHdWWapPtJ -auoGcV5s4Skp3tcEy24Qrl8FxR+Rsy7V+eYUPIWG1mDugmwpgq4SrB9idGoc/jqu -p6oDKRZUpxZdAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBRFMWpOb15dmp4aVFFdPtY/lrgkGTAfBgNVHSMEGDAWgBRF -MWpOb15dmp4aVFFdPtY/lrgkGTANBgkqhkiG9w0BAQsFAAOCAQEAjzLnsIZ+Jwqr -8h4TucqabLYkW1b/mk3k2GDg46xTOGpJo8UGesboclDxK/rtmDObgMLh2I9ekkPY -Yr1QNE5W4vqDOsmBm2MkqDVUg/EuOdSiNmWcJs/du452AVtk80yXTSaBQuW85SAe -AlG2CXadwTkxtLypLsUeOriKDRCV3hLCwd8lXwAHsjoU5aBLgin7lsXItoiM19LP -eJ06zH3FaOc4Kowf8JllJXel414DBsP8nX23snETIotxPXFol9xQIkjHCWaxKQSc -FWlmnA2exJprQHrt28C5W9x6odc27zKxS2D06IzETE4BtinwYhepb7P/qTAo+MX5 -FYZx86N7eg== ------END CERTIFICATE----- diff --git a/installer/pkg/config-generator/fixtures/kube-system.yaml b/installer/pkg/config-generator/fixtures/kube-system.yaml deleted file mode 100644 index 0c2934533c6..00000000000 --- a/installer/pkg/config-generator/fixtures/kube-system.yaml +++ /dev/null @@ -1,109 +0,0 @@ -apiVersion: v1 -data: - install-config: | - admin: - email: test@coreos.com - password: asd123 - baseDomain: cluster.com - clusterID: "" - machines: - - name: master - platform: - aws: - iamRoleName: "" - rootVolume: - iops: 100 - size: 30 - type: gp2 - type: t2.medium - replicas: 3 - - name: worker - platform: - aws: - iamRoleName: "" - rootVolume: - iops: 100 - size: 30 - type: gp2 - type: t2.medium - replicas: 3 - metadata: - creationTimestamp: null - name: test - networking: - podCIDR: 10.2.0.0/16 - serviceCIDR: 10.3.0.0/16 - type: canal - platform: - aws: - region: us-east-1 - vpcCIDRBlock: 10.0.0.0/16 - vpcID: "" - pullSecret: '{"auths": {}}' - kco-config: | - apiVersion: v1 - authConfig: - oidc_client_id: tectonic-kubectl - oidc_groups_claim: groups - oidc_issuer_url: https://test.cluster.com/identity - oidc_username_claim: email - cloudProviderConfig: - cloud_config_path: "" - cloud_provider_profile: aws - clusterConfig: - apiserver_url: https://test-api.cluster.com:6443 - dnsConfig: - clusterIP: 10.3.0.10 - kind: KubeCoreOperatorConfig - networkConfig: - advertise_address: 0.0.0.0 - cluster_cidr: 10.2.0.0/16 - etcd_servers: https://test-etcd-0.cluster.com:2379,https://test-etcd-1.cluster.com:2379,https://test-etcd-2.cluster.com:2379 - service_cidr: 10.3.0.0/16 - routingConfig: - subdomain: test.cluster.com - mao-config: | - apiServiceCA: | - -----BEGIN CERTIFICATE----- - MIIDMzCCAhugAwIBAgIISI9lAF1J/fMwDQYJKoZIhvcNAQELBQAwJjESMBAGA1UE - CxMJb3BlbnNoaWZ0MRAwDgYDVQQDEwdyb290LWNhMB4XDTE4MDkwNzIxNTIyMVoX - DTI4MDkwNDIxNTIyMlowKDERMA8GA1UECxMIYm9vdGt1YmUxEzARBgNVBAMTCmFn - 
Z3JlZ2F0b3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNm3Hre4oy - +J31X3i4dxy8hejzBISY8TSeoXMP9AZczQM1oyekSgf5EMSfh9DrDcKmPuTMhl6a - w6ZSjPDbrFlKrF2oIpq04Zn160i95QLC7zI8WzU/cNDS22J9pk8k9K47c/hZKRrd - AjT0rNI4qpVsDv43O1H2s5M6HiXB62rwakEALoATeufaPJEcAgP31nH9FhHft3uO - QkOur6iuXBDtv/FtPEFhmR5rDBYvUxaKXfB5c+TCevTZtjmP2bdRrvyHdWWapPtJ - auoGcV5s4Skp3tcEy24Qrl8FxR+Rsy7V+eYUPIWG1mDugmwpgq4SrB9idGoc/jqu - p6oDKRZUpxZdAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTAD - AQH/MB0GA1UdDgQWBBRFMWpOb15dmp4aVFFdPtY/lrgkGTAfBgNVHSMEGDAWgBRF - MWpOb15dmp4aVFFdPtY/lrgkGTANBgkqhkiG9w0BAQsFAAOCAQEAjzLnsIZ+Jwqr - 8h4TucqabLYkW1b/mk3k2GDg46xTOGpJo8UGesboclDxK/rtmDObgMLh2I9ekkPY - Yr1QNE5W4vqDOsmBm2MkqDVUg/EuOdSiNmWcJs/du452AVtk80yXTSaBQuW85SAe - AlG2CXadwTkxtLypLsUeOriKDRCV3hLCwd8lXwAHsjoU5aBLgin7lsXItoiM19LP - eJ06zH3FaOc4Kowf8JllJXel414DBsP8nX23snETIotxPXFol9xQIkjHCWaxKQSc - FWlmnA2exJprQHrt28C5W9x6odc27zKxS2D06IzETE4BtinwYhepb7P/qTAo+MX5 - FYZx86N7eg== - -----END CERTIFICATE----- - apiVersion: v1 - aws: - availabilityZone: "" - clusterID: "" - clusterName: test - image: ami-0af8953af3ec06b7c - region: us-east-1 - replicas: 3 - kind: machineAPIOperatorConfig - libvirt: null - provider: aws - targetNamespace: openshift-cluster-api - network-config: | - apiVersion: v1 - calicoConfig: - mtu: "1480" - kind: TectonicNetworkOperatorConfig - networkProfile: canal - podCIDR: 10.2.0.0/16 -kind: ConfigMap -metadata: - name: cluster-config-v1 - namespace: kube-system diff --git a/installer/pkg/config-generator/fixtures/test-aws.yaml b/installer/pkg/config-generator/fixtures/test-aws.yaml deleted file mode 100644 index 21fd9ed9130..00000000000 --- a/installer/pkg/config-generator/fixtures/test-aws.yaml +++ /dev/null @@ -1,40 +0,0 @@ -name: test -baseDomain: cluster.com -platform: aws -networking: - type: canal - mtu: 1480 - podCIDR: 10.2.0.0/16 - serviceCIDR: 10.3.0.0/16 -master: - nodePools: - - master -worker: - nodePools: - - worker -pullSecret: '{"auths": {}}' -admin: - email: test@coreos.com - password: asd123 -aws: - ec2AMIOverride: ami-0af8953af3ec06b7c - region: us-east-1 - sshKey: tectonic - vpcCIDRBlock: 10.0.0.0/16 - master: - ec2Type: t2.medium - rootVolume: - iops: 100 - size: 30 - type: gp2 - worker: - ec2Type: t2.medium - rootVolume: - iops: 100 - size: 30 - type: gp2 -nodePools: - - name: master - count: 3 - - name: worker - count: 3 diff --git a/installer/pkg/config-generator/fixtures/test.yaml b/installer/pkg/config-generator/fixtures/test.yaml deleted file mode 100644 index e8784ac3f2c..00000000000 --- a/installer/pkg/config-generator/fixtures/test.yaml +++ /dev/null @@ -1,11 +0,0 @@ -name: test -platform: aws -baseDomain: cluster.com -master: - nodePools: - - master -nodePools: - - name: master - count: 3 -aws: - ec2AMIOverride: ami-0af8953af3ec06b7c diff --git a/installer/pkg/config-generator/generator.go b/installer/pkg/config-generator/generator.go deleted file mode 100644 index 68e9278437b..00000000000 --- a/installer/pkg/config-generator/generator.go +++ /dev/null @@ -1,521 +0,0 @@ -package configgenerator - -import ( - "context" - "crypto/rand" - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "io/ioutil" - "net" - "path/filepath" - "strings" - - "github.com/apparentlymart/go-cidr/cidr" - "github.com/coreos/tectonic-config/config/kube-addon" - "github.com/coreos/tectonic-config/config/kube-core" - "github.com/coreos/tectonic-config/config/tectonic-network" - "github.com/coreos/tectonic-config/config/tectonic-utility" - "github.com/ghodss/yaml" - metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/openshift/installer/pkg/ipnet" - "github.com/openshift/installer/pkg/rhcos" - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/config" -) - -const ( - authConfigOIDCClientID = "tectonic-kubectl" - authConfigOIDCGroupsClaim = "groups" - authConfigOIDCUsernameClaim = "email" - networkConfigAdvertiseAddress = "0.0.0.0" - identityConfigConsoleClientID = "tectonic-console" - identityConfigKubectlClientID = "tectonic-kubectl" - ingressConfigIngressKind = "haproxy-router" - certificatesStrategy = "userProvidedCA" - identityAPIService = "tectonic-identity-api.tectonic-system.svc.cluster.local" - maoTargetNamespace = "openshift-cluster-api" -) - -// ConfigGenerator defines the cluster config generation for a cluster. -type ConfigGenerator struct { - config.Cluster -} - -type configurationObject struct { - metav1.TypeMeta - - Metadata metadata `json:"metadata,omitempty"` - Data data `json:"data,omitempty"` -} - -type data map[string]string - -type metadata struct { - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` -} - -type genericData map[string]interface{} - -// New returns a ConfigGenerator for a cluster. -func New(cluster config.Cluster) ConfigGenerator { - return ConfigGenerator{ - Cluster: cluster, - } -} - -// maoOperatorConfig contains configuration for mao managed stack -// TODO(enxebre): move up to "github.com/coreos/tectonic-config -type maoOperatorConfig struct { - metav1.TypeMeta `json:",inline"` - TargetNamespace string `json:"targetNamespace"` - APIServiceCA string `json:"apiServiceCA"` - Provider string `json:"provider"` - AWS *awsConfig `json:"aws"` - Libvirt *libvirtConfig `json:"libvirt"` -} - -type libvirtConfig struct { - ClusterName string `json:"clusterName"` - URI string `json:"uri"` - NetworkName string `json:"networkName"` - IPRange string `json:"iprange"` - Replicas int `json:"replicas"` -} - -type awsConfig struct { - ClusterName string `json:"clusterName"` - ClusterID string `json:"clusterID"` - Region string `json:"region"` - AvailabilityZone string `json:"availabilityZone"` - Image string `json:"image"` - Replicas int `json:"replicas"` -} - -func (c *ConfigGenerator) maoConfig(clusterDir string) (*maoOperatorConfig, error) { - cfg := maoOperatorConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "machineAPIOperatorConfig", - }, - - TargetNamespace: maoTargetNamespace, - } - - ca, err := ioutil.ReadFile(filepath.Join(clusterDir, aggregatorCACertPath)) - if err != nil { - return nil, fmt.Errorf("could not read aggregator CA: %v", err) - } - - cfg.APIServiceCA = string(ca) - cfg.Provider = tectonicCloudProvider(c.Platform) - - switch c.Platform { - case config.PlatformAWS: - var ami string - - if c.AWS.EC2AMIOverride != "" { - ami = c.AWS.EC2AMIOverride - } else { - ami, err = rhcos.AMI(context.TODO(), rhcos.DefaultChannel, c.Region) - if err != nil { - return nil, fmt.Errorf("failed to lookup RHCOS AMI: %v", err) - } - } - - cfg.AWS = &awsConfig{ - ClusterName: c.Name, - ClusterID: c.ClusterID, - Region: c.Region, - AvailabilityZone: "", - Image: ami, - Replicas: c.NodeCount(c.Worker.NodePools), - } - - case config.PlatformLibvirt: - cfg.Libvirt = &libvirtConfig{ - ClusterName: c.Name, - URI: c.Libvirt.URI, - NetworkName: c.Libvirt.Network.Name, - IPRange: c.Libvirt.IPRange, - Replicas: c.NodeCount(c.Worker.NodePools), - } - - default: - return nil, fmt.Errorf("unknown provider for machine-api-operator: %v", cfg.Provider) - } - - 
return &cfg, nil -} - -// KubeSystem returns, if successful, a yaml string for the kube-system. -func (c *ConfigGenerator) KubeSystem(clusterDir string) (string, error) { - coreConfig, err := c.coreConfig() - if err != nil { - return "", err - } - installConfig, err := c.installConfig() - if err != nil { - return "", err - } - maoConfig, err := c.maoConfig(clusterDir) - if err != nil { - return "", err - } - - return configMap("kube-system", genericData{ - "kco-config": coreConfig, - "network-config": c.networkConfig(), - "install-config": installConfig, - "mao-config": maoConfig, - }) -} - -// TectonicSystem returns, if successful, a yaml string for the tectonic-system. -func (c *ConfigGenerator) TectonicSystem() (string, error) { - utilityConfig, err := c.utilityConfig() - if err != nil { - return "", err - } - addonConfig, err := c.addonConfig() - if err != nil { - return "", err - } - return configMap("tectonic-system", genericData{ - "addon-config": addonConfig, - "utility-config": utilityConfig, - }) -} - -// InstallConfig returns a YAML-rendered Kubernetes object with the user-supplied cluster configuration. -func (c *ConfigGenerator) InstallConfig() (string, error) { - ic, err := c.installConfig() - if err != nil { - return "", err - } - return marshalYAML(ic) -} - -func (c *ConfigGenerator) installConfig() (*types.InstallConfig, error) { - _, podCIDR, err := net.ParseCIDR(c.Networking.PodCIDR) - if err != nil { - return nil, err - } - _, serviceCIDR, err := net.ParseCIDR(c.Networking.ServiceCIDR) - if err != nil { - return nil, err - } - - var ( - platform types.Platform - masterPlatform types.MachinePoolPlatform - workerPlatform types.MachinePoolPlatform - ) - switch c.Platform { - case config.PlatformAWS: - platform.AWS = &types.AWSPlatform{ - Region: c.Region, - VPCID: c.VPCID, - VPCCIDRBlock: c.VPCCIDRBlock, - } - masterPlatform.AWS = &types.AWSMachinePoolPlatform{ - InstanceType: c.AWS.Master.EC2Type, - IAMRoleName: c.AWS.Master.IAMRoleName, - EC2RootVolume: types.EC2RootVolume{ - IOPS: c.AWS.Master.MasterRootVolume.IOPS, - Size: c.AWS.Master.MasterRootVolume.Size, - Type: c.AWS.Master.MasterRootVolume.Type, - }, - } - workerPlatform.AWS = &types.AWSMachinePoolPlatform{ - InstanceType: c.AWS.Worker.EC2Type, - IAMRoleName: c.AWS.Worker.IAMRoleName, - EC2RootVolume: types.EC2RootVolume{ - IOPS: c.AWS.Worker.WorkerRootVolume.IOPS, - Size: c.AWS.Worker.WorkerRootVolume.Size, - Type: c.AWS.Worker.WorkerRootVolume.Type, - }, - } - case config.PlatformLibvirt: - platform.Libvirt = &types.LibvirtPlatform{ - URI: c.URI, - Network: types.LibvirtNetwork{ - Name: c.Network.Name, - IfName: c.Network.IfName, - IPRange: c.Network.IPRange, - }, - } - masterPlatform.Libvirt = &types.LibvirtMachinePoolPlatform{ - ImagePool: "default", - ImageVolume: "coreos_base", - } - workerPlatform.Libvirt = &types.LibvirtMachinePoolPlatform{ - ImagePool: "default", - ImageVolume: "coreos_base", - } - default: - return nil, fmt.Errorf("installconfig: invalid platform %s", c.Platform) - } - masterCount := int64(c.NodeCount(c.Master.NodePools)) - workerCount := int64(c.NodeCount(c.Worker.NodePools)) - - return &types.InstallConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.Name, - }, - ClusterID: c.ClusterID, - Admin: types.Admin{ - Email: c.Admin.Email, - Password: c.Admin.Password, - SSHKey: c.Admin.SSHKey, - }, - BaseDomain: c.BaseDomain, - PullSecret: c.PullSecret, - Networking: types.Networking{ - Type: types.NetworkType(string(c.Networking.Type)), - ServiceCIDR: ipnet.IPNet{IPNet: *serviceCIDR}, - 
PodCIDR: ipnet.IPNet{IPNet: *podCIDR}, - }, - Platform: platform, - Machines: []types.MachinePool{{ - Name: "master", - Replicas: &masterCount, - Platform: masterPlatform, - }, { - Name: "worker", - Replicas: &workerCount, - Platform: workerPlatform, - }}, - }, nil -} - -// CoreConfig returns, if successful, a yaml string for the on-disk kco-config. -func (c *ConfigGenerator) CoreConfig() (string, error) { - coreConfig, err := c.coreConfig() - if err != nil { - return "", err - } - return marshalYAML(coreConfig) -} - -func (c *ConfigGenerator) addonConfig() (*kubeaddon.OperatorConfig, error) { - addonConfig := kubeaddon.OperatorConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: kubeaddon.APIVersion, - Kind: kubeaddon.Kind, - }, - } - addonConfig.CloudProvider = tectonicCloudProvider(c.Platform) - addonConfig.ClusterConfig.APIServerURL = c.getAPIServerURL() - registrySecret, err := generateRandomID(16) - if err != nil { - return nil, err - } - addonConfig.RegistryHTTPSecret = registrySecret - return &addonConfig, nil -} - -func (c *ConfigGenerator) coreConfig() (*kubecore.OperatorConfig, error) { - coreConfig := kubecore.OperatorConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: kubecore.APIVersion, - Kind: kubecore.Kind, - }, - } - coreConfig.ClusterConfig.APIServerURL = c.getAPIServerURL() - coreConfig.AuthConfig.OIDCClientID = authConfigOIDCClientID - coreConfig.AuthConfig.OIDCIssuerURL = c.getOicdIssuerURL() - coreConfig.AuthConfig.OIDCGroupsClaim = authConfigOIDCGroupsClaim - coreConfig.AuthConfig.OIDCUsernameClaim = authConfigOIDCUsernameClaim - - cidrhost, err := cidrhost(c.Cluster.Networking.ServiceCIDR, 10) - if err != nil { - return nil, err - } - coreConfig.DNSConfig.ClusterIP = cidrhost - - coreConfig.CloudProviderConfig.CloudConfigPath = "" - coreConfig.CloudProviderConfig.CloudProviderProfile = k8sCloudProvider(c.Cluster.Platform) - - coreConfig.RoutingConfig.Subdomain = c.getBaseAddress() - - coreConfig.NetworkConfig.ClusterCIDR = c.Cluster.Networking.PodCIDR - coreConfig.NetworkConfig.ServiceCIDR = c.Cluster.Networking.ServiceCIDR - coreConfig.NetworkConfig.AdvertiseAddress = networkConfigAdvertiseAddress - coreConfig.NetworkConfig.EtcdServers = c.getEtcdServersURLs() - - return &coreConfig, nil -} - -func (c *ConfigGenerator) networkConfig() *tectonicnetwork.OperatorConfig { - networkConfig := tectonicnetwork.OperatorConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: tectonicnetwork.APIVersion, - Kind: tectonicnetwork.Kind, - }, - } - - networkConfig.PodCIDR = c.Cluster.Networking.PodCIDR - networkConfig.CalicoConfig.MTU = c.Cluster.Networking.MTU - networkConfig.NetworkProfile = tectonicnetwork.NetworkType(c.Cluster.Networking.Type) - - return &networkConfig -} - -func (c *ConfigGenerator) utilityConfig() (*tectonicutility.OperatorConfig, error) { - utilityConfig := tectonicutility.OperatorConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: tectonicutility.APIVersion, - Kind: tectonicutility.Kind, - }, - } - - utilityConfig.TectonicConfigMapConfig.CertificatesStrategy = certificatesStrategy - utilityConfig.TectonicConfigMapConfig.ClusterID = c.Cluster.Internal.ClusterID - utilityConfig.TectonicConfigMapConfig.ClusterName = c.Cluster.Name - utilityConfig.TectonicConfigMapConfig.InstallerPlatform = tectonicCloudProvider(c.Platform) - utilityConfig.TectonicConfigMapConfig.KubeAPIServerURL = c.getAPIServerURL() - // TODO: Speficy what's a version in ut2 and set it here - utilityConfig.TectonicConfigMapConfig.TectonicVersion = "ut2" - - return &utilityConfig, nil -} - -func 
configMap(namespace string, unmarshaledData genericData) (string, error) { - data := make(data) - - for key, obj := range unmarshaledData { - str, err := marshalYAML(obj) - if err != nil { - return "", err - } - data[key] = str - } - - configurationObject := configurationObject{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - Metadata: metadata{ - Name: "cluster-config-v1", - Namespace: namespace, - }, - Data: data, - } - - str, err := marshalYAML(configurationObject) - if err != nil { - return "", err - } - return str, nil -} - -func marshalYAML(obj interface{}) (string, error) { - data, err := yaml.Marshal(&obj) - if err != nil { - return "", err - } - - return string(data), nil -} - -func (c *ConfigGenerator) getEtcdServersURLs() string { - etcdServers := make([]string, c.Cluster.NodeCount(c.Cluster.Master.NodePools)) - for i := range etcdServers { - etcdServers[i] = fmt.Sprintf("https://%s-etcd-%v.%s:2379", c.Cluster.Name, i, c.Cluster.BaseDomain) - } - return strings.Join(etcdServers, ",") -} - -func (c *ConfigGenerator) getAPIServerURL() string { - return fmt.Sprintf("https://%s-api.%s:6443", c.Cluster.Name, c.Cluster.BaseDomain) -} - -func (c *ConfigGenerator) getBaseAddress() string { - return fmt.Sprintf("%s.%s", c.Cluster.Name, c.Cluster.BaseDomain) -} - -func (c *ConfigGenerator) getOicdIssuerURL() string { - return fmt.Sprintf("https://%s.%s/identity", c.Cluster.Name, c.Cluster.BaseDomain) -} - -// generateRandomID reproduce tf random_id behaviour -// TODO: re-evaluate solution -func generateRandomID(byteLength int) (string, error) { - bytes := make([]byte, byteLength) - - n, err := rand.Reader.Read(bytes) - if n != byteLength { - return "", errors.New("generated insufficient random bytes") - } - if err != nil { - return "", err - } - - b64Str := base64.RawURLEncoding.EncodeToString(bytes) - - return b64Str, nil -} - -// GenerateClusterID reproduce tf cluster_id behaviour -// https://github.com/coreos/tectonic-installer/blob/master/modules/tectonic/assets.tf#L81 -// TODO: re-evaluate solution -func GenerateClusterID(byteLength int) (string, error) { - randomID, err := generateRandomID(byteLength) - if err != nil { - return "", err - } - bytes, err := base64.RawURLEncoding.DecodeString(randomID) - hexStr := hex.EncodeToString(bytes) - return fmt.Sprintf("%s-%s-%s-%s-%s", - hexStr[0:8], - hexStr[8:12], - hexStr[12:16], - hexStr[16:20], - hexStr[20:32]), nil -} - -// cidrhost takes an IP address range in CIDR notation -// and creates an IP address with the given host number. -// If given host number is negative, the count starts from the end of the range -// Reproduces tf behaviour. 
-// TODO: re-evaluate solution -func cidrhost(iprange string, hostNum int) (string, error) { - _, network, err := net.ParseCIDR(iprange) - if err != nil { - return "", fmt.Errorf("invalid CIDR expression (%s): %s", iprange, err) - } - - ip, err := cidr.Host(network, hostNum) - if err != nil { - return "", err - } - - return ip.String(), nil -} - -// Converts a platform to the cloudProvider that k8s understands -func k8sCloudProvider(platform config.Platform) string { - switch platform { - case config.PlatformAWS: - return "aws" - case config.PlatformLibvirt: - return "" - } - panic("invalid platform") -} - -// Converts a platform to the cloudProvider that Tectonic understands -func tectonicCloudProvider(platform config.Platform) string { - switch platform { - case config.PlatformAWS: - return "aws" - case config.PlatformLibvirt: - return "libvirt" - } - panic("invalid platform") -} diff --git a/installer/pkg/config-generator/generator_test.go b/installer/pkg/config-generator/generator_test.go deleted file mode 100644 index eb5e67b89fa..00000000000 --- a/installer/pkg/config-generator/generator_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package configgenerator - -import ( - "crypto/x509" - "crypto/x509/pkix" - "io/ioutil" - "os" - "testing" - - "github.com/openshift/installer/pkg/asset/tls" - "github.com/openshift/installer/pkg/types/config" - "github.com/stretchr/testify/assert" -) - -func initConfig(t *testing.T, file string) ConfigGenerator { - cluster, err := config.ParseConfigFile("./fixtures/" + file) - if err != nil { - t.Fatalf("Test case TestUrlFunctions: failed to parse test config, %s", err) - } - - return ConfigGenerator{ - *cluster, - } -} -func TestUrlFunctions(t *testing.T) { - config := initConfig(t, "test.yaml") - - testCases := []struct { - test string - got string - expected string - }{ - { - test: "getAPIServerURL", - got: config.getAPIServerURL(), - expected: "https://test-api.cluster.com:6443", - }, - { - test: "getBaseAddress", - got: config.getBaseAddress(), - expected: "test.cluster.com", - }, - { - test: "getOicdIssuerURL", - got: config.getOicdIssuerURL(), - expected: "https://test.cluster.com/identity", - }, - } - for _, tc := range testCases { - assert.Equal(t, tc.expected, tc.got) - } -} - -func TestGetEtcdServersURLs(t *testing.T) { - testCases := []struct { - test string - configFile string - expected string - }{ - { - test: "No ExternalServers", - configFile: "test.yaml", - expected: "https://test-etcd-0.cluster.com:2379,https://test-etcd-1.cluster.com:2379,https://test-etcd-2.cluster.com:2379", - }, - } - for _, tc := range testCases { - - config := initConfig(t, tc.configFile) - got := config.getEtcdServersURLs() - assert.Equal(t, tc.expected, got) - } -} - -func TestKubeSystem(t *testing.T) { - config := initConfig(t, "test-aws.yaml") - got, err := config.KubeSystem("./fixtures") - if err != nil { - t.Errorf("Test case TestKubeSystem: failed to get KubeSystem(): %s", err) - } - expected, err := ioutil.ReadFile("./fixtures/kube-system.yaml") - if err != nil { - t.Errorf("Test case TestKubeSystem: failed to ReadFile(): %s", err) - } - - assert.Equal(t, string(expected), got) -} - -func TestCIDRHost(t *testing.T) { - testCases := []struct { - test string - iprange string - hostNum int - expected string - }{ - { - test: "10.0.0.0/8", - iprange: "10.0.0.0/8", - hostNum: 8, - expected: "10.0.0.8", - }, - { - test: "10.3.0.0/16", - iprange: "10.3.0.0/16", - hostNum: 10, - expected: "10.3.0.10", - }, - } - for _, tc := range testCases { - got, err := 
cidrhost(tc.iprange, tc.hostNum) - if err != nil { - t.Errorf("Test case %s: failed to run cidrhost(): %s", tc.test, err) - } - assert.Equal(t, tc.expected, got) - } -} - -func TestGenerateCert(t *testing.T) { - caKey, err := tls.PrivateKey() - if err != nil { - t.Fatalf("Failed to generate Private Key: %v", err) - } - caCfg := &tls.CertCfg{ - Subject: pkix.Name{ - CommonName: "test-self-signed-ca", - OrganizationalUnit: []string{"openshift"}, - }, - Validity: tls.ValidityTenYears, - } - caCert, err := tls.SelfSignedCACert(caCfg, caKey) - if err != nil { - t.Fatalf("failed to generate self signed certificate: %v", err) - } - keyPath := "./test.key" - certPath := "./test.crt" - - cases := []struct { - cfg *tls.CertCfg - clusterDir string - err bool - }{ - { - cfg: &tls.CertCfg{ - Subject: pkix.Name{CommonName: "test-cert", OrganizationalUnit: []string{"test"}}, - KeyUsages: x509.KeyUsageKeyEncipherment, - DNSNames: []string{"test-api.kubernetes.default"}, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Validity: tls.ValidityTenYears, - IsCA: false, - }, - clusterDir: "./", - err: false, - }, - } - for i, c := range cases { - _, _, err := generateCert(c.clusterDir, caKey, caCert, keyPath, certPath, c.cfg, false) - if err != nil { - no := "no" - if c.err { - no = "an" - } - t.Errorf("test case %d: expected %s error, got %v", i, no, err) - } - - if err := os.Remove(keyPath); err != nil { - t.Errorf("test case %d: failed to cleanup test key: %s, got %v", i, keyPath, err) - } - if err := os.Remove(certPath); err != nil { - t.Errorf("test case %d: failed to cleanup test certificate: %s, got %v", i, certPath, err) - } - } -} diff --git a/installer/pkg/config-generator/ignition.go b/installer/pkg/config-generator/ignition.go deleted file mode 100644 index a19a5ef49c9..00000000000 --- a/installer/pkg/config-generator/ignition.go +++ /dev/null @@ -1,146 +0,0 @@ -package configgenerator - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/url" - "path/filepath" - - ignconfig "github.com/coreos/ignition/config/v2_2" - ignconfigtypes "github.com/coreos/ignition/config/v2_2/types" - "github.com/openshift/installer/pkg/types/config" - "github.com/vincent-petithory/dataurl" -) - -const ( - caPath = "generated/tls/root-ca.crt" -) - -// GenerateIgnConfig generates Ignition configs for the workers and masters. -// It returns the content of the ignition files. -func (c *ConfigGenerator) GenerateIgnConfig(clusterDir string) (masterIgns []string, workerIgn string, err error) { - var masters config.NodePool - var workers config.NodePool - for _, pool := range c.NodePools { - switch pool.Name { - case "master": - masters = pool - case "worker": - workers = pool - default: - return nil, "", fmt.Errorf("unrecognized role: %s", pool.Name) - } - } - - ca, err := ioutil.ReadFile(filepath.Join(clusterDir, caPath)) - if err != nil { - return nil, "", err - } - - workerCfg, err := parseIgnFile(workers.IgnitionFile) - if err != nil { - return nil, "", fmt.Errorf("failed to parse Ignition config for workers: %v", err) - } - - // XXX(crawford): The SSH key should only be added to the bootstrap - // node. After that, MCO should be responsible for - // distributing SSH keys. 
- c.embedUserBlock(&workerCfg) - c.appendCertificateAuthority(&workerCfg, ca) - c.embedAppendBlock(&workerCfg, "worker", "") - - ign, err := json.Marshal(&workerCfg) - if err != nil { - return nil, "", fmt.Errorf("failed to marshal worker ignition: %v", err) - } - workerIgn = string(ign) - - masterCfg, err := parseIgnFile(masters.IgnitionFile) - if err != nil { - return nil, "", fmt.Errorf("failed to parse Ignition config for masters: %v", err) - } - - for i := 0; i < masters.Count; i++ { - ignCfg := masterCfg - - // XXX(crawford): The SSH key should only be added to the bootstrap - // node. After that, MCO should be responsible for - // distributing SSH keys. - c.embedUserBlock(&ignCfg) - c.appendCertificateAuthority(&ignCfg, ca) - c.embedAppendBlock(&ignCfg, "master", fmt.Sprintf("etcd_index=%d", i)) - - masterIgn, err := json.Marshal(&ignCfg) - if err != nil { - return nil, "", fmt.Errorf("failed to marshal master ignition: %v", err) - } - masterIgns = append(masterIgns, string(masterIgn)) - } - - return masterIgns, workerIgn, nil -} - -func parseIgnFile(filePath string) (ignconfigtypes.Config, error) { - if filePath == "" { - return ignconfigtypes.Config{ - Ignition: ignconfigtypes.Ignition{ - Version: ignconfigtypes.MaxVersion.String(), - }, - }, nil - } - - data, err := ioutil.ReadFile(filePath) - if err != nil { - return ignconfigtypes.Config{}, err - } - - cfg, rpt, _ := ignconfig.Parse(data) - if len(rpt.Entries) > 0 { - return ignconfigtypes.Config{}, fmt.Errorf("failed to parse ignition file %s: %s", filePath, rpt.String()) - } - - return cfg, nil -} - -func (c *ConfigGenerator) embedAppendBlock(ignCfg *ignconfigtypes.Config, role string, query string) { - appendBlock := ignconfigtypes.ConfigReference{ - Source: c.getMCSURL(role, query), - Verification: ignconfigtypes.Verification{Hash: nil}, - } - ignCfg.Ignition.Config.Append = append(ignCfg.Ignition.Config.Append, appendBlock) -} - -func (c *ConfigGenerator) appendCertificateAuthority(ignCfg *ignconfigtypes.Config, ca []byte) { - ignCfg.Ignition.Security.TLS.CertificateAuthorities = append(ignCfg.Ignition.Security.TLS.CertificateAuthorities, ignconfigtypes.CaReference{ - Source: dataurl.EncodeBytes(ca), - }) -} - -func (c *ConfigGenerator) embedUserBlock(ignCfg *ignconfigtypes.Config) { - userBlock := ignconfigtypes.PasswdUser{ - Name: "core", - SSHAuthorizedKeys: []ignconfigtypes.SSHAuthorizedKey{ - ignconfigtypes.SSHAuthorizedKey(c.SSHKey), - }, - } - - ignCfg.Passwd.Users = append(ignCfg.Passwd.Users, userBlock) -} - -func (c *ConfigGenerator) getMCSURL(role string, query string) string { - var u string - port := 49500 - - if role == "master" || role == "worker" { - u = func() *url.URL { - return &url.URL{ - Scheme: "https", - Host: fmt.Sprintf("%s-api.%s:%d", c.Name, c.BaseDomain, port), - Path: fmt.Sprintf("/config/%s", role), - RawQuery: query, - } - }().String() - } - return u -} diff --git a/installer/pkg/config-generator/tls.go b/installer/pkg/config-generator/tls.go deleted file mode 100644 index ee64315190c..00000000000 --- a/installer/pkg/config-generator/tls.go +++ /dev/null @@ -1,406 +0,0 @@ -package configgenerator - -import ( - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "net" - "path/filepath" - - "github.com/openshift/installer/installer/pkg/copy" - "github.com/openshift/installer/pkg/asset/tls" -) - -const ( - adminCertPath = "generated/tls/admin.crt" - adminKeyPath = "generated/tls/admin.key" - aggregatorCACertPath = "generated/tls/aggregator-ca.crt" - 
aggregatorCAKeyPath = "generated/tls/aggregator-ca.key" - apiServerCertPath = "generated/tls/apiserver.crt" - apiServerKeyPath = "generated/tls/apiserver.key" - apiServerProxyCertPath = "generated/tls/apiserver-proxy.crt" - apiServerProxyKeyPath = "generated/tls/apiserver-proxy.key" - etcdCACertPath = "generated/tls/etcd-ca.crt" - etcdCAKeyPath = "generated/tls/etcd-ca.key" - etcdClientCertPath = "generated/tls/etcd-client.crt" - etcdClientKeyPath = "generated/tls/etcd-client.key" - ingressCACertPath = "generated/tls/ingress-ca.crt" - ingressCertPath = "generated/tls/ingress.crt" - ingressKeyPath = "generated/tls/ingress.key" - kubeCACertPath = "generated/tls/kube-ca.crt" - kubeCAKeyPath = "generated/tls/kube-ca.key" - kubeletCertPath = "generated/tls/kubelet.crt" - kubeletKeyPath = "generated/tls/kubelet.key" - clusterAPIServerCertPath = "generated/tls/cluster-apiserver-ca.crt" - clusterAPIServerKeyPath = "generated/tls/cluster-apiserver-ca.key" - osAPIServerCertPath = "generated/tls/openshift-apiserver.crt" - osAPIServerKeyPath = "generated/tls/openshift-apiserver.key" - rootCACertPath = "generated/tls/root-ca.crt" - rootCAKeyPath = "generated/tls/root-ca.key" - serviceServingCACertPath = "generated/tls/service-serving-ca.crt" - serviceServingCAKeyPath = "generated/tls/service-serving-ca.key" - machineConfigServerCertPath = "generated/tls/machine-config-server.crt" - machineConfigServerKeyPath = "generated/tls/machine-config-server.key" - serviceAccountPubkeyPath = "generated/tls/service-account.pub" - serviceAccountPrivateKeyPath = "generated/tls/service-account.key" -) - -// GenerateTLSConfig fetches and validates the TLS cert files -// If no file paths were provided, the certs will be auto-generated -func (c *ConfigGenerator) GenerateTLSConfig(clusterDir string) error { - var caKey *rsa.PrivateKey - var caCert *x509.Certificate - var err error - - if c.CA.RootCAKeyPath == "" && c.CA.RootCACertPath == "" { - caCert, caKey, err = generateRootCert(clusterDir) - if err != nil { - return fmt.Errorf("failed to generate root CA certificate and key pair: %v", err) - } - } else { - // copy key and certificates - caCert, caKey, err = getCertFiles(clusterDir, c.CA.RootCACertPath, c.CA.RootCAKeyPath) - if err != nil { - return fmt.Errorf("failed to process CA certificate and key pair: %v", err) - } - } - - // generate kube CA - cfg := &tls.CertCfg{ - Subject: pkix.Name{CommonName: "kube-ca", OrganizationalUnit: []string{"bootkube"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: tls.ValidityTenYears, - IsCA: true, - } - kubeCAKey, kubeCACert, err := generateCert(clusterDir, caKey, caCert, kubeCAKeyPath, kubeCACertPath, cfg, false) - if err != nil { - return fmt.Errorf("failed to generate kubernetes CA: %v", err) - } - - // generate etcd CA - cfg = &tls.CertCfg{ - Subject: pkix.Name{CommonName: "etcd", OrganizationalUnit: []string{"etcd"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - IsCA: true, - Validity: tls.ValidityTenYears, - } - etcdCAKey, etcdCACert, err := generateCert(clusterDir, caKey, caCert, etcdCAKeyPath, etcdCACertPath, cfg, false) - if err != nil { - return fmt.Errorf("failed to generate etcd CA: %v", err) - } - - if err := copy.Copy(filepath.Join(clusterDir, etcdCAKeyPath), filepath.Join(clusterDir, "generated/tls/etcd-client-ca.key")); err != nil { - return fmt.Errorf("failed to import kube CA cert into ingress-ca.crt: %v", err) - } - if err := 
copy.Copy(filepath.Join(clusterDir, etcdCACertPath), filepath.Join(clusterDir, "generated/tls/etcd-client-ca.crt")); err != nil { - return fmt.Errorf("failed to import kube CA cert into ingress-ca.crt: %v", err) - } - - // generate etcd client certificate - cfg = &tls.CertCfg{ - Subject: pkix.Name{CommonName: "etcd", OrganizationalUnit: []string{"etcd"}}, - KeyUsages: x509.KeyUsageKeyEncipherment, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Validity: tls.ValidityTenYears, - } - if _, _, err := generateCert(clusterDir, etcdCAKey, etcdCACert, etcdClientKeyPath, etcdClientCertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate etcd client certificate: %v", err) - } - - // generate aggregator CA - cfg = &tls.CertCfg{ - Subject: pkix.Name{CommonName: "aggregator", OrganizationalUnit: []string{"bootkube"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: tls.ValidityTenYears, - IsCA: true, - } - aggregatorCAKey, aggregatorCACert, err := generateCert(clusterDir, caKey, caCert, aggregatorCAKeyPath, aggregatorCACertPath, cfg, false) - if err != nil { - return fmt.Errorf("failed to generate aggregator CA: %v", err) - } - - // generate service-serving CA - cfg = &tls.CertCfg{ - Subject: pkix.Name{CommonName: "service-serving", OrganizationalUnit: []string{"bootkube"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: tls.ValidityTenYears, - IsCA: true, - } - if _, _, err := generateCert(clusterDir, caKey, caCert, serviceServingCAKeyPath, serviceServingCACertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate service-serving CA: %v", err) - } - - // Ingress certs - if err := copy.Copy(filepath.Join(clusterDir, kubeCACertPath), filepath.Join(clusterDir, ingressCACertPath)); err != nil { - return fmt.Errorf("failed to import kube CA cert into ingress-ca.crt: %v", err) - } - - baseAddress := c.getBaseAddress() - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - DNSNames: []string{ - baseAddress, - fmt.Sprintf("*.%s", baseAddress), - }, - Subject: pkix.Name{CommonName: baseAddress, Organization: []string{"ingress"}}, - Validity: tls.ValidityTenYears, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, kubeCAKey, kubeCACert, ingressKeyPath, ingressCertPath, cfg, true); err != nil { - return fmt.Errorf("failed to generate ingress CA: %v", err) - } - - // Kube admin certs - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - Subject: pkix.Name{CommonName: "system:admin", Organization: []string{"system:masters"}}, - Validity: tls.ValidityTenYears, - IsCA: false, - } - - if _, _, err = generateCert(clusterDir, kubeCAKey, kubeCACert, adminKeyPath, adminCertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate kube admin certificate: %v", err) - } - - // Kube API server certs - apiServerAddress, err := cidrhost(c.Cluster.Networking.ServiceCIDR, 1) - if err != nil { - return fmt.Errorf("can't resolve api server host address: %v", err) - } - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - 
Subject: pkix.Name{CommonName: "kube-apiserver", Organization: []string{"kube-master"}}, - DNSNames: []string{ - fmt.Sprintf("%s-api.%s", c.Name, c.BaseDomain), - "kubernetes", "kubernetes.default", - "kubernetes.default.svc", - "kubernetes.default.svc.cluster.local", - }, - Validity: tls.ValidityTenYears, - IPAddresses: []net.IP{net.ParseIP(apiServerAddress)}, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, kubeCAKey, kubeCACert, apiServerKeyPath, apiServerCertPath, cfg, true); err != nil { - return fmt.Errorf("failed to generate kube api server certificate: %v", err) - } - - // Kube API openshift certs - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - Subject: pkix.Name{CommonName: "openshift-apiserver", Organization: []string{"kube-master"}}, - DNSNames: []string{ - fmt.Sprintf("%s-api.%s", c.Name, c.BaseDomain), - "openshift-apiserver", - "openshift-apiserver.kube-system", - "openshift-apiserver.kube-system.svc", - "openshift-apiserver.kube-system.svc.cluster.local", - "localhost", "127.0.0.1"}, - Validity: tls.ValidityTenYears, - IPAddresses: []net.IP{net.ParseIP(apiServerAddress)}, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, aggregatorCAKey, aggregatorCACert, osAPIServerKeyPath, osAPIServerCertPath, cfg, true); err != nil { - return fmt.Errorf("failed to generate openshift api server certificate: %v", err) - } - - // Kube API proxy certs - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Subject: pkix.Name{CommonName: "kube-apiserver-proxy", Organization: []string{"kube-master"}}, - Validity: tls.ValidityTenYears, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, aggregatorCAKey, aggregatorCACert, apiServerProxyKeyPath, apiServerProxyCertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate kube api proxy certificate: %v", err) - } - - // Kubelet certs - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Subject: pkix.Name{CommonName: "system:serviceaccount:kube-system:default", Organization: []string{"system:serviceaccounts:kube-system"}}, - Validity: tls.ValidityThirtyMinutes, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, kubeCAKey, kubeCACert, kubeletKeyPath, kubeletCertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate kubelet certificate: %v", err) - } - - // MachineConfigServer certs - mcsDomain := fmt.Sprintf("%s-api.%s", c.Name, c.BaseDomain) - cfg = &tls.CertCfg{ - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - DNSNames: []string{mcsDomain}, - Subject: pkix.Name{CommonName: mcsDomain}, - Validity: tls.ValidityTenYears, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, caKey, caCert, machineConfigServerKeyPath, machineConfigServerCertPath, cfg, false); err != nil { - return fmt.Errorf("failed to generate machine-config-server certificate: %v", err) - } - - // Cluster API cert - cfg = &tls.CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - Subject: pkix.Name{CommonName: "clusterapi", OrganizationalUnit: []string{"bootkube"}}, - DNSNames: []string{ - 
"clusterapi", - fmt.Sprintf("clusterapi.%s", maoTargetNamespace), - fmt.Sprintf("clusterapi.%s.svc", maoTargetNamespace), - fmt.Sprintf("clusterapi.%s.svc.cluster.local", maoTargetNamespace), - }, - Validity: tls.ValidityTenYears, - IsCA: false, - } - - if _, _, err := generateCert(clusterDir, aggregatorCAKey, aggregatorCACert, clusterAPIServerKeyPath, clusterAPIServerCertPath, cfg, true); err != nil { - return fmt.Errorf("failed to generate cluster-apiserver certificate: %v", err) - } - - // Service Account private and public key. - svcAccountPrivKey, err := generatePrivateKey(clusterDir, serviceAccountPrivateKeyPath) - if err != nil { - return fmt.Errorf("failed to generate service-account private key: %v", err) - } - - pubkeyPath := filepath.Join(clusterDir, serviceAccountPubkeyPath) - pubkeyData, err := tls.PublicKeyToPem(&svcAccountPrivKey.PublicKey) - if err != nil { - return fmt.Errorf("failed to generate service-account public key: %v", err) - } - if err := ioutil.WriteFile(pubkeyPath, []byte(pubkeyData), 0600); err != nil { - return fmt.Errorf("failed to write service-account public key: %v", err) - } - - return nil -} - -// generatePrivateKey generates and writes the private key to disk -func generatePrivateKey(clusterDir string, path string) (*rsa.PrivateKey, error) { - fileTargetPath := filepath.Join(clusterDir, path) - key, err := tls.PrivateKey() - if err != nil { - return nil, fmt.Errorf("error writing private key: %v", err) - } - if err := ioutil.WriteFile(fileTargetPath, []byte(tls.PrivateKeyToPem(key)), 0600); err != nil { - return nil, err - } - return key, nil -} - -// generateRootCert creates the rootCAKey and rootCACert -func generateRootCert(clusterDir string) (cert *x509.Certificate, key *rsa.PrivateKey, err error) { - targetKeyPath := filepath.Join(clusterDir, rootCAKeyPath) - targetCertPath := filepath.Join(clusterDir, rootCACertPath) - - cfg := &tls.CertCfg{ - Subject: pkix.Name{ - CommonName: "root-ca", - OrganizationalUnit: []string{"openshift"}, - }, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: tls.ValidityTenYears, - IsCA: true, - } - - caKey, caCert, err := tls.GenerateRootCertKey(cfg) - if err != nil { - return nil, nil, err - } - - if err := ioutil.WriteFile(targetKeyPath, []byte(tls.PrivateKeyToPem(caKey)), 0600); err != nil { - return nil, nil, err - } - - if err := ioutil.WriteFile(targetCertPath, []byte(tls.CertToPem(caCert)), 0666); err != nil { - return nil, nil, err - } - - return caCert, caKey, nil -} - -// getCertFiles copies the given cert/key files into the generated folder and returns their contents -func getCertFiles(clusterDir string, certPath string, keyPath string) (*x509.Certificate, *rsa.PrivateKey, error) { - keyDst := filepath.Join(clusterDir, rootCAKeyPath) - if err := copy.Copy(keyPath, keyDst); err != nil { - return nil, nil, fmt.Errorf("failed to write file: %v", err) - } - - certDst := filepath.Join(clusterDir, rootCACertPath) - if err := copy.Copy(certPath, certDst); err != nil { - return nil, nil, fmt.Errorf("failed to write file: %v", err) - } - // content validation occurs in pkg/config/validate.go - // if it fails here, something went wrong - certData, err := ioutil.ReadFile(certPath) - if err != nil { - panic(err) - } - certPem, _ := pem.Decode([]byte(string(certData))) - keyData, err := ioutil.ReadFile(keyPath) - if err != nil { - panic(err) - } - keyPem, _ := pem.Decode([]byte(string(keyData))) - key, err := x509.ParsePKCS1PrivateKey(keyPem.Bytes) - if err != 
nil { - return nil, nil, fmt.Errorf("failed to process private key: %v", err) - } - certs, err := x509.ParseCertificates(certPem.Bytes) - if err != nil { - return nil, nil, fmt.Errorf("failed to process certificate: %v", err) - } - - return certs[0], key, nil -} - -// generateCert creates a key, csr & a signed cert -// If appendCA is true, then also append the CA cert into the result cert. -// This is useful for apiserver and openshift-apiser cert which will be -// authenticated by the kubeconfig using root-ca. -func generateCert(clusterDir string, - caKey *rsa.PrivateKey, - caCert *x509.Certificate, - keyPath string, - certPath string, - cfg *tls.CertCfg, - appendCA bool) (*rsa.PrivateKey, *x509.Certificate, error) { - - targetKeyPath := filepath.Join(clusterDir, keyPath) - targetCertPath := filepath.Join(clusterDir, certPath) - - key, cert, err := tls.GenerateCert(caKey, caCert, cfg) - if err != nil { - return nil, nil, err - } - - if err := ioutil.WriteFile(targetKeyPath, []byte(tls.PrivateKeyToPem(key)), 0600); err != nil { - return nil, nil, err - } - - content := []byte(tls.CertToPem(cert)) - if appendCA { - content = append(content, '\n') - content = append(content, []byte(tls.CertToPem(caCert))...) - } - if err := ioutil.WriteFile(targetCertPath, content, 0666); err != nil { - return nil, nil, err - } - - return key, cert, nil -} diff --git a/installer/pkg/copy/copy.go b/installer/pkg/copy/copy.go deleted file mode 100644 index 9036adf9580..00000000000 --- a/installer/pkg/copy/copy.go +++ /dev/null @@ -1,26 +0,0 @@ -// Package copy supports copying a file to another path. -package copy - -import ( - "io" - "os" -) - -// Copy creates a new file at toFilePath with with mode 0666 (before -// umask) and the same content as fromFilePath. -func Copy(fromFilePath, toFilePath string) error { - from, err := os.Open(fromFilePath) - if err != nil { - return err - } - defer from.Close() - - to, err := os.OpenFile(toFilePath, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return err - } - defer to.Close() - - _, err = io.Copy(to, from) - return err -} diff --git a/installer/pkg/copy/copy_test.go b/installer/pkg/copy/copy_test.go deleted file mode 100644 index 72b570f7063..00000000000 --- a/installer/pkg/copy/copy_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package copy - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestCopyFile(t *testing.T) { - dir, err := ioutil.TempDir("", "workflow-test-") - if err != nil { - t.Error(err) - } - defer os.RemoveAll(dir) - - sourcePath := filepath.Join(dir, "source") - sourceContent := []byte("Hello, World!\n") - err = ioutil.WriteFile(sourcePath, sourceContent, 0600) - if err != nil { - t.Error(err) - } - - targetPath := filepath.Join(dir, "target") - err = Copy(sourcePath, targetPath) - if err != nil { - t.Error(err) - } - - targetContent, err := ioutil.ReadFile(targetPath) - if err != nil { - t.Error(err) - } - - if string(targetContent) != string(sourceContent) { - t.Errorf("target %q != source %q", string(targetContent), string(sourceContent)) - } -} diff --git a/installer/pkg/validate/fixtures/exists b/installer/pkg/validate/fixtures/exists deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/installer/pkg/validate/last_ip_test.go b/installer/pkg/validate/last_ip_test.go deleted file mode 100644 index d4c8610c19e..00000000000 --- a/installer/pkg/validate/last_ip_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package validate - -import ( - "net" - "testing" -) - -func TestLastIP(t *testing.T) { - cases := []struct { - 
in net.IPNet - out net.IP - }{ - { - in: net.IPNet{ - IP: net.ParseIP("192.168.0.0").To4(), - Mask: net.CIDRMask(24, 32), - }, - out: net.ParseIP("192.168.0.255"), - }, - { - in: net.IPNet{ - IP: net.ParseIP("192.168.0.0").To4(), - Mask: net.CIDRMask(22, 32), - }, - out: net.ParseIP("192.168.3.255"), - }, - { - in: net.IPNet{ - IP: net.ParseIP("192.168.0.0").To4(), - Mask: net.CIDRMask(32, 32), - }, - out: net.ParseIP("192.168.0.0"), - }, - { - in: net.IPNet{ - IP: net.ParseIP("0.0.0.0").To4(), - Mask: net.CIDRMask(0, 32), - }, - out: net.ParseIP("255.255.255.255"), - }, - } - - var out net.IP - for i, c := range cases { - if out = lastIP(&c.in); out.String() != c.out.String() { - t.Errorf("test case %d: expected %s but got %s", i, c.out, out) - } - } -} diff --git a/installer/pkg/validate/validate.go b/installer/pkg/validate/validate.go deleted file mode 100644 index 416cc02392f..00000000000 --- a/installer/pkg/validate/validate.go +++ /dev/null @@ -1,470 +0,0 @@ -package validate - -import ( - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "net" - "os" - "regexp" - "strconv" - "strings" - "unicode/utf8" -) - -func isMatch(re string, v string) bool { - return regexp.MustCompile(re).MatchString(v) -} - -// PrefixError wraps an error with a prefix or returns nil if there was no error. -// This is useful for wrapping errors returned by generic error funcs like `NonEmpty` so that the error includes the offending field name. -func PrefixError(prefix string, err error) error { - if err != nil { - return fmt.Errorf("%s: %v", prefix, err) - } - return nil -} - -// JSONFile validates that the file at the given path is valid JSON. -func JSONFile(path string) error { - b, err := ioutil.ReadFile(path) - if err != nil { - return err - } - err = JSON(b) - if err != nil { - return fmt.Errorf("file %q contains invalid JSON: %v", path, err) - } - return nil -} - -// JSON validates that the given data is valid JSON. -func JSON(data []byte) error { - var dummy interface{} - return json.Unmarshal(data, &dummy) -} - -// FileExists validates a file exists at the given path. -func FileExists(path string) error { - _, err := os.Stat(path) - return err -} - -// NonEmpty checks if the given string contains at least one non-whitespace character and returns an error if not. -func NonEmpty(v string) error { - if utf8.RuneCountInString(strings.TrimSpace(v)) == 0 { - return errors.New("cannot be empty") - } - return nil -} - -// Int checks if the given string is a valid integer and returns an error if not. -func Int(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - if _, err := strconv.Atoi(v); err != nil { - return errors.New("invalid integer") - } - return nil -} - -// IntRange checks if the given string is a valid integer between `min` and `max` and returns an error if not. -func IntRange(v string, min int, max int) error { - i, err := strconv.Atoi(v) - if err != nil { - return Int(v) - } - if i < min { - return fmt.Errorf("cannot be less than %v", min) - } - if i > max { - return fmt.Errorf("cannot be greater than %v", max) - } - return nil -} - -// IntOdd checks if the given string is a valid integer and that it is odd and returns an error if not. -func IntOdd(v string) error { - i, err := strconv.Atoi(v) - if err != nil { - return Int(v) - } - if i%2 != 1 { - return errors.New("must be an odd integer") - } - return nil -} - -// ClusterName checks if the given string is a valid name for a cluster and returns an error if not. 
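// Aside, not part of the removed file: Go's % operator keeps the sign of the
// dividend, so IntOdd above (which tests i%2 != 1) reports negative odd numbers
// such as -3 as invalid, even though Int accepts "-1" in its own tests further
// down. Whether negative input matters here is a separate question; a
// remainder-of-zero test avoids the asymmetry, as this tiny sketch shows.
package main

import "fmt"

func main() {
	fmt.Println(-3 % 2)    // -1 in Go, so the check `i%2 != 1` rejects -3
	fmt.Println(-3%2 != 0) // true: `i%2 != 0` still classifies -3 as odd
	fmt.Println(5%2 != 0)  // true for positive odd numbers as before
}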
-func ClusterName(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - if length := utf8.RuneCountInString(v); length < 1 || length > 253 { - return errors.New("must be between 1 and 253 characters") - } - - if strings.ToLower(v) != v { - return errors.New("must be lower case") - } - - if !isMatch("^[a-z0-9-.]*$", v) { - return errors.New("only lower case alphanumeric [a-z0-9], dashes and dots are allowed") - } - - isAlphaNum := regexp.MustCompile("^[a-z0-9]$").MatchString - - // If we got this far, we know the string is ASCII and has at least one character - if !isAlphaNum(v[:1]) || !isAlphaNum(v[len(v)-1:]) { - return errors.New("must start and end with a lower case alphanumeric character [a-z0-9]") - } - - for _, segment := range strings.Split(v, ".") { - // Each segment can have up to 63 characters - if utf8.RuneCountInString(segment) > 63 { - return errors.New("no segment between dots can be more than 63 characters") - } - if !isAlphaNum(segment[:1]) || !isAlphaNum(segment[len(segment)-1:]) { - return errors.New("segments between dots must start and end with a lower case alphanumeric character [a-z0-9]") - } - } - - return nil -} - -// AWSClusterName checks if the given string is a valid name for a cluster on AWS and returns an error if not. -// See AWS docs: -// http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-using-console-create-stack-parameters.html -// http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-loadbalancer.html#cfn-elasticloadbalancingv2-loadbalancer-name -func AWSClusterName(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - if length := utf8.RuneCountInString(v); length < 1 || length > 28 { - return errors.New("must be between 1 and 28 characters") - } - - if strings.ToLower(v) != v { - return errors.New("must be lower case") - } - - if strings.HasPrefix(v, "-") || strings.HasSuffix(v, "-") { - return errors.New("must not start or end with '-'") - } - - if !isMatch("^[a-z][-a-z0-9]*$", v) { - return errors.New("must be a lower case AWS Stack Name: [a-z][-a-z0-9]*") - } - - return nil -} - -// MAC checks if the given string is a valid MAC address and returns an error if not. -// Based on net.ParseMAC. -func MAC(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - if _, err := net.ParseMAC(v); err != nil { - return errors.New("invalid MAC Address") - } - return nil -} - -// IPv4 checks if the given string is a valid IP v4 address and returns an error if not. -// Based on net.ParseIP. -func IPv4(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - if ip := net.ParseIP(v); ip == nil || !strings.Contains(v, ".") { - return errors.New("invalid IPv4 address") - } - return nil -} - -// SubnetCIDR checks if the given string is a valid CIDR for a master nodes or worker nodes subnet and returns an error if not. 
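// Aside, not part of the removed file: IPv4 above pairs net.ParseIP with a
// check for "." because ParseIP alone also accepts IPv6 literals. A short
// demonstration; To4() is another common way to insist on an IPv4 address.
package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	v6 := "2001:db8::1"
	fmt.Println(net.ParseIP(v6) != nil)                  // true: ParseIP happily parses IPv6
	fmt.Println(strings.Contains(v6, "."))               // false: the extra check filters it out
	fmt.Println(net.ParseIP("192.168.0.1").To4() != nil) // true: To4() succeeds only for IPv4-representable addresses
}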
-func SubnetCIDR(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - split := strings.Split(v, "/") - - if len(split) == 1 { - return errors.New("must provide a CIDR netmask (eg, /24)") - } - - if len(split) != 2 { - return errors.New("invalid IPv4 address") - } - - ip := split[0] - - if err := IPv4(ip); err != nil { - return errors.New("invalid IPv4 address") - } - - if mask, err := strconv.Atoi(split[1]); err != nil || mask < 0 || mask > 32 { - return errors.New("invalid netmask size (must be between 0 and 32)") - } - - // Catch any invalid CIDRs not caught by the checks above - if _, _, err := net.ParseCIDR(v); err != nil { - return errors.New("invalid CIDR") - } - - if strings.HasPrefix(ip, "172.17.") { - return errors.New("overlaps with default Docker Bridge subnet (172.17.0.0/16)") - } - - return nil -} - -// AWSSubnetCIDR checks if the given string is a valid CIDR for a master nodes or worker nodes subnet in an AWS VPC and returns an error if not. -func AWSSubnetCIDR(v string) error { - if err := SubnetCIDR(v); err != nil { - return err - } - - _, network, err := net.ParseCIDR(v) - if err != nil { - return errors.New("invalid CIDR") - } - if mask, _ := network.Mask.Size(); mask < 16 || mask > 28 { - return errors.New("AWS subnets must be between /16 and /28") - } - - return nil -} - -// DomainName checks if the given string is a valid domain name and returns an error if not. -func DomainName(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - split := strings.Split(v, ".") - for i, segment := range split { - // Trailing dot is OK - if len(segment) == 0 && i == len(split)-1 { - continue - } - if !isMatch("^[a-zA-Z0-9-]{1,63}$", segment) { - return errors.New("invalid domain name") - } - } - return nil -} - -// Host checks if the given string is either a valid IPv4 address or a valid domain name and returns an error if not. -func Host(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - // Either a valid IP address or domain name - if IPv4(v) != nil && DomainName(v) != nil { - return errors.New("invalid host (must be a domain name or IP address)") - } - return nil -} - -// Port checks if the given string is a valid port number and returns an error if not. -func Port(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - if IntRange(v, 1, 65535) != nil { - return errors.New("invalid port number") - } - return nil -} - -// HostPort checks if the given string is valid : format and returns an error if not. -func HostPort(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - split := strings.Split(v, ":") - if len(split) != 2 { - return errors.New("must use : format") - } - if err := Host(split[0]); err != nil { - return err - } - return Port(split[1]) -} - -// Email checks if the given string is a valid email address and returns an error if not. -func Email(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - invalidError := errors.New("invalid email address") - - split := strings.Split(v, "@") - if len(split) != 2 { - return invalidError - } - localPart := split[0] - domain := split[1] - - if NonEmpty(localPart) != nil { - return invalidError - } - - // No whitespace allowed in local-part - if isMatch(`\s`, localPart) { - return invalidError - } - - return DomainName(domain) -} - -const base64RegExp = `[A-Za-z0-9+\/]+={0,2}` - -// Certificate checks if the given string is a valid certificate in PEM format and returns an error if not. 
-// Ignores leading and trailing whitespace. -func Certificate(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - trimmed := strings.TrimSpace(v) - - // Don't let users hang themselves - if err := PrivateKey(trimmed); err == nil { - return errors.New("invalid certificate (appears to be a private key)") - } - block, _ := pem.Decode([]byte(trimmed)) - if block == nil { - return errors.New("failed to parse certificate") - } - if _, err := x509.ParseCertificate(block.Bytes); err != nil { - return errors.New("invalid certificate") - } - return nil -} - -// PrivateKey checks if the given string is a valid private key in PEM format and returns an error if not. -// Ignores leading and trailing whitespace. -func PrivateKey(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - // try to decode the private key pem block - block, _ := pem.Decode([]byte(v)) - if block == nil { - return errors.New("failed to parse private key") - } - // if input can be decoded, let's verify the pem input is a key (and not a certificate) - if block.Type != "RSA PRIVATE KEY" { - return errors.New("invalid private key") - } - - return nil -} - -// OpenSSHPublicKey checks if the given string is a valid OpenSSH public key and returns an error if not. -// Ignores leading and trailing whitespace. -func OpenSSHPublicKey(v string) error { - if err := NonEmpty(v); err != nil { - return err - } - - trimmed := strings.TrimSpace(v) - - // Don't let users hang themselves - if isMatch(`-BEGIN [\w-]+ PRIVATE KEY-`, trimmed) { - return errors.New("invalid SSH public key (appears to be a private key)") - } - - if strings.Contains(trimmed, "\n") { - return errors.New("invalid SSH public key (should not contain any newline characters)") - } - - invalidError := errors.New("invalid SSH public key") - - keyParts := regexp.MustCompile(`\s+`).Split(trimmed, -1) - if len(keyParts) < 2 { - return invalidError - } - - keyType := keyParts[0] - keyBase64 := keyParts[1] - if !isMatch(`^[\w-]+$`, keyType) || !isMatch("^"+base64RegExp+"$", keyBase64) { - return invalidError - } - - return nil -} - -// CIDRsDontOverlap ensures two given CIDRs don't overlap -// with one another. CIDR starting IPs are canonicalized -// before being compared. -func CIDRsDontOverlap(acidr, bcidr string) error { - _, a, err := net.ParseCIDR(acidr) - if err != nil { - return fmt.Errorf("invalid CIDR %q: %v", acidr, err) - } - if err := CanonicalizeIP(&a.IP); err != nil { - return fmt.Errorf("invalid CIDR %q: %v", acidr, err) - } - _, b, err := net.ParseCIDR(bcidr) - if err != nil { - return fmt.Errorf("invalid CIDR %q: %v", bcidr, err) - } - if err := CanonicalizeIP(&b.IP); err != nil { - return fmt.Errorf("invalid CIDR %q: %v", bcidr, err) - } - err = fmt.Errorf("%q and %q overlap", acidr, bcidr) - // IPs are of different families. - if len(a.IP) != len(b.IP) { - return nil - } - if a.Contains(b.IP) { - return err - } - if a.Contains(lastIP(b)) { - return err - } - if b.Contains(a.IP) { - return err - } - if b.Contains(lastIP(a)) { - return err - } - return nil -} - -// CanonicalizeIP ensures that the given IP is in standard form -// and returns an error otherwise. 
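// Aside, not part of the removed file: CIDRsDontOverlap above flags two blocks
// as overlapping when either contains the other's first or last address. For
// CIDR-aligned blocks the first-address test already decides it, since two
// power-of-two ranges either nest or are disjoint. A small sketch (error
// handling and the IPv4/IPv6 canonicalization above are omitted), checked
// against the pod/service CIDRs that appear in the fixtures further down:
package main

import (
	"fmt"
	"net"
)

func overlaps(acidr, bcidr string) bool {
	_, a, _ := net.ParseCIDR(acidr)
	_, b, _ := net.ParseCIDR(bcidr)
	return a.Contains(b.IP) || b.Contains(a.IP)
}

func main() {
	fmt.Println(overlaps("10.2.0.0/16", "10.3.0.0/16"))         // false: disjoint /16s
	fmt.Println(overlaps("192.168.0.0/24", "192.168.0.128/25")) // true: the /25 nests inside the /24
	fmt.Println(overlaps("0.0.0.0/0", "10.3.0.0/16"))           // true: everything is inside 0.0.0.0/0
}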
-func CanonicalizeIP(ip *net.IP) error { - if ip.To4() != nil { - *ip = ip.To4() - return nil - } - if ip.To16() != nil { - *ip = ip.To16() - return nil - } - return fmt.Errorf("IP %q is of unknown type", ip) -} - -func lastIP(cidr *net.IPNet) net.IP { - var last net.IP - for i := 0; i < len(cidr.IP); i++ { - last = append(last, cidr.IP[i]|^cidr.Mask[i]) - } - return last -} diff --git a/installer/pkg/validate/validate_test.go b/installer/pkg/validate/validate_test.go deleted file mode 100644 index 8499be2b0fa..00000000000 --- a/installer/pkg/validate/validate_test.go +++ /dev/null @@ -1,612 +0,0 @@ -package validate_test - -import ( - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/openshift/installer/installer/pkg/validate" - "github.com/openshift/installer/pkg/asset/tls" -) - -const caseMsg = "must be lower case" -const emptyMsg = "cannot be empty" -const invalidDomainMsg = "invalid domain name" -const invalidHostMsg = "invalid host (must be a domain name or IP address)" -const invalidIPMsg = "invalid IPv4 address" -const invalidIntMsg = "invalid integer" -const invalidPortMsg = "invalid port number" -const noCIDRNetmaskMsg = "must provide a CIDR netmask (eg, /24)" - -type test struct { - in string - expected string -} - -type validator func(string) error - -func runTests(t *testing.T, funcName string, fn validator, tests []test) { - for _, test := range tests { - err := fn(test.in) - if (err == nil && test.expected != "") || (err != nil && err.Error() != test.expected) { - t.Errorf("For %s(%q), expected %q, got %q", funcName, test.in, test.expected, err) - } - } -} - -func TestNonEmpty(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", ""}, - {".", ""}, - {"日本語", ""}, - } - runTests(t, "NonEmpty", validate.NonEmpty, tests) -} - -func TestInt(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"2 3", invalidIntMsg}, - {"1.1", invalidIntMsg}, - {"abc", invalidIntMsg}, - {"日本語", invalidIntMsg}, - {"1 abc", invalidIntMsg}, - {"日本語2", invalidIntMsg}, - {"0", ""}, - {"1", ""}, - {"999999", ""}, - {"-1", ""}, - } - runTests(t, "Int", validate.Int, tests) -} - -func TestIntRange(t *testing.T) { - tests := []struct { - in string - min int - max int - expected string - }{ - {"", 4, 6, emptyMsg}, - {" ", 4, 6, emptyMsg}, - {"2 3", 1, 2, invalidIntMsg}, - {"1.1", 0, 0, invalidIntMsg}, - {"abc", -2, -1, invalidIntMsg}, - {"日本語", 99, 100, invalidIntMsg}, - {"5", 4, 6, ""}, - {"5", 5, 5, ""}, - {"5", 6, 8, "cannot be less than 6"}, - {"5", 6, 4, "cannot be less than 6"}, - {"5", 2, 4, "cannot be greater than 4"}, - } - - for _, test := range tests { - err := validate.IntRange(test.in, test.min, test.max) - if (err == nil && test.expected != "") || (err != nil && err.Error() != test.expected) { - t.Errorf("For IntRange(%q, %v, %v), expected %q, got %q", test.in, test.min, test.max, test.expected, err) - } - } -} - -func TestIntOdd(t *testing.T) { - notOddMsg := "must be an odd integer" - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"0", notOddMsg}, - {"1", ""}, - {"2", notOddMsg}, - {"99", ""}, - {"100", notOddMsg}, - {"abc", invalidIntMsg}, - {"1 abc", invalidIntMsg}, - {"日本語", invalidIntMsg}, - } - runTests(t, "IntOdd", validate.IntOdd, tests) -} - -func TestClusterName(t *testing.T) { - const charsMsg = "only lower case alphanumeric [a-z0-9], dashes and dots are allowed" - const lengthMsg = "must be between 1 and 253 characters" - const segmentLengthMsg = "no segment 
between dots can be more than 63 characters" - const startEndCharMsg = "must start and end with a lower case alphanumeric character [a-z0-9]" - const segmentStartEndCharMsg = "segments between dots must start and end with a lower case alphanumeric character [a-z0-9]" - - maxSizeName := strings.Repeat("123456789.", 25) + "123" - maxSizeSegment := strings.Repeat("1234567890", 6) + "123" - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", ""}, - {"A", caseMsg}, - {"abc D", caseMsg}, - {"1", ""}, - {".", startEndCharMsg}, - {"a.", startEndCharMsg}, - {".a", startEndCharMsg}, - {"a.a", ""}, - {"-a", startEndCharMsg}, - {"a-", startEndCharMsg}, - {"a.-a", segmentStartEndCharMsg}, - {"a-.a", segmentStartEndCharMsg}, - {"a%a", charsMsg}, - {"日本語", charsMsg}, - {"a日本語a", charsMsg}, - {maxSizeName, ""}, - {maxSizeName + "a", lengthMsg}, - {maxSizeSegment + ".abc", ""}, - {maxSizeSegment + "a.abc", segmentLengthMsg}, - } - runTests(t, "ClusterName", validate.ClusterName, tests) -} - -func TestAWSClusterName(t *testing.T) { - const charsMsg = "must be a lower case AWS Stack Name: [a-z][-a-z0-9]*" - const lengthMsg = "must be between 1 and 28 characters" - const hyphenMsg = "must not start or end with '-'" - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", ""}, - {"A", caseMsg}, - {"abc D", caseMsg}, - {"1", charsMsg}, - {".", charsMsg}, - {"a.", charsMsg}, - {".a", charsMsg}, - {"a.a", charsMsg}, - {"a%a", charsMsg}, - {"a-a", ""}, - {"-abc", hyphenMsg}, - {"abc-", hyphenMsg}, - {"日本語", charsMsg}, - {"a日本語a", charsMsg}, - {"a234567890123456789012345678", ""}, - {"12345678901234567890123456789", lengthMsg}, - {"A2345678901234567890123456789", lengthMsg}, - } - runTests(t, "AWSClusterName", validate.AWSClusterName, tests) -} - -func TestMAC(t *testing.T) { - const invalidMsg = "invalid MAC Address" - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"abc", invalidMsg}, - {"12:34:45:78:9A:BC", ""}, - {"12-34-45-78-9A-BC", ""}, - {"12:34:45:78:9a:bc", ""}, - {"12:34:45:78:9X:YZ", invalidMsg}, - {"12.34.45.78.9A.BC", invalidMsg}, - } - runTests(t, "MAC", validate.MAC, tests) -} - -func TestIPv4(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"0.0.0.0", ""}, - {"1.2.3.4", ""}, - {"1.2.3.", invalidIPMsg}, - {"1.2.3.4.", invalidIPMsg}, - {"1.2.3.a", invalidIPMsg}, - {"255.255.255.255", ""}, - } - runTests(t, "IPv4", validate.IPv4, tests) -} - -func TestSubnetCIDR(t *testing.T) { - const netmaskSizeMsg = "invalid netmask size (must be between 0 and 32)" - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"/16", invalidIPMsg}, - {"0.0.0.0/0", ""}, - {"0.0.0.0/32", ""}, - {"1.2.3.4", noCIDRNetmaskMsg}, - {"1.2.3.", noCIDRNetmaskMsg}, - {"1.2.3.4.", noCIDRNetmaskMsg}, - {"1.2.3.4/0", ""}, - {"1.2.3.4/1", ""}, - {"1.2.3.4/31", ""}, - {"1.2.3.4/32", ""}, - {"1.2.3./16", invalidIPMsg}, - {"1.2.3.4./16", invalidIPMsg}, - {"1.2.3.4/33", netmaskSizeMsg}, - {"1.2.3.4/-1", netmaskSizeMsg}, - {"1.2.3.4/abc", netmaskSizeMsg}, - {"172.17.1.2", noCIDRNetmaskMsg}, - {"172.17.1.2/", netmaskSizeMsg}, - {"172.17.1.2/33", netmaskSizeMsg}, - {"172.17.1.2/20", "overlaps with default Docker Bridge subnet (172.17.0.0/16)"}, - {"255.255.255.255/1", ""}, - {"255.255.255.255/32", ""}, - } - runTests(t, "SubnetCIDR", validate.SubnetCIDR, tests) -} - -func TestAWSsubnetCIDR(t *testing.T) { - const awsNetmaskSizeMsg = "AWS subnets must be between /16 and /28" - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"/20", invalidIPMsg}, - {"1.2.3.4", 
noCIDRNetmaskMsg}, - {"1.2.3.4/15", awsNetmaskSizeMsg}, - {"1.2.3.4/16", ""}, - {"1.2.3.4/28", ""}, - {"1.2.3.4/29", awsNetmaskSizeMsg}, - } - runTests(t, "AWSSubnetCIDR", validate.AWSSubnetCIDR, tests) -} - -func TestDomainName(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", ""}, - {".", invalidDomainMsg}, - {"日本語", invalidDomainMsg}, - {"日本語.com", invalidDomainMsg}, - {"abc.日本語.com", invalidDomainMsg}, - {"a日本語a.com", invalidDomainMsg}, - {"abc", ""}, - {"ABC", ""}, - {"ABC123", ""}, - {"ABC123.COM123", ""}, - {"1", ""}, - {"0.0", ""}, - {"1.2.3.4", ""}, - {"1.2.3.4.", ""}, - {"abc.", ""}, - {"abc.com", ""}, - {"abc.com.", ""}, - {"a.b.c.d.e.f", ""}, - {".abc", invalidDomainMsg}, - {".abc.com", invalidDomainMsg}, - {".abc.com", invalidDomainMsg}, - } - runTests(t, "DomainName", validate.DomainName, tests) -} - -func TestHost(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", ""}, - {".", invalidHostMsg}, - {"日本語", invalidHostMsg}, - {"日本語.com", invalidHostMsg}, - {"abc.日本語.com", invalidHostMsg}, - {"a日本語a.com", invalidHostMsg}, - {"abc", ""}, - {"ABC", ""}, - {"ABC123", ""}, - {"ABC123.COM123", ""}, - {"1", ""}, - {"0.0", ""}, - {"1.2.3.4", ""}, - {"1.2.3.4.", ""}, - {"abc.", ""}, - {"abc.com", ""}, - {"abc.com.", ""}, - {"a.b.c.d.e.f", ""}, - {".abc", invalidHostMsg}, - {".abc.com", invalidHostMsg}, - {".abc.com", invalidHostMsg}, - } - runTests(t, "Host", validate.Host, tests) -} - -func TestPort(t *testing.T) { - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", invalidPortMsg}, - {".", invalidPortMsg}, - {"日本語", invalidPortMsg}, - {"0", invalidPortMsg}, - {"1", ""}, - {"123", ""}, - {"12345", ""}, - {"65535", ""}, - {"65536", invalidPortMsg}, - } - runTests(t, "Port", validate.Port, tests) -} - -func TestHostPort(t *testing.T) { - const invalidHostPortMsg = "must use : format" - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {".", invalidHostPortMsg}, - {"日本語", invalidHostPortMsg}, - {"abc.com", invalidHostPortMsg}, - {"abc.com:0", invalidPortMsg}, - {"abc.com:1", ""}, - {"abc.com:65535", ""}, - {"abc.com:65536", invalidPortMsg}, - {"abc.com:abc", invalidPortMsg}, - {"1.2.3.4:1234", ""}, - {"1.2.3.4:abc", invalidPortMsg}, - {"日本語:1234", invalidHostMsg}, - } - runTests(t, "HostPort", validate.HostPort, tests) -} - -func TestEmail(t *testing.T) { - const invalidMsg = "invalid email address" - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", invalidMsg}, - {".", invalidMsg}, - {"日本語", invalidMsg}, - {"a@abc.com", ""}, - {"A@abc.com", ""}, - {"1@abc.com", ""}, - {"a.B.1.あ@abc.com", ""}, - {"ア@abc.com", ""}, - {"中文@abc.com", ""}, - {"a@abc.com", ""}, - {"a@ABC.com", ""}, - {"a@123.com", ""}, - {"a@日本語.com", invalidDomainMsg}, - {"a@.com", invalidDomainMsg}, - {"@abc.com", invalidMsg}, - } - runTests(t, "Email", validate.Email, tests) -} - -func TestCertificate(t *testing.T) { - const invalidMsg = "invalid certificate" - const privateKeyMsg = "invalid certificate (appears to be a private key)" - const badPem = "failed to parse certificate" - - // throwaway rsa key - rsaKey, err := tls.PrivateKey() - if err != nil { - t.Fatalf("failed to generate private key: %v", err) - } - keyInBytes := x509.MarshalPKCS1PrivateKey(rsaKey) - keyinPem := pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: keyInBytes, - }, - ) - - // throwaway certificate - cfg := &tls.CertCfg{ - Subject: pkix.Name{ - CommonName: "test-ca", - OrganizationalUnit: []string{"openshift"}, - }, - } - cert, err := 
tls.SelfSignedCACert(cfg, rsaKey) - if err != nil { - t.Fatalf("failed to generate self signed certificate: %v", err) - } - certInPem := pem.EncodeToMemory( - &pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - }, - ) - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", badPem}, - {".", badPem}, - {"日本語", badPem}, - {string(certInPem), ""}, - - {"-----BEGIN CERTIFICATE-----\na\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\nabc\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\nabc=\n-----END CERTIFICATE-----", invalidMsg}, - {"-----BEGIN CERTIFICATE-----\nabc===\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\na%a\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\n\nab\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\nab\n\n-----END CERTIFICATE-----", badPem}, - {"-----BEGIN CERTIFICATE-----\na\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\na\n-----END CERTIFICATE-----", badPem}, - {string(keyinPem), privateKeyMsg}, - } - runTests(t, "Certificate", validate.Certificate, tests) -} - -func TestPrivateKey(t *testing.T) { - const invalidMsg = "failed to parse private key" - - // throw-away rsa key - rsaKey, err := tls.PrivateKey() - if err != nil { - t.Fatalf("failed to generate private key: %v", err) - } - keyInBytes := x509.MarshalPKCS1PrivateKey(rsaKey) - keyinPem := pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: keyInBytes, - }, - ) - - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", invalidMsg}, - {".", invalidMsg}, - {"日本語", invalidMsg}, - {string(keyinPem), ""}, - {"-----BEGIN RSA PRIVATE KEY-----\na\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\nabc\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\nabc==\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\nabc===\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN EC PRIVATE KEY-----\nabc\n-----END EC PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\na%a\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\n\nab\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\nab\n\n-----END RSA PRIVATE KEY-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\na\n-----END RSA PRIVATE KEY-----\n-----BEGIN CERTIFICATE-----\na\n-----END CERTIFICATE-----", invalidMsg}, - {"-----BEGIN CERTIFICATE-----\na\n-----END CERTIFICATE-----", invalidMsg}, - } - runTests(t, "PrivateKey", validate.PrivateKey, tests) -} - -func TestOpenSSHPublicKey(t *testing.T) { - const invalidMsg = "invalid SSH public key" - const multiLineMsg = "invalid SSH public key (should not contain any newline characters)" - const privateKeyMsg = "invalid SSH public key (appears to be a private key)" - tests := []test{ - {"", emptyMsg}, - {" ", emptyMsg}, - {"a", invalidMsg}, - {".", invalidMsg}, - {"日本語", invalidMsg}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", ""}, - {"ssh-rsa \t AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", ""}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com", ""}, - {"\nssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com", ""}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com\n", ""}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL\nssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", multiLineMsg}, - {"ssh-rsa\nAAAAB3NzaC1yc2EAAAADAQABAAACAQDxL you@example.com", multiLineMsg}, - {"ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL\nyou@example.com", multiLineMsg}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDxL", ""}, - {"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCt3BebCHqnSsgpLjo4kVvyfY/z2BS8t27r/7du+O2pb4xYkr7n+KFpbOz523vMTpQ+o1jY4u4TgexglyT9nqasWgLOvo1qjD1agHme8LlTPQSk07rXqOB85Uq5p7ig2zoOejF6qXhcc3n1c7+HkxHrgpBENjLVHOBpzPBIAHkAGaZcl07OCqbsG5yxqEmSGiAlh/IiUVOZgdDMaGjCRFy0wk0mQaGD66DmnFc1H5CzcPjsxr0qO65e7lTGsE930KkO1Vc+RHCVwvhdXs+c2NhJ2/3740Kpes9n1/YullaWZUzlCPDXtRuy6JRbFbvy39JUgHWGWzB3d+3f8oJ/N4qZ cardno:000603633110", ""}, - {"-----BEGIN CERTIFICATE-----abcd-----END CERTIFICATE-----", invalidMsg}, - {"-----BEGIN RSA PRIVATE KEY-----\nabc\n-----END RSA PRIVATE KEY-----", privateKeyMsg}, - } - runTests(t, "OpenSSHPublicKey", validate.OpenSSHPublicKey, tests) -} - -func TestCIDRsDontOverlap(t *testing.T) { - cases := []struct { - a string - b string - err bool - }{ - { - a: "192.168.0.0/24", - b: "192.168.0.0/24", - err: true, - }, - { - a: "192.168.0.0/24", - b: "192.168.0.3/24", - err: true, - }, - { - a: "192.168.0.0/30", - b: "192.168.0.3/30", - err: true, - }, - { - a: "192.168.0.0/30", - b: "192.168.0.4/30", - err: false, - }, - { - a: "0.0.0.0/0", - b: "192.168.0.0/24", - err: true, - }, - } - - for i, c := range cases { - if err := validate.CIDRsDontOverlap(c.a, c.b); (err != nil) != c.err { - no := "no" - if c.err { - no = "an" - } - t.Errorf("test case %d: expected %s error, got %v", i, no, err) - } - } -} - -func TestJSONFile(t *testing.T) { - cases := []struct { - buf []byte - err bool - }{ - { - buf: []byte(""), - err: true, - }, - { - buf: []byte("[]"), - err: false, - }, - { - buf: []byte("foobar"), - err: true, - }, - { - buf: []byte("}}}}"), - err: true, - }, - { - buf: []byte("{}"), - err: false, - }, - { - buf: []byte(`{"foo": "bar"}`), - err: false, - }, - } - for i, c := range cases { - f, err := ioutil.TempFile("", "validate") - if err != nil { - t.Fatalf("test case %d: failed to create temporary file: %v", i, err) - } - if _, err := f.Write(c.buf); err != nil { - t.Errorf("test case %d: failed to write to temporary file: %v", i, err) - } - if err := validate.JSONFile(f.Name()); (err != nil) != c.err { - no := "no" - if c.err { - no = "an" - } - t.Errorf("test case %d: expected %s error, got %v", i, no, err) - } - f.Close() - os.Remove(f.Name()) - } -} - -func TestFileExists(t *testing.T) { - cases := []struct { - path string - err bool - }{ - { - path: "./fixtures/doesnotexist", - err: true, - }, - { - path: "./fixtures/exists", - err: false, - }, - } - for i, c := range cases { - if err := validate.FileExists(c.path); (err != nil) != c.err { - no := "no" - if c.err { - no = "an" - } - t.Errorf("test case %d: expected %s error, got %v", i, no, err) - } - } -} diff --git a/installer/pkg/workflow/convert.go b/installer/pkg/workflow/convert.go deleted file mode 100644 index 7be19738a94..00000000000 --- a/installer/pkg/workflow/convert.go +++ /dev/null @@ -1,43 +0,0 @@ -package workflow - -import ( - "encoding/json" - "fmt" - "io/ioutil" - - "github.com/openshift/installer/pkg/types/config" -) - -// ConvertWorkflow creates new instances of the 'convert' workflow, -// responsible for converting an old cluster config. 
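// For context, not part of the removed files: the convert, destroy, and init
// workflows removed here all follow one pattern: a metadata value shared by
// every step, plus an ordered list of func(*metadata) error steps. The Workflow
// and step types themselves live in workflow.go, outside this hunk, so the
// sketch that follows is only a reconstruction of that pattern with simplified,
// hypothetical definitions, not the removed implementation.
package workflow

import "fmt"

// Simplified stand-ins for the unexported types referenced by the removed code
// (the real metadata also carries the parsed cluster config, contOnErr, etc.).
type metadata struct {
	configFilePath string
	clusterDir     string
}

type step func(*metadata) error

// Workflow pairs shared metadata with an ordered list of steps.
type Workflow struct {
	metadata metadata
	steps    []step
}

// Execute runs the steps in order, stopping at the first failure.
// (Hypothetical method name; the real driver is not shown in this hunk.)
func (w Workflow) Execute() error {
	for _, s := range w.steps {
		if err := s(&w.metadata); err != nil {
			return fmt.Errorf("workflow step failed: %v", err)
		}
	}
	return nil
}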
-func ConvertWorkflow(configFilePath string) Workflow { - return Workflow{ - metadata: metadata{configFilePath: configFilePath}, - steps: []step{ - readTFVarsConfigStep, - printYAMLConfigStep, - }, - } -} - -func readTFVarsConfigStep(m *metadata) error { - data, err := ioutil.ReadFile(m.configFilePath) - if err != nil { - return err - } - - m.cluster = config.Cluster{} - - return json.Unmarshal([]byte(data), &m.cluster) -} - -func printYAMLConfigStep(m *metadata) error { - yaml, err := m.cluster.YAML() - if err != nil { - return err - } - - fmt.Println(yaml) - - return nil -} diff --git a/installer/pkg/workflow/destroy.go b/installer/pkg/workflow/destroy.go deleted file mode 100644 index 3c7c6e2354b..00000000000 --- a/installer/pkg/workflow/destroy.go +++ /dev/null @@ -1,160 +0,0 @@ -package workflow - -import ( - "encoding/json" - "fmt" - "path/filepath" - "time" - - "github.com/openshift/installer/pkg/terraform" - log "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/clientcmd" - "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" -) - -const ( - machineSetNamespace = "openshift-cluster-api" - workerMachineSet = "worker" -) - -// DestroyWorkflow creates new instances of the 'destroy' workflow, -// responsible for running the actions required to remove resources -// of an existing cluster and clean up any remaining artefacts. -func DestroyWorkflow(clusterDir string, contOnErr bool) Workflow { - return Workflow{ - metadata: metadata{ - clusterDir: clusterDir, - contOnErr: contOnErr, - }, - steps: []step{ - readClusterConfigStep, - destroyWorkersStep, - destroyInfraStep, - destroyAssetsStep, - }, - } -} - -func destroyAssetsStep(m *metadata) error { - return runDestroyStep(m, terraform.AssetsStep) -} - -func destroyInfraStep(m *metadata) error { - return runDestroyStep(m, terraform.InfraStep) -} - -func destroyWorkersStep(m *metadata) error { - kubeconfig := filepath.Join(m.clusterDir, generatedPath, "auth", "kubeconfig") - - client, err := buildClusterClient(kubeconfig) - if err != nil { - return fmt.Errorf("failed to build cluster-api client: %v", err) - } - - if err := scaleDownWorkers(client); err != nil { - return fmt.Errorf("failed to scale worker MachineSet: %v", err) - } - - if err := waitForWorkers(client); err != nil { - return fmt.Errorf("worker MachineSet failed to scale down: %v", err) - } - - if err := deleteWorkerMachineSet(client); err != nil { - return fmt.Errorf("failed to delete worker MachineSet: %v", err) - } - - return nil -} - -func scaleDownWorkers(client *clientset.Clientset) error { - // Unfortunately, MachineSets don't yet support the scale - // subresource. So we have to patch the object to set the - // replicas to zero. - patch := []struct { - Op string `json:"op"` - Path string `json:"path"` - Value uint32 `json:"value"` - }{{ - Op: "replace", - Path: "/spec/replicas", - Value: 0, - }} - - patchBytes, err := json.Marshal(patch) - if err != nil { - return err - } - - _, err = client.ClusterV1alpha1(). - MachineSets(machineSetNamespace). - Patch(workerMachineSet, types.JSONPatchType, patchBytes) - - return err -} - -func waitForWorkers(client *clientset.Clientset) error { - interval := 3 * time.Second - timeout := 60 * time.Second - - log.Info("Waiting for worker MachineSet to scale down...") - - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - machineSet, err := client.ClusterV1alpha1(). 
- MachineSets(machineSetNamespace). - Get(workerMachineSet, v1.GetOptions{}) - - if err != nil { - return false, err - } - - if machineSet.Status.Replicas > 0 { - return false, nil - } - - return true, nil - }) - - return err -} - -func deleteWorkerMachineSet(client *clientset.Clientset) error { - return client.ClusterV1alpha1(). - MachineSets(machineSetNamespace). - Delete(workerMachineSet, &v1.DeleteOptions{}) -} - -func runDestroyStep(m *metadata, step string, extraArgs ...string) error { - if !terraform.HasStateFile(m.clusterDir, step) { - // there is no statefile, therefore nothing to destroy for this step - return nil - } - - dir, err := terraform.BaseLocation() - if err != nil { - return err - } - - templateDir, err := terraform.FindStepTemplates(dir, step, m.cluster.Platform) - if err != nil { - return err - } - - return terraform.Destroy(m.clusterDir, step, templateDir, extraArgs...) -} - -func buildClusterClient(kubeconfig string) (*clientset.Clientset, error) { - config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) - if err != nil { - return nil, fmt.Errorf("failed to build config: %v", err) - } - - client, err := clientset.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("failed to build client: %v", err) - } - - return client, nil -} diff --git a/installer/pkg/workflow/fixtures/aws.basic.yaml b/installer/pkg/workflow/fixtures/aws.basic.yaml deleted file mode 100644 index a2f2ca7f46d..00000000000 --- a/installer/pkg/workflow/fixtures/aws.basic.yaml +++ /dev/null @@ -1,39 +0,0 @@ -admin: - email: fake-email@example.com - password: fake-password -aws: - ec2AMIOverride: ami-0af8953af3ec06b7c - master: - ec2Type: m4.large - rootVolume: - iops: 100 - size: 30 - type: gp2 - worker: - ec2Type: m4.large - rootVolume: - iops: 100 - size: 30 - type: gp2 -baseDomain: tectonic-ci.de -master: - nodePools: - - master -name: aws-basic -networking: - mtu: 1480 - podCIDR: 10.2.0.0/16 - serviceCIDR: 10.3.0.0/16 - type: canal -nodePools: - - name: master - count: 2 - - name: worker - count: 3 -platform: aws -pullSecret: '{"auths": {}}' -worker: - nodePools: - - worker -CA: - rootCAKeyAlg: DES diff --git a/installer/pkg/workflow/fixtures/terraform.tfvars b/installer/pkg/workflow/fixtures/terraform.tfvars deleted file mode 100644 index c427ff5be44..00000000000 --- a/installer/pkg/workflow/fixtures/terraform.tfvars +++ /dev/null @@ -1,27 +0,0 @@ -{ - "tectonic_admin_email": "fake-email@example.com", - "tectonic_admin_password": "fake-password", - "tectonic_aws_ec2_ami_override": "ami-0af8953af3ec06b7c", - "tectonic_aws_endpoints": "all", - "tectonic_aws_master_ec2_type": "m4.large", - "tectonic_aws_master_root_volume_iops": 100, - "tectonic_aws_master_root_volume_size": 30, - "tectonic_aws_master_root_volume_type": "gp2", - "tectonic_aws_profile": "default", - "tectonic_aws_region": "us-east-1", - "tectonic_aws_vpc_cidr_block": "10.0.0.0/16", - "tectonic_aws_worker_ec2_type": "m4.large", - "tectonic_aws_worker_root_volume_iops": 100, - "tectonic_aws_worker_root_volume_size": 30, - "tectonic_aws_worker_root_volume_type": "gp2", - "tectonic_base_domain": "tectonic-ci.de", - "tectonic_libvirt_network_if": "osbr0", - "tectonic_master_count": 2, - "tectonic_cluster_name": "aws-basic", - "tectonic_networking": "canal", - "tectonic_service_cidr": "10.3.0.0/16", - "tectonic_cluster_cidr": "10.2.0.0/16", - "tectonic_platform": "aws", - "tectonic_pull_secret": "{\"auths\": {}}", - "tectonic_worker_count": 3 -} diff --git a/installer/pkg/workflow/init.go b/installer/pkg/workflow/init.go 
deleted file mode 100644 index c778f6a0c1f..00000000000 --- a/installer/pkg/workflow/init.go +++ /dev/null @@ -1,122 +0,0 @@ -package workflow - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - yaml "gopkg.in/yaml.v2" - - configgenerator "github.com/openshift/installer/installer/pkg/config-generator" - "github.com/openshift/installer/pkg/types/config" -) - -const ( - generatedPath = "generated" - kcoConfigFileName = "kco-config.yaml" - maoConfigFileName = "mao-config.yaml" - kubeSystemPath = "generated/manifests" - kubeSystemFileName = "cluster-config.yaml" - tectonicSystemPath = "generated/tectonic" - tlsPath = "generated/tls" - tectonicSystemFileName = "cluster-config.yaml" - terraformVariablesFileName = "terraform.tfvars" -) - -// InitWorkflow creates new instances of the 'init' workflow, -// responsible for initializing a new cluster. -func InitWorkflow(configFilePath string) Workflow { - return Workflow{ - metadata: metadata{configFilePath: configFilePath}, - steps: []step{ - prepareWorspaceStep, - }, - } -} - -func buildInternalConfig(clusterDir string) error { - if clusterDir == "" { - return errors.New("no cluster dir given for building internal config") - } - - // fill the internal struct - clusterID, err := configgenerator.GenerateClusterID(16) - if err != nil { - return err - } - internalCfg := config.Internal{ - ClusterID: clusterID, - } - - // store the content - yamlContent, err := yaml.Marshal(internalCfg) - internalFileContent := []byte("# Do not touch, auto-generated\n") - internalFileContent = append(internalFileContent, yamlContent...) - if err != nil { - return err - } - return ioutil.WriteFile(filepath.Join(clusterDir, internalFileName), internalFileContent, 0666) -} - -func generateTerraformVariablesStep(m *metadata) error { - vars, err := m.cluster.TFVars() - if err != nil { - return err - } - - terraformVariablesFilePath := filepath.Join(m.clusterDir, terraformVariablesFileName) - return ioutil.WriteFile(terraformVariablesFilePath, []byte(vars), 0666) -} - -func prepareWorspaceStep(m *metadata) error { - dir, err := os.Getwd() - if err != nil { - return fmt.Errorf("failed to get current directory: %v", err) - } - - if m.configFilePath == "" { - return errors.New("a path to a config file is required") - } - - // load initial cluster config to get cluster.Name - cluster, err := readClusterConfig(m.configFilePath, "") - if err != nil { - return fmt.Errorf("failed to get configuration from file %q: %v", m.configFilePath, err) - } - - if err := cluster.ValidateAndLog(); err != nil { - return err - } - - if cluster.Platform == config.PlatformLibvirt { - if err := cluster.Libvirt.UseCachedImage(); err != nil { - return err - } - } - - // generate clusterDir folder - clusterDir := filepath.Join(dir, cluster.Name) - m.clusterDir = clusterDir - if stat, err := os.Stat(clusterDir); err == nil && stat.IsDir() { - return fmt.Errorf("cluster directory already exists at %q", clusterDir) - } - - if err := os.MkdirAll(clusterDir, os.ModeDir|0755); err != nil { - return fmt.Errorf("failed to create cluster directory at %q", clusterDir) - } - - // put config file under the clusterDir folder - configFilePath := filepath.Join(clusterDir, configFileName) - configContent, err := yaml.Marshal(cluster) - if err != nil { - return err - } - if err := ioutil.WriteFile(configFilePath, configContent, 0666); err != nil { - return fmt.Errorf("failed to create cluster config at %q: %v", clusterDir, err) - } - - // generate the internal config file under the clusterDir folder 
- return buildInternalConfig(clusterDir) -} diff --git a/installer/pkg/workflow/init_test.go b/installer/pkg/workflow/init_test.go deleted file mode 100644 index 4501572c779..00000000000 --- a/installer/pkg/workflow/init_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package workflow - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "testing" - - "github.com/openshift/installer/pkg/types/config" -) - -func initTestCluster(cfg string) (*config.Cluster, error) { - testConfig, err := config.ParseConfigFile(cfg) - if err != nil { - return nil, fmt.Errorf("failed to parse test config: %v", err) - } - testConfig.PullSecret = "{\"auths\": {}}" - if len(testConfig.Validate()) != 0 { - return nil, errors.New("failed to validate test conifg") - } - return testConfig, nil -} - -func TestGenerateTerraformVariablesStep(t *testing.T) { - expectedTfVarsFilePath := "./fixtures/terraform.tfvars" - clusterDir := "." - gotTfVarsFilePath := filepath.Join(clusterDir, terraformVariablesFileName) - - // clean up - defer func() { - if err := os.Remove(gotTfVarsFilePath); err != nil { - t.Errorf("failed to clean up generated tf vars file: %v", err) - } - }() - - cluster, err := initTestCluster("./fixtures/aws.basic.yaml") - if err != nil { - t.Fatalf("failed to init cluster: %v", err) - } - - m := &metadata{ - cluster: *cluster, - clusterDir: clusterDir, - } - - generateTerraformVariablesStep(m) - gotData, err := ioutil.ReadFile(gotTfVarsFilePath) - if err != nil { - t.Errorf("failed to load generated tf vars file: %v", err) - } - got := string(gotData) - - expectedData, err := ioutil.ReadFile(expectedTfVarsFilePath) - if err != nil { - t.Errorf("failed to load expected tf vars file: %v", err) - } - expected := string(expectedData) - - if got+"\n" != expected { - t.Errorf("expected: %s, got: %s", expected, got) - } -} - -func TestBuildInternalConfig(t *testing.T) { - testClusterDir := "." 
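buildInternalConfig above marshals the generated internal cluster config to YAML, prepends a do-not-edit header, and writes it into the cluster directory; TestBuildInternalConfig exercises that path. A minimal self-contained sketch of the same pattern, with an illustrative internalConfig type and file name, and with the yaml.Marshal error checked before its output is used:

package example

import (
	"io/ioutil"
	"path/filepath"

	yaml "gopkg.in/yaml.v2"
)

// internalConfig is an illustrative stand-in for the installer's internal
// cluster configuration.
type internalConfig struct {
	ClusterID string `yaml:"clusterID"`
}

func writeInternalConfig(clusterDir, clusterID string) error {
	content, err := yaml.Marshal(internalConfig{ClusterID: clusterID})
	if err != nil {
		return err
	}
	// Prepend a header so readers know the file is machine-generated.
	out := append([]byte("# Do not touch, auto-generated\n"), content...)
	return ioutil.WriteFile(filepath.Join(clusterDir, "internal.yaml"), out, 0666)
}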
- internalFilePath := filepath.Join(testClusterDir, internalFileName) - - // clean up - defer func() { - if err := os.Remove(internalFilePath); err != nil { - t.Errorf("failed to remove temp file: %v", err) - } - }() - - errorTestCases := []struct { - test string - got string - expected string - }{ - { - test: "no clusterDir exists", - got: buildInternalConfig("").Error(), - expected: "no cluster dir given for building internal config", - }, - } - - for _, tc := range errorTestCases { - if tc.got != tc.expected { - t.Errorf("test case %s: expected: %s, got: %s", tc.test, tc.expected, tc.got) - } - } - - if err := buildInternalConfig(testClusterDir); err != nil { - t.Errorf("failed to run buildInternalStep, %v", err) - } - - if _, err := os.Stat(internalFilePath); err != nil { - t.Errorf("failed to create internal file, %v", err) - } - - testInternal, err := config.ParseInternalFile(internalFilePath) - if err != nil { - t.Errorf("failed to parse internal file, %v", err) - } - testCases := []struct { - test string - got string - expected string - }{ - { - test: "clusterId", - got: testInternal.ClusterID, - expected: "^[a-zA-Z0-9_-]*$", - }, - } - - for _, tc := range testCases { - match, _ := regexp.MatchString(tc.expected, tc.got) - if !match { - t.Errorf("test case %s: expected: %s, got: %s", tc.test, tc.expected, tc.got) - } - } -} diff --git a/installer/pkg/workflow/install.go b/installer/pkg/workflow/install.go deleted file mode 100644 index 27f71bda063..00000000000 --- a/installer/pkg/workflow/install.go +++ /dev/null @@ -1,92 +0,0 @@ -package workflow - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/openshift/installer/installer/pkg/config-generator" - "github.com/openshift/installer/pkg/terraform" - "github.com/openshift/installer/pkg/types/config" -) - -// InstallWorkflow creates new instances of the 'install' workflow, -// responsible for running the actions necessary to install a new cluster. -func InstallWorkflow(clusterDir string) Workflow { - return Workflow{ - metadata: metadata{clusterDir: clusterDir}, - steps: []step{ - readClusterConfigStep, - generateTerraformVariablesStep, - generateTLSConfigStep, - generateClusterConfigMaps, - generateIgnConfigStep, - installAssetsStep, - installInfraStep, - }, - } -} - -func installAssetsStep(m *metadata) error { - return runInstallStep(m, terraform.AssetsStep) -} - -func installInfraStep(m *metadata) error { - return runInstallStep(m, terraform.InfraStep) -} - -func runInstallStep(m *metadata, step string, extraArgs ...string) error { - dir, err := terraform.BaseLocation() - if err != nil { - return err - } - templateDir, err := terraform.FindStepTemplates(dir, step, m.cluster.Platform) - if err != nil { - return err - } - if err := terraform.Init(m.clusterDir, templateDir); err != nil { - return err - } - _, err = terraform.Apply(m.clusterDir, step, templateDir, extraArgs...) 
- return err -} - -func generateIgnConfigStep(m *metadata) error { - c := configgenerator.New(m.cluster) - masterIgns, workerIgn, err := c.GenerateIgnConfig(m.clusterDir) - if err != nil { - return fmt.Errorf("failed to generate ignition configs: %v", err) - } - - terraformVariablesFilePath := filepath.Join(m.clusterDir, terraformVariablesFileName) - data, err := ioutil.ReadFile(terraformVariablesFilePath) - if err != nil { - return fmt.Errorf("failed to read terraform.tfvars: %v", err) - } - - var cluster config.Cluster - if err := json.Unmarshal(data, &cluster); err != nil { - return fmt.Errorf("failed to unmarshal terraform.tfvars: %v", err) - } - - cluster.IgnitionMasters = masterIgns - cluster.IgnitionWorker = workerIgn - - data, err = json.MarshalIndent(&cluster, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal terraform.tfvars: %v", err) - } - - return ioutil.WriteFile(terraformVariablesFilePath, data, 0666) -} - -func generateTLSConfigStep(m *metadata) error { - if err := os.MkdirAll(filepath.Join(m.clusterDir, tlsPath), os.ModeDir|0755); err != nil { - return fmt.Errorf("failed to create TLS directory at %s", tlsPath) - } - - c := configgenerator.New(m.cluster) - return c.GenerateTLSConfig(m.clusterDir) -} diff --git a/installer/pkg/workflow/utils.go b/installer/pkg/workflow/utils.go deleted file mode 100644 index b0345bf656e..00000000000 --- a/installer/pkg/workflow/utils.go +++ /dev/null @@ -1,102 +0,0 @@ -package workflow - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - configgenerator "github.com/openshift/installer/installer/pkg/config-generator" - "github.com/openshift/installer/pkg/types/config" -) - -const ( - configFileName = "config.yaml" - internalFileName = "internal.yaml" -) - -func generateClusterConfigMaps(m *metadata) error { - clusterGeneratedPath := filepath.Join(m.clusterDir, generatedPath) - if err := os.MkdirAll(clusterGeneratedPath, os.ModeDir|0755); err != nil { - return fmt.Errorf("failed to create cluster generated directory at %s", clusterGeneratedPath) - } - - configGenerator := configgenerator.New(m.cluster) - - kcoConfig, err := configGenerator.CoreConfig() - if err != nil { - return err - } - - kcoConfigFilePath := filepath.Join(clusterGeneratedPath, kcoConfigFileName) - if err := ioutil.WriteFile(kcoConfigFilePath, []byte(kcoConfig), 0666); err != nil { - return err - } - - kubeSystem, err := configGenerator.KubeSystem(m.clusterDir) - if err != nil { - return err - } - - kubePath := filepath.Join(m.clusterDir, kubeSystemPath) - if err := os.MkdirAll(kubePath, os.ModeDir|0755); err != nil { - return fmt.Errorf("failed to create manifests directory at %s", kubePath) - } - - kubeSystemConfigFilePath := filepath.Join(kubePath, kubeSystemFileName) - if err := ioutil.WriteFile(kubeSystemConfigFilePath, []byte(kubeSystem), 0666); err != nil { - return err - } - - tectonicSystem, err := configGenerator.TectonicSystem() - if err != nil { - return err - } - - tectonicPath := filepath.Join(m.clusterDir, tectonicSystemPath) - if err := os.MkdirAll(tectonicPath, os.ModeDir|0755); err != nil { - return fmt.Errorf("failed to create tectonic directory at %s", tectonicPath) - } - - tectonicSystemConfigFilePath := filepath.Join(tectonicPath, tectonicSystemFileName) - return ioutil.WriteFile(tectonicSystemConfigFilePath, []byte(tectonicSystem), 0666) -} - -func readClusterConfig(configFilePath string, internalFilePath string) (*config.Cluster, error) { - cfg, err := config.ParseConfigFile(configFilePath) - if err != nil { - 
return nil, fmt.Errorf("%s is not a valid config file: %s", configFilePath, err) - } - - if internalFilePath != "" { - internal, err := config.ParseInternalFile(internalFilePath) - if err != nil { - return nil, fmt.Errorf("%s is not a valid internal file: %s", internalFilePath, err) - } - cfg.Internal = *internal - } - - return cfg, nil -} - -func readClusterConfigStep(m *metadata) error { - if m.clusterDir == "" { - return errors.New("no cluster dir given for reading config") - } - configFilePath := filepath.Join(m.clusterDir, configFileName) - internalFilePath := filepath.Join(m.clusterDir, internalFileName) - - cluster, err := readClusterConfig(configFilePath, internalFilePath) - if err != nil { - return err - } - - if err := cluster.ValidateAndLog(); err != nil { - return err - } - - m.cluster = *cluster - - return nil -} diff --git a/installer/pkg/workflow/workflow.go b/installer/pkg/workflow/workflow.go deleted file mode 100644 index f700533b8c8..00000000000 --- a/installer/pkg/workflow/workflow.go +++ /dev/null @@ -1,49 +0,0 @@ -package workflow - -import ( - "github.com/openshift/installer/pkg/types/config" - log "github.com/sirupsen/logrus" -) - -// metadata is the state store of the current workflow execution. -// It is meant to carry state for one step to another. -// When creating a new workflow, initial state from external parameters -// is also injected by when initializing the metadata object. -// Steps take their inputs from the metadata object and persist -// results onto it for later consumption. -type metadata struct { - cluster config.Cluster - configFilePath string - clusterDir string - contOnErr bool -} - -// step is the entrypoint of a workflow step implementation. -// To add a new step, put your logic in a function that matches this signature. -// Next, add a reference to this new function in a Workflow's steps list. -type step func(*metadata) error - -// Workflow is a high-level representation -// of a set of actions performed in a predictable order. -type Workflow struct { - metadata metadata - steps []step -} - -// Execute runs all steps in order. 
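The comments in workflow.go above describe the extension point: a step is any func(*metadata) error, and a Workflow runs its steps in order against shared metadata. A minimal sketch of adding a step under that pattern; the type declarations are restated only to keep the example self-contained, and checkPrereqsStep is a hypothetical step name:

package example

import "errors"

// Restated minimally from workflow.go so the example compiles on its own.
type metadata struct {
	clusterDir string
}

type step func(*metadata) error

type Workflow struct {
	metadata metadata
	steps    []step
}

// checkPrereqsStep is a hypothetical step: it reads its input from the shared
// metadata and returns an error for Execute to act on.
func checkPrereqsStep(m *metadata) error {
	if m.clusterDir == "" {
		return errors.New("no cluster dir set")
	}
	return nil
}

// newCheckWorkflow wires the new step into a Workflow's ordered steps list,
// which is all Execute needs to pick it up.
func newCheckWorkflow(clusterDir string) Workflow {
	return Workflow{
		metadata: metadata{clusterDir: clusterDir},
		steps:    []step{checkPrereqsStep},
	}
}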
-func (w Workflow) Execute() error { - var firstError error - for _, step := range w.steps { - if err := step(&w.metadata); err != nil { - if !w.metadata.contOnErr { - return err - } - if firstError == nil { - firstError = err - } - log.Warn(err) - } - } - - return firstError -} diff --git a/installer/pkg/workflow/workflow_test.go b/installer/pkg/workflow/workflow_test.go deleted file mode 100644 index ad02879712f..00000000000 --- a/installer/pkg/workflow/workflow_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package workflow - -import ( - "errors" - "testing" -) - -func test1Step(m *metadata) error { - return nil -} - -func test2Step(m *metadata) error { - return nil -} - -func test3Step(m *metadata) error { - return errors.New("step failed") -} - -func TestWorkflowTypeExecute(t *testing.T) { - m := metadata{} - - testCases := []struct { - test string - steps []step - m metadata - expectedError bool - }{ - { - test: "All steps succeed", - steps: []step{test1Step, test2Step}, - m: m, - expectedError: false, - }, - { - test: "At least one step fails", - steps: []step{test1Step, test2Step, test3Step}, - m: m, - expectedError: true, - }, - } - - for _, tc := range testCases { - wf := Workflow{ - metadata: tc.m, - steps: tc.steps, - } - err := wf.Execute() - if (err != nil) != tc.expectedError { - t.Errorf("Test case %s: WorkflowType.Execute() expected error: %v, got: %v", tc.test, tc.expectedError, (err != nil)) - } - } -} From de8e527126050dc47c948eb1359402d4b2cb5b8d Mon Sep 17 00:00:00 2001 From: Alex Crawford Date: Wed, 26 Sep 2018 15:26:26 -0700 Subject: [PATCH 5/7] vendor: remove unused dependencies After removing the installer code, these dependencies were removed by glide. --- glide.lock | 464 +- vendor/bitbucket.org/ww/goautoneg/autoneg.go | 162 - .../github.com/NYTimes/gziphandler/LICENSE.md | 13 - vendor/github.com/NYTimes/gziphandler/gzip.go | 332 - .../NYTimes/gziphandler/gzip_go18.go | 43 - .../aws/aws-sdk-go/aws/endpoints/defaults.go | 21 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- vendor/github.com/beorn7/perks/LICENSE | 20 - .../beorn7/perks/quantile/stream.go | 292 - vendor/github.com/coreos/etcd/LICENSE | 202 - vendor/github.com/coreos/etcd/NOTICE | 5 - vendor/github.com/coreos/etcd/alarm/alarms.go | 152 - .../coreos/etcd/auth/authpb/auth.pb.go | 824 - vendor/github.com/coreos/etcd/auth/doc.go | 16 - .../coreos/etcd/auth/range_perm_cache.go | 221 - .../coreos/etcd/auth/simple_token.go | 149 - vendor/github.com/coreos/etcd/auth/store.go | 999 - .../coreos/etcd/client/auth_role.go | 237 - .../coreos/etcd/client/auth_user.go | 320 - .../coreos/etcd/client/cancelreq.go | 18 - .../github.com/coreos/etcd/client/client.go | 670 - .../coreos/etcd/client/cluster_error.go | 37 - vendor/github.com/coreos/etcd/client/curl.go | 70 - .../github.com/coreos/etcd/client/discover.go | 21 - vendor/github.com/coreos/etcd/client/doc.go | 73 - .../coreos/etcd/client/keys.generated.go | 1087 - vendor/github.com/coreos/etcd/client/keys.go | 682 - .../github.com/coreos/etcd/client/members.go | 304 - vendor/github.com/coreos/etcd/client/srv.go | 65 - vendor/github.com/coreos/etcd/client/util.go | 53 - .../github.com/coreos/etcd/clientv3/auth.go | 230 - .../coreos/etcd/clientv3/balancer.go | 239 - .../github.com/coreos/etcd/clientv3/client.go | 427 - .../coreos/etcd/clientv3/cluster.go | 102 - .../coreos/etcd/clientv3/compact_op.go | 53 - .../coreos/etcd/clientv3/compare.go | 93 - .../github.com/coreos/etcd/clientv3/config.go | 113 - vendor/github.com/coreos/etcd/clientv3/doc.go | 64 - 
vendor/github.com/coreos/etcd/clientv3/kv.go | 167 - .../github.com/coreos/etcd/clientv3/lease.go | 508 - .../github.com/coreos/etcd/clientv3/logger.go | 82 - .../coreos/etcd/clientv3/maintenance.go | 164 - vendor/github.com/coreos/etcd/clientv3/op.go | 390 - .../github.com/coreos/etcd/clientv3/retry.go | 293 - .../github.com/coreos/etcd/clientv3/sort.go | 37 - vendor/github.com/coreos/etcd/clientv3/txn.go | 166 - .../github.com/coreos/etcd/clientv3/watch.go | 785 - .../coreos/etcd/compactor/compactor.go | 133 - .../github.com/coreos/etcd/compactor/doc.go | 16 - .../coreos/etcd/discovery/discovery.go | 362 - .../github.com/coreos/etcd/discovery/srv.go | 104 - vendor/github.com/coreos/etcd/error/error.go | 162 - .../coreos/etcd/etcdserver/api/capability.go | 84 - .../coreos/etcd/etcdserver/api/cluster.go | 41 - .../coreos/etcd/etcdserver/api/doc.go | 16 - .../etcd/etcdserver/api/v2http/capability.go | 40 - .../etcd/etcdserver/api/v2http/client.go | 822 - .../etcd/etcdserver/api/v2http/client_auth.go | 543 - .../coreos/etcd/etcdserver/api/v2http/doc.go | 16 - .../coreos/etcd/etcdserver/api/v2http/http.go | 94 - .../etcdserver/api/v2http/httptypes/errors.go | 56 - .../etcdserver/api/v2http/httptypes/member.go | 69 - .../etcd/etcdserver/api/v2http/metrics.go | 96 - .../coreos/etcd/etcdserver/api/v2http/peer.go | 78 - .../coreos/etcd/etcdserver/api/v3rpc/auth.go | 157 - .../coreos/etcd/etcdserver/api/v3rpc/codec.go | 34 - .../coreos/etcd/etcdserver/api/v3rpc/grpc.go | 49 - .../etcd/etcdserver/api/v3rpc/header.go | 46 - .../etcd/etcdserver/api/v3rpc/interceptor.go | 144 - .../coreos/etcd/etcdserver/api/v3rpc/key.go | 253 - .../coreos/etcd/etcdserver/api/v3rpc/lease.go | 98 - .../etcd/etcdserver/api/v3rpc/maintenance.go | 189 - .../etcd/etcdserver/api/v3rpc/member.go | 97 - .../etcd/etcdserver/api/v3rpc/metrics.go | 38 - .../coreos/etcd/etcdserver/api/v3rpc/quota.go | 89 - .../etcd/etcdserver/api/v3rpc/rpctypes/doc.go | 16 - .../etcdserver/api/v3rpc/rpctypes/error.go | 177 - .../etcd/etcdserver/api/v3rpc/rpctypes/md.go | 20 - .../coreos/etcd/etcdserver/api/v3rpc/util.go | 101 - .../coreos/etcd/etcdserver/api/v3rpc/watch.go | 383 - .../coreos/etcd/etcdserver/apply.go | 902 - .../coreos/etcd/etcdserver/apply_auth.go | 195 - .../coreos/etcd/etcdserver/apply_v2.go | 140 - .../coreos/etcd/etcdserver/auth/auth.go | 647 - .../etcd/etcdserver/auth/auth_requests.go | 166 - .../coreos/etcd/etcdserver/cluster_util.go | 268 - .../coreos/etcd/etcdserver/config.go | 200 - .../etcd/etcdserver/consistent_index.go | 33 - .../github.com/coreos/etcd/etcdserver/doc.go | 16 - .../coreos/etcd/etcdserver/errors.go | 45 - .../etcdserver/etcdserverpb/etcdserver.pb.go | 1045 - .../etcdserverpb/raft_internal.pb.go | 2094 - .../etcd/etcdserver/etcdserverpb/rpc.pb.go | 16258 ----- .../etcd/etcdserver/etcdserverpb/rpc.pb.gw.go | 1911 - .../etcd/etcdserver/membership/cluster.go | 512 - .../etcd/etcdserver/membership/errors.go | 33 - .../etcd/etcdserver/membership/member.go | 124 - .../etcd/etcdserver/membership/store.go | 193 - .../coreos/etcd/etcdserver/metrics.go | 95 - .../coreos/etcd/etcdserver/quota.go | 114 - .../github.com/coreos/etcd/etcdserver/raft.go | 547 - .../coreos/etcd/etcdserver/server.go | 1648 - .../coreos/etcd/etcdserver/snapshot_merge.go | 68 - .../coreos/etcd/etcdserver/stats/leader.go | 123 - .../coreos/etcd/etcdserver/stats/queue.go | 110 - .../coreos/etcd/etcdserver/stats/server.go | 150 - .../coreos/etcd/etcdserver/stats/stats.go | 32 - .../coreos/etcd/etcdserver/storage.go | 101 - 
.../github.com/coreos/etcd/etcdserver/util.go | 97 - .../coreos/etcd/etcdserver/v2_server.go | 125 - .../coreos/etcd/etcdserver/v3_server.go | 804 - .../coreos/etcd/integration/bridge.go | 181 - .../coreos/etcd/integration/cluster.go | 900 - .../coreos/etcd/integration/cluster_direct.go | 37 - .../coreos/etcd/integration/cluster_proxy.go | 89 - .../github.com/coreos/etcd/integration/doc.go | 25 - vendor/github.com/coreos/etcd/lease/doc.go | 16 - .../coreos/etcd/lease/leasehttp/doc.go | 16 - .../coreos/etcd/lease/leasehttp/http.go | 260 - .../coreos/etcd/lease/leasepb/lease.pb.go | 608 - vendor/github.com/coreos/etcd/lease/lessor.go | 602 - vendor/github.com/coreos/etcd/main.go | 29 - .../coreos/etcd/mvcc/backend/backend.go | 352 - .../coreos/etcd/mvcc/backend/batch_tx.go | 206 - .../etcd/mvcc/backend/boltoption_default.go | 21 - .../etcd/mvcc/backend/boltoption_linux.go | 32 - .../coreos/etcd/mvcc/backend/doc.go | 16 - .../coreos/etcd/mvcc/backend/metrics.go | 31 - vendor/github.com/coreos/etcd/mvcc/doc.go | 16 - vendor/github.com/coreos/etcd/mvcc/index.go | 208 - .../github.com/coreos/etcd/mvcc/key_index.go | 333 - vendor/github.com/coreos/etcd/mvcc/kv.go | 119 - vendor/github.com/coreos/etcd/mvcc/kvstore.go | 710 - .../coreos/etcd/mvcc/kvstore_compaction.go | 66 - vendor/github.com/coreos/etcd/mvcc/metrics.go | 163 - .../coreos/etcd/mvcc/mvccpb/kv.pb.go | 735 - .../github.com/coreos/etcd/mvcc/revision.go | 67 - vendor/github.com/coreos/etcd/mvcc/util.go | 56 - .../coreos/etcd/mvcc/watchable_store.go | 577 - vendor/github.com/coreos/etcd/mvcc/watcher.go | 171 - .../coreos/etcd/mvcc/watcher_group.go | 283 - vendor/github.com/coreos/etcd/pkg/adt/doc.go | 16 - .../coreos/etcd/pkg/adt/interval_tree.go | 531 - .../coreos/etcd/pkg/contention/contention.go | 69 - .../coreos/etcd/pkg/contention/doc.go | 16 - .../github.com/coreos/etcd/pkg/cpuutil/doc.go | 16 - .../coreos/etcd/pkg/cpuutil/endian.go | 36 - vendor/github.com/coreos/etcd/pkg/crc/crc.go | 43 - .../coreos/etcd/pkg/fileutil/dir_unix.go | 22 - .../coreos/etcd/pkg/fileutil/dir_windows.go | 46 - .../coreos/etcd/pkg/fileutil/fileutil.go | 121 - .../coreos/etcd/pkg/fileutil/lock.go | 26 - .../coreos/etcd/pkg/fileutil/lock_flock.go | 49 - .../coreos/etcd/pkg/fileutil/lock_linux.go | 96 - .../coreos/etcd/pkg/fileutil/lock_plan9.go | 45 - .../coreos/etcd/pkg/fileutil/lock_solaris.go | 62 - .../coreos/etcd/pkg/fileutil/lock_unix.go | 29 - .../coreos/etcd/pkg/fileutil/lock_windows.go | 125 - .../coreos/etcd/pkg/fileutil/preallocate.go | 47 - .../etcd/pkg/fileutil/preallocate_darwin.go | 43 - .../etcd/pkg/fileutil/preallocate_unix.go | 49 - .../pkg/fileutil/preallocate_unsupported.go | 25 - .../coreos/etcd/pkg/fileutil/purge.go | 78 - .../coreos/etcd/pkg/fileutil/sync.go | 29 - .../coreos/etcd/pkg/fileutil/sync_darwin.go | 40 - .../coreos/etcd/pkg/fileutil/sync_linux.go | 34 - .../coreos/etcd/pkg/httputil/httputil.go | 31 - .../github.com/coreos/etcd/pkg/idutil/id.go | 78 - .../coreos/etcd/pkg/ioutil/pagewriter.go | 106 - .../coreos/etcd/pkg/ioutil/readcloser.go | 66 - .../coreos/etcd/pkg/ioutil/reader.go | 40 - .../github.com/coreos/etcd/pkg/ioutil/util.go | 43 - .../coreos/etcd/pkg/logutil/merge_logger.go | 195 - .../coreos/etcd/pkg/monotime/issue15006.s | 6 - .../coreos/etcd/pkg/monotime/monotime.go | 26 - .../coreos/etcd/pkg/monotime/nanotime.go | 24 - .../coreos/etcd/pkg/netutil/isolate_linux.go | 82 - .../coreos/etcd/pkg/netutil/isolate_stub.go | 25 - .../coreos/etcd/pkg/netutil/netutil.go | 143 - .../coreos/etcd/pkg/netutil/routes.go | 33 - 
.../coreos/etcd/pkg/netutil/routes_linux.go | 250 - .../coreos/etcd/pkg/pathutil/path.go | 31 - .../coreos/etcd/pkg/pbutil/pbutil.go | 60 - .../coreos/etcd/pkg/runtime/fds_linux.go | 37 - .../coreos/etcd/pkg/runtime/fds_other.go | 30 - .../coreos/etcd/pkg/schedule/doc.go | 16 - .../coreos/etcd/pkg/schedule/schedule.go | 168 - .../coreos/etcd/pkg/testutil/leak.go | 125 - .../etcd/pkg/testutil/pauseable_handler.go | 57 - .../coreos/etcd/pkg/testutil/recorder.go | 132 - .../coreos/etcd/pkg/testutil/testutil.go | 57 - .../github.com/coreos/etcd/pkg/tlsutil/doc.go | 16 - .../coreos/etcd/pkg/tlsutil/tlsutil.go | 72 - .../coreos/etcd/pkg/transport/doc.go | 17 - .../etcd/pkg/transport/keepalive_listener.go | 94 - .../coreos/etcd/pkg/transport/limit_listen.go | 80 - .../coreos/etcd/pkg/transport/listener.go | 276 - .../coreos/etcd/pkg/transport/timeout_conn.go | 44 - .../etcd/pkg/transport/timeout_dialer.go | 36 - .../etcd/pkg/transport/timeout_listener.go | 58 - .../etcd/pkg/transport/timeout_transport.go | 51 - .../coreos/etcd/pkg/transport/tls.go | 49 - .../coreos/etcd/pkg/transport/transport.go | 71 - .../etcd/pkg/transport/unix_listener.go | 40 - .../github.com/coreos/etcd/pkg/types/doc.go | 17 - vendor/github.com/coreos/etcd/pkg/types/id.go | 41 - .../github.com/coreos/etcd/pkg/types/set.go | 178 - .../github.com/coreos/etcd/pkg/types/slice.go | 22 - .../github.com/coreos/etcd/pkg/types/urls.go | 82 - .../coreos/etcd/pkg/types/urlsmap.go | 107 - .../github.com/coreos/etcd/pkg/wait/wait.go | 84 - .../coreos/etcd/pkg/wait/wait_time.go | 66 - .../coreos/etcd/proxy/grpcproxy/auth.go | 110 - .../etcd/proxy/grpcproxy/cache/store.go | 168 - .../coreos/etcd/proxy/grpcproxy/cluster.go | 52 - .../coreos/etcd/proxy/grpcproxy/doc.go | 16 - .../coreos/etcd/proxy/grpcproxy/kv.go | 210 - .../etcd/proxy/grpcproxy/kv_client_adapter.go | 47 - .../coreos/etcd/proxy/grpcproxy/lease.go | 87 - .../etcd/proxy/grpcproxy/maintenance.go | 74 - .../coreos/etcd/proxy/grpcproxy/metrics.go | 51 - .../coreos/etcd/proxy/grpcproxy/watch.go | 267 - .../etcd/proxy/grpcproxy/watch_broadcast.go | 158 - .../etcd/proxy/grpcproxy/watch_broadcasts.go | 135 - .../proxy/grpcproxy/watch_client_adapter.go | 196 - .../etcd/proxy/grpcproxy/watch_ranges.go | 69 - .../coreos/etcd/proxy/grpcproxy/watcher.go | 127 - vendor/github.com/coreos/etcd/raft/doc.go | 300 - vendor/github.com/coreos/etcd/raft/log.go | 358 - .../coreos/etcd/raft/log_unstable.go | 139 - vendor/github.com/coreos/etcd/raft/logger.go | 126 - vendor/github.com/coreos/etcd/raft/node.go | 521 - .../github.com/coreos/etcd/raft/progress.go | 279 - vendor/github.com/coreos/etcd/raft/raft.go | 1253 - .../coreos/etcd/raft/raftpb/raft.pb.go | 1900 - vendor/github.com/coreos/etcd/raft/rawnode.go | 264 - .../github.com/coreos/etcd/raft/read_only.go | 118 - vendor/github.com/coreos/etcd/raft/status.go | 76 - vendor/github.com/coreos/etcd/raft/storage.go | 271 - vendor/github.com/coreos/etcd/raft/util.go | 129 - .../github.com/coreos/etcd/rafthttp/coder.go | 27 - vendor/github.com/coreos/etcd/rafthttp/doc.go | 16 - .../github.com/coreos/etcd/rafthttp/http.go | 359 - .../coreos/etcd/rafthttp/metrics.go | 73 - .../coreos/etcd/rafthttp/msg_codec.go | 68 - .../coreos/etcd/rafthttp/msgappv2_codec.go | 248 - .../github.com/coreos/etcd/rafthttp/peer.go | 307 - .../coreos/etcd/rafthttp/peer_status.go | 77 - .../coreos/etcd/rafthttp/pipeline.go | 159 - .../coreos/etcd/rafthttp/probing_status.go | 67 - .../github.com/coreos/etcd/rafthttp/remote.go | 69 - 
.../coreos/etcd/rafthttp/snapshot_sender.go | 155 - .../github.com/coreos/etcd/rafthttp/stream.go | 526 - .../coreos/etcd/rafthttp/transport.go | 402 - .../coreos/etcd/rafthttp/urlpick.go | 57 - .../github.com/coreos/etcd/rafthttp/util.go | 205 - vendor/github.com/coreos/etcd/snap/db.go | 74 - vendor/github.com/coreos/etcd/snap/message.go | 64 - vendor/github.com/coreos/etcd/snap/metrics.go | 41 - .../coreos/etcd/snap/snappb/snap.pb.go | 353 - .../coreos/etcd/snap/snapshotter.go | 204 - vendor/github.com/coreos/etcd/store/doc.go | 16 - vendor/github.com/coreos/etcd/store/event.go | 71 - .../coreos/etcd/store/event_history.go | 129 - .../coreos/etcd/store/event_queue.go | 34 - .../github.com/coreos/etcd/store/metrics.go | 128 - vendor/github.com/coreos/etcd/store/node.go | 396 - .../coreos/etcd/store/node_extern.go | 116 - vendor/github.com/coreos/etcd/store/stats.go | 146 - vendor/github.com/coreos/etcd/store/store.go | 788 - .../coreos/etcd/store/ttl_key_heap.go | 99 - .../github.com/coreos/etcd/store/watcher.go | 95 - .../coreos/etcd/store/watcher_hub.go | 200 - .../github.com/coreos/etcd/version/version.go | 56 - vendor/github.com/coreos/etcd/wal/decoder.go | 185 - vendor/github.com/coreos/etcd/wal/doc.go | 75 - vendor/github.com/coreos/etcd/wal/encoder.go | 120 - .../coreos/etcd/wal/file_pipeline.go | 97 - vendor/github.com/coreos/etcd/wal/metrics.go | 31 - vendor/github.com/coreos/etcd/wal/repair.go | 99 - vendor/github.com/coreos/etcd/wal/util.go | 107 - vendor/github.com/coreos/etcd/wal/wal.go | 637 - vendor/github.com/coreos/etcd/wal/wal_unix.go | 44 - .../github.com/coreos/etcd/wal/wal_windows.go | 41 - .../coreos/etcd/wal/walpb/record.go | 29 - .../coreos/etcd/wal/walpb/record.pb.go | 521 - .../coreos/go-systemd/daemon/sdnotify.go | 84 - .../coreos/go-systemd/daemon/watchdog.go | 73 - .../coreos/go-systemd/journal/journal.go | 182 - .../coreos/ignition/config/v1/cloudinit.go | 53 - .../coreos/ignition/config/v1/config.go | 59 - .../coreos/ignition/config/v1/types/config.go | 27 - .../coreos/ignition/config/v1/types/disk.go | 123 - .../coreos/ignition/config/v1/types/file.go | 39 - .../ignition/config/v1/types/filesystem.go | 45 - .../coreos/ignition/config/v1/types/group.go | 22 - .../ignition/config/v1/types/networkd.go | 19 - .../ignition/config/v1/types/partition.go | 60 - .../coreos/ignition/config/v1/types/passwd.go | 20 - .../coreos/ignition/config/v1/types/path.go | 31 - .../coreos/ignition/config/v1/types/raid.go | 44 - .../ignition/config/v1/types/storage.go | 21 - .../ignition/config/v1/types/systemd.go | 19 - .../coreos/ignition/config/v1/types/unit.go | 73 - .../coreos/ignition/config/v1/types/user.go | 35 - .../coreos/ignition/config/v2_0/append.go | 73 - .../coreos/ignition/config/v2_0/cloudinit.go | 53 - .../coreos/ignition/config/v2_0/config.go | 70 - .../coreos/ignition/config/v2_0/translate.go | 173 - .../ignition/config/v2_0/types/compression.go | 31 - .../ignition/config/v2_0/types/config.go | 87 - .../coreos/ignition/config/v2_0/types/disk.go | 126 - .../coreos/ignition/config/v2_0/types/file.go | 61 - .../ignition/config/v2_0/types/filesystem.go | 60 - .../ignition/config/v2_0/types/group.go | 22 - .../coreos/ignition/config/v2_0/types/hash.go | 72 - .../ignition/config/v2_0/types/ignition.go | 64 - .../ignition/config/v2_0/types/networkd.go | 19 - .../ignition/config/v2_0/types/partition.go | 64 - .../ignition/config/v2_0/types/passwd.go | 20 - .../coreos/ignition/config/v2_0/types/path.go | 35 - .../coreos/ignition/config/v2_0/types/raid.go | 44 - 
.../ignition/config/v2_0/types/storage.go | 22 - .../ignition/config/v2_0/types/systemd.go | 19 - .../coreos/ignition/config/v2_0/types/unit.go | 115 - .../coreos/ignition/config/v2_0/types/url.go | 69 - .../coreos/ignition/config/v2_0/types/user.go | 35 - .../config/v2_0/types/verification.go | 19 - .../coreos/ignition/config/v2_1/append.go | 72 - .../coreos/ignition/config/v2_1/cloudinit.go | 53 - .../coreos/ignition/config/v2_1/config.go | 68 - .../coreos/ignition/config/v2_1/translate.go | 236 - .../ignition/config/v2_1/types/config.go | 91 - .../ignition/config/v2_1/types/directory.go | 30 - .../coreos/ignition/config/v2_1/types/disk.go | 128 - .../coreos/ignition/config/v2_1/types/file.go | 56 - .../ignition/config/v2_1/types/filesystem.go | 144 - .../ignition/config/v2_1/types/ignition.go | 52 - .../coreos/ignition/config/v2_1/types/link.go | 33 - .../coreos/ignition/config/v2_1/types/mode.go | 26 - .../coreos/ignition/config/v2_1/types/node.go | 73 - .../ignition/config/v2_1/types/partition.go | 77 - .../ignition/config/v2_1/types/passwd.go | 67 - .../coreos/ignition/config/v2_1/types/path.go | 28 - .../coreos/ignition/config/v2_1/types/raid.go | 57 - .../ignition/config/v2_1/types/schema.go | 221 - .../coreos/ignition/config/v2_1/types/unit.go | 109 - .../coreos/ignition/config/v2_1/types/url.go | 45 - .../config/v2_1/types/verification.go | 77 - .../coreos/ignition/config/v2_2/append.go | 76 - .../coreos/ignition/config/v2_2/cloudinit.go | 53 - .../coreos/ignition/config/v2_2/config.go | 71 - .../coreos/ignition/config/v2_2/translate.go | 354 - .../ignition/config/validate/astjson/node.go | 73 - .../config/validate/astnode/astnode.go | 45 - .../ignition/config/validate/validate.go | 242 - vendor/github.com/coreos/pkg/LICENSE | 202 - vendor/github.com/coreos/pkg/NOTICE | 5 - .../coreos/pkg/capnslog/formatters.go | 157 - .../coreos/pkg/capnslog/glog_formatter.go | 96 - vendor/github.com/coreos/pkg/capnslog/init.go | 49 - .../coreos/pkg/capnslog/init_windows.go | 25 - .../coreos/pkg/capnslog/journald_formatter.go | 68 - .../coreos/pkg/capnslog/log_hijack.go | 39 - .../github.com/coreos/pkg/capnslog/logmap.go | 240 - .../coreos/pkg/capnslog/pkg_logger.go | 177 - .../coreos/pkg/capnslog/syslog_formatter.go | 65 - vendor/github.com/coreos/pkg/health/health.go | 127 - .../github.com/coreos/pkg/httputil/cookie.go | 21 - vendor/github.com/coreos/pkg/httputil/json.go | 27 - .../github.com/coreos/pkg/timeutil/backoff.go | 15 - .../config/tectonic-utility/config.go | 34 - .../elazarl/go-bindata-assetfs/LICENSE | 23 - .../elazarl/go-bindata-assetfs/assetfs.go | 147 - .../elazarl/go-bindata-assetfs/doc.go | 13 - .../emicklei/go-restful-swagger12/LICENSE | 22 - .../api_declaration_list.go | 64 - .../emicklei/go-restful-swagger12/config.go | 46 - .../go-restful-swagger12/model_builder.go | 467 - .../go-restful-swagger12/model_list.go | 86 - .../model_property_ext.go | 81 - .../model_property_list.go | 87 - .../go-restful-swagger12/ordered_route_map.go | 36 - .../emicklei/go-restful-swagger12/swagger.go | 185 - .../go-restful-swagger12/swagger_builder.go | 21 - .../swagger_webservice.go | 443 - vendor/github.com/evanphx/json-patch/LICENSE | 25 - vendor/github.com/evanphx/json-patch/merge.go | 305 - vendor/github.com/evanphx/json-patch/patch.go | 643 - vendor/github.com/golang/protobuf/LICENSE | 31 - .../golang/protobuf/jsonpb/jsonpb.go | 1083 - .../github.com/golang/protobuf/proto/clone.go | 229 - .../golang/protobuf/proto/decode.go | 970 - .../golang/protobuf/proto/encode.go | 1362 - 
.../github.com/golang/protobuf/proto/equal.go | 300 - .../golang/protobuf/proto/extensions.go | 587 - .../github.com/golang/protobuf/proto/lib.go | 897 - .../golang/protobuf/proto/message_set.go | 311 - .../golang/protobuf/proto/pointer_reflect.go | 484 - .../golang/protobuf/proto/pointer_unsafe.go | 270 - .../golang/protobuf/proto/properties.go | 872 - .../github.com/golang/protobuf/proto/text.go | 854 - .../golang/protobuf/proto/text_parser.go | 895 - .../github.com/golang/protobuf/ptypes/any.go | 139 - .../golang/protobuf/ptypes/any/any.pb.go | 178 - .../github.com/golang/protobuf/ptypes/doc.go | 35 - .../golang/protobuf/ptypes/duration.go | 102 - .../protobuf/ptypes/duration/duration.pb.go | 144 - .../protobuf/ptypes/struct/struct.pb.go | 380 - .../golang/protobuf/ptypes/timestamp.go | 134 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 160 - vendor/github.com/googleapis/gnostic/LICENSE | 203 - .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.go | 8728 --- .../gnostic/OpenAPIv2/OpenAPIv2.pb.go | 4456 -- .../googleapis/gnostic/compiler/context.go | 43 - .../googleapis/gnostic/compiler/error.go | 61 - .../gnostic/compiler/extension-handler.go | 101 - .../googleapis/gnostic/compiler/helpers.go | 197 - .../googleapis/gnostic/compiler/main.go | 16 - .../googleapis/gnostic/compiler/reader.go | 167 - .../gnostic/extensions/extension.pb.go | 219 - .../gnostic/extensions/extensions.go | 82 - .../github.com/googleapis/gnostic/gnostic.go | 556 - .../grpc-ecosystem/go-grpc-prometheus/LICENSE | 201 - .../go-grpc-prometheus/client.go | 72 - .../go-grpc-prometheus/client_reporter.go | 111 - .../go-grpc-prometheus/server.go | 74 - .../go-grpc-prometheus/server_reporter.go | 157 - .../grpc-ecosystem/go-grpc-prometheus/util.go | 27 - .../grpc-ecosystem/grpc-gateway/LICENSE.txt | 27 - .../grpc-gateway/runtime/context.go | 143 - .../grpc-gateway/runtime/convert.go | 58 - .../grpc-gateway/runtime/doc.go | 5 - .../grpc-gateway/runtime/errors.go | 121 - .../grpc-gateway/runtime/handler.go | 164 - .../runtime/internal/stream_chunk.pb.go | 65 - .../grpc-gateway/runtime/marshal_json.go | 37 - .../grpc-gateway/runtime/marshal_jsonpb.go | 184 - .../grpc-gateway/runtime/marshaler.go | 42 - .../runtime/marshaler_registry.go | 91 - .../grpc-gateway/runtime/mux.go | 132 - .../grpc-gateway/runtime/pattern.go | 227 - .../grpc-gateway/runtime/proto2_convert.go | 80 - .../grpc-gateway/runtime/query.go | 140 - .../grpc-gateway/utilities/doc.go | 2 - .../grpc-gateway/utilities/pattern.go | 22 - .../grpc-gateway/utilities/trie.go | 177 - vendor/github.com/hashicorp/golang-lru/2q.go | 212 - .../github.com/hashicorp/golang-lru/LICENSE | 362 - vendor/github.com/hashicorp/golang-lru/arc.go | 257 - vendor/github.com/hashicorp/golang-lru/lru.go | 114 - .../hashicorp/golang-lru/simplelru/lru.go | 160 - vendor/github.com/howeyc/gopass/LICENSE.txt | 15 - vendor/github.com/howeyc/gopass/pass.go | 110 - vendor/github.com/howeyc/gopass/terminal.go | 25 - .../howeyc/gopass/terminal_solaris.go | 69 - vendor/github.com/imdario/mergo/LICENSE | 28 - vendor/github.com/imdario/mergo/doc.go | 44 - vendor/github.com/imdario/mergo/map.go | 146 - vendor/github.com/imdario/mergo/merge.go | 99 - vendor/github.com/imdario/mergo/mergo.go | 90 - vendor/github.com/json-iterator/go/LICENSE | 21 - .../json-iterator/go/feature_adapter.go | 127 - .../json-iterator/go/feature_any.go | 242 - .../json-iterator/go/feature_any_array.go | 278 - .../json-iterator/go/feature_any_bool.go | 137 - .../json-iterator/go/feature_any_float.go | 83 - 
.../json-iterator/go/feature_any_int32.go | 74 - .../json-iterator/go/feature_any_int64.go | 74 - .../json-iterator/go/feature_any_invalid.go | 82 - .../json-iterator/go/feature_any_nil.go | 69 - .../json-iterator/go/feature_any_number.go | 104 - .../json-iterator/go/feature_any_object.go | 374 - .../json-iterator/go/feature_any_string.go | 166 - .../json-iterator/go/feature_any_uint32.go | 74 - .../json-iterator/go/feature_any_uint64.go | 74 - .../json-iterator/go/feature_config.go | 312 - .../json-iterator/go/feature_iter.go | 307 - .../json-iterator/go/feature_iter_array.go | 58 - .../json-iterator/go/feature_iter_float.go | 341 - .../json-iterator/go/feature_iter_int.go | 258 - .../json-iterator/go/feature_iter_object.go | 212 - .../json-iterator/go/feature_iter_skip.go | 127 - .../go/feature_iter_skip_sloppy.go | 144 - .../go/feature_iter_skip_strict.go | 89 - .../json-iterator/go/feature_iter_string.go | 215 - .../json-iterator/go/feature_json_number.go | 15 - .../json-iterator/go/feature_pool.go | 57 - .../json-iterator/go/feature_reflect.go | 691 - .../json-iterator/go/feature_reflect_array.go | 99 - .../go/feature_reflect_extension.go | 413 - .../json-iterator/go/feature_reflect_map.go | 244 - .../go/feature_reflect_native.go | 672 - .../go/feature_reflect_object.go | 196 - .../json-iterator/go/feature_reflect_slice.go | 149 - .../go/feature_reflect_struct_decoder.go | 916 - .../json-iterator/go/feature_stream.go | 305 - .../json-iterator/go/feature_stream_float.go | 96 - .../json-iterator/go/feature_stream_int.go | 320 - .../json-iterator/go/feature_stream_string.go | 396 - .../github.com/json-iterator/go/jsoniter.go | 18 - vendor/github.com/juju/ratelimit/LICENSE | 191 - vendor/github.com/juju/ratelimit/ratelimit.go | 284 - vendor/github.com/juju/ratelimit/reader.go | 51 - .../apiserver-builder/LICENSE | 201 - .../pkg/builders/api_group_builder.go | 143 - .../api_unversioned_resource_builder.go | 125 - .../pkg/builders/api_version_builder.go | 175 - .../api_versioned_resource_builder.go | 181 - .../pkg/builders/default_controller_fns.go | 28 - .../pkg/builders/default_scheme_fns.go | 38 - .../pkg/builders/default_storage_strategy.go | 166 - .../pkg/builders/resource_interfaces.go | 83 - .../apiserver-builder/pkg/builders/scheme.go | 39 - .../golang_protobuf_extensions/LICENSE | 201 - .../pbutil/decode.go | 75 - .../golang_protobuf_extensions/pbutil/doc.go | 16 - .../pbutil/encode.go | 46 - vendor/github.com/mxk/go-flowrate/LICENSE | 29 - .../mxk/go-flowrate/flowrate/flowrate.go | 267 - .../github.com/mxk/go-flowrate/flowrate/io.go | 133 - .../mxk/go-flowrate/flowrate/util.go | 67 - .../prometheus/client_golang/LICENSE | 201 - .../prometheus/client_golang/NOTICE | 23 - .../client_golang/prometheus/collector.go | 75 - .../client_golang/prometheus/counter.go | 164 - .../client_golang/prometheus/desc.go | 200 - .../client_golang/prometheus/doc.go | 186 - .../prometheus/expvar_collector.go | 119 - .../client_golang/prometheus/fnv.go | 29 - .../client_golang/prometheus/gauge.go | 145 - .../client_golang/prometheus/go_collector.go | 276 - .../client_golang/prometheus/histogram.go | 444 - .../client_golang/prometheus/http.go | 524 - .../client_golang/prometheus/metric.go | 166 - .../client_golang/prometheus/observer.go | 50 - .../prometheus/process_collector.go | 140 - .../client_golang/prometheus/registry.go | 755 - .../client_golang/prometheus/summary.go | 543 - .../client_golang/prometheus/timer.go | 48 - .../client_golang/prometheus/untyped.go | 143 - 
.../client_golang/prometheus/value.go | 239 - .../client_golang/prometheus/vec.go | 404 - .../prometheus/client_model/LICENSE | 201 - .../github.com/prometheus/client_model/NOTICE | 5 - .../prometheus/client_model/go/metrics.pb.go | 364 - vendor/github.com/prometheus/common/LICENSE | 201 - vendor/github.com/prometheus/common/NOTICE | 5 - .../prometheus/common/expfmt/decode.go | 429 - .../prometheus/common/expfmt/encode.go | 88 - .../prometheus/common/expfmt/expfmt.go | 38 - .../prometheus/common/expfmt/fuzz.go | 36 - .../prometheus/common/expfmt/text_create.go | 303 - .../prometheus/common/expfmt/text_parse.go | 753 - .../bitbucket.org/ww/goautoneg/autoneg.go | 162 - .../prometheus/common/model/alert.go | 136 - .../prometheus/common/model/fingerprinting.go | 105 - .../github.com/prometheus/common/model/fnv.go | 42 - .../prometheus/common/model/labels.go | 210 - .../prometheus/common/model/labelset.go | 169 - .../prometheus/common/model/metric.go | 103 - .../prometheus/common/model/model.go | 16 - .../prometheus/common/model/signature.go | 144 - .../prometheus/common/model/silence.go | 106 - .../prometheus/common/model/time.go | 249 - .../prometheus/common/model/value.go | 416 - vendor/github.com/prometheus/procfs/LICENSE | 201 - vendor/github.com/prometheus/procfs/NOTICE | 7 - .../github.com/prometheus/procfs/buddyinfo.go | 95 - vendor/github.com/prometheus/procfs/doc.go | 45 - vendor/github.com/prometheus/procfs/fs.go | 46 - vendor/github.com/prometheus/procfs/ipvs.go | 246 - vendor/github.com/prometheus/procfs/mdstat.go | 138 - .../prometheus/procfs/mountstats.go | 556 - vendor/github.com/prometheus/procfs/proc.go | 224 - .../github.com/prometheus/procfs/proc_io.go | 55 - .../prometheus/procfs/proc_limits.go | 137 - .../github.com/prometheus/procfs/proc_stat.go | 175 - vendor/github.com/prometheus/procfs/stat.go | 56 - vendor/github.com/prometheus/procfs/xfrm.go | 187 - .../github.com/prometheus/procfs/xfs/parse.go | 359 - .../github.com/prometheus/procfs/xfs/xfs.go | 163 - vendor/github.com/ugorji/go/LICENSE | 22 - vendor/github.com/ugorji/go/codec/0doc.go | 199 - vendor/github.com/ugorji/go/codec/binc.go | 929 - vendor/github.com/ugorji/go/codec/cbor.go | 592 - vendor/github.com/ugorji/go/codec/decode.go | 2053 - .../github.com/ugorji/go/codec/decode_go.go | 16 - .../github.com/ugorji/go/codec/decode_go14.go | 14 - vendor/github.com/ugorji/go/codec/encode.go | 1461 - .../ugorji/go/codec/fast-path.generated.go | 39352 ------------ .../ugorji/go/codec/fast-path.not.go | 34 - .../ugorji/go/codec/gen-helper.generated.go | 243 - .../ugorji/go/codec/gen.generated.go | 175 - vendor/github.com/ugorji/go/codec/gen.go | 2014 - vendor/github.com/ugorji/go/codec/gen_15.go | 12 - vendor/github.com/ugorji/go/codec/gen_16.go | 12 - vendor/github.com/ugorji/go/codec/gen_17.go | 10 - vendor/github.com/ugorji/go/codec/helper.go | 1314 - .../ugorji/go/codec/helper_internal.go | 242 - .../ugorji/go/codec/helper_not_unsafe.go | 20 - .../ugorji/go/codec/helper_unsafe.go | 49 - vendor/github.com/ugorji/go/codec/json.go | 1234 - vendor/github.com/ugorji/go/codec/msgpack.go | 852 - vendor/github.com/ugorji/go/codec/noop.go | 213 - vendor/github.com/ugorji/go/codec/prebuild.go | 3 - vendor/github.com/ugorji/go/codec/rpc.go | 180 - vendor/github.com/ugorji/go/codec/simple.go | 526 - vendor/github.com/ugorji/go/codec/time.go | 233 - .../x/crypto/ssh/terminal/terminal.go | 951 - .../golang.org/x/crypto/ssh/terminal/util.go | 114 - .../x/crypto/ssh/terminal/util_bsd.go | 12 - .../x/crypto/ssh/terminal/util_linux.go 
| 10 - .../x/crypto/ssh/terminal/util_plan9.go | 58 - .../x/crypto/ssh/terminal/util_solaris.go | 124 - .../x/crypto/ssh/terminal/util_windows.go | 103 - vendor/golang.org/x/net/context/context.go | 54 - vendor/golang.org/x/net/context/go17.go | 72 - vendor/golang.org/x/net/context/go19.go | 20 - vendor/golang.org/x/net/context/pre_go17.go | 300 - vendor/golang.org/x/net/context/pre_go19.go | 109 - .../x/net/internal/timeseries/timeseries.go | 525 - vendor/golang.org/x/net/trace/events.go | 532 - vendor/golang.org/x/net/trace/histogram.go | 365 - vendor/golang.org/x/net/trace/trace.go | 1082 - vendor/golang.org/x/net/trace/trace_go16.go | 21 - vendor/golang.org/x/net/trace/trace_go17.go | 21 - .../x/sys/windows/asm_windows_386.s | 13 - .../x/sys/windows/asm_windows_amd64.s | 13 - .../golang.org/x/sys/windows/dll_windows.go | 378 - vendor/golang.org/x/sys/windows/env_unset.go | 15 - .../golang.org/x/sys/windows/env_windows.go | 25 - vendor/golang.org/x/sys/windows/eventlog.go | 20 - .../golang.org/x/sys/windows/exec_windows.go | 97 - .../x/sys/windows/memory_windows.go | 26 - vendor/golang.org/x/sys/windows/mksyscall.go | 7 - vendor/golang.org/x/sys/windows/race.go | 30 - vendor/golang.org/x/sys/windows/race0.go | 25 - .../x/sys/windows/security_windows.go | 435 - vendor/golang.org/x/sys/windows/service.go | 164 - vendor/golang.org/x/sys/windows/str.go | 22 - vendor/golang.org/x/sys/windows/syscall.go | 71 - .../x/sys/windows/syscall_windows.go | 1024 - .../golang.org/x/sys/windows/types_windows.go | 1282 - .../x/sys/windows/types_windows_386.go | 22 - .../x/sys/windows/types_windows_amd64.go | 22 - .../x/sys/windows/zsyscall_windows.go | 2428 - vendor/google.golang.org/genproto/LICENSE | 202 - .../googleapis/rpc/status/status.pb.go | 143 - vendor/google.golang.org/genproto/regen.go | 123 - vendor/google.golang.org/grpc/LICENSE | 28 - vendor/google.golang.org/grpc/PATENTS | 22 - vendor/google.golang.org/grpc/backoff.go | 80 - vendor/google.golang.org/grpc/balancer.go | 400 - vendor/google.golang.org/grpc/call.go | 306 - vendor/google.golang.org/grpc/clientconn.go | 1030 - vendor/google.golang.org/grpc/codec.go | 118 - .../grpc/codes/code_string.go | 16 - vendor/google.golang.org/grpc/codes/codes.go | 159 - .../grpc/credentials/credentials.go | 234 - .../grpc/credentials/credentials_util_go17.go | 75 - .../grpc/credentials/credentials_util_go18.go | 53 - .../credentials/credentials_util_pre_go17.go | 72 - vendor/google.golang.org/grpc/doc.go | 6 - vendor/google.golang.org/grpc/go16.go | 56 - vendor/google.golang.org/grpc/go17.go | 55 - vendor/google.golang.org/grpc/grpclb.go | 749 - .../grpc/grpclb/grpc_lb_v1/grpclb.pb.go | 629 - .../google.golang.org/grpc/grpclog/logger.go | 93 - vendor/google.golang.org/grpc/interceptor.go | 90 - .../grpc/internal/internal.go | 49 - .../grpc/keepalive/keepalive.go | 80 - .../grpc/metadata/metadata.go | 144 - .../google.golang.org/grpc/naming/naming.go | 74 - vendor/google.golang.org/grpc/peer/peer.go | 65 - vendor/google.golang.org/grpc/proxy.go | 145 - vendor/google.golang.org/grpc/rpc_util.go | 501 - vendor/google.golang.org/grpc/server.go | 1108 - .../google.golang.org/grpc/stats/handlers.go | 76 - vendor/google.golang.org/grpc/stats/stats.go | 223 - .../google.golang.org/grpc/status/status.go | 145 - vendor/google.golang.org/grpc/stream.go | 633 - vendor/google.golang.org/grpc/tap/tap.go | 54 - vendor/google.golang.org/grpc/trace.go | 119 - .../grpc/transport/control.go | 207 - .../google.golang.org/grpc/transport/go16.go | 46 - 
.../google.golang.org/grpc/transport/go17.go | 46 - .../grpc/transport/handler_server.go | 406 - .../grpc/transport/http2_client.go | 1248 - .../grpc/transport/http2_server.go | 1070 - .../grpc/transport/http_util.go | 549 - .../grpc/transport/transport.go | 666 - vendor/k8s.io/api/LICENSE | 202 - vendor/k8s.io/api/admission/v1beta1/doc.go | 21 - .../api/admission/v1beta1/generated.pb.go | 1208 - .../k8s.io/api/admission/v1beta1/register.go | 51 - vendor/k8s.io/api/admission/v1beta1/types.go | 116 - .../v1beta1/types_swagger_doc_generated.go | 71 - .../v1beta1/zz_generated.deepcopy.go | 130 - .../api/admissionregistration/v1alpha1/doc.go | 25 - .../v1alpha1/generated.pb.go | 1028 - .../v1alpha1/register.go | 51 - .../admissionregistration/v1alpha1/types.go | 106 - .../v1alpha1/types_swagger_doc_generated.go | 71 - .../v1alpha1/zz_generated.deepcopy.go | 147 - .../api/admissionregistration/v1beta1/doc.go | 25 - .../v1beta1/generated.pb.go | 2150 - .../admissionregistration/v1beta1/register.go | 53 - .../admissionregistration/v1beta1/types.go | 281 - .../v1beta1/types_swagger_doc_generated.go | 125 - .../v1beta1/zz_generated.deepcopy.go | 321 - vendor/k8s.io/api/apps/v1/doc.go | 20 - vendor/k8s.io/api/apps/v1/generated.pb.go | 6945 --- vendor/k8s.io/api/apps/v1/register.go | 60 - vendor/k8s.io/api/apps/v1/types.go | 819 - .../apps/v1/types_swagger_doc_generated.go | 365 - .../api/apps/v1/zz_generated.deepcopy.go | 866 - vendor/k8s.io/api/apps/v1beta1/doc.go | 20 - .../k8s.io/api/apps/v1beta1/generated.pb.go | 5291 -- vendor/k8s.io/api/apps/v1beta1/register.go | 58 - vendor/k8s.io/api/apps/v1beta1/types.go | 568 - .../v1beta1/types_swagger_doc_generated.go | 273 - .../api/apps/v1beta1/zz_generated.deepcopy.go | 666 - vendor/k8s.io/api/apps/v1beta2/doc.go | 20 - .../k8s.io/api/apps/v1beta2/generated.pb.go | 7586 --- vendor/k8s.io/api/apps/v1beta2/register.go | 61 - vendor/k8s.io/api/apps/v1beta2/types.go | 877 - .../v1beta2/types_swagger_doc_generated.go | 396 - .../api/apps/v1beta2/zz_generated.deepcopy.go | 934 - vendor/k8s.io/api/authentication/v1/doc.go | 20 - .../api/authentication/v1/generated.pb.go | 1302 - .../k8s.io/api/authentication/v1/register.go | 51 - vendor/k8s.io/api/authentication/v1/types.go | 107 - .../v1/types_swagger_doc_generated.go | 72 - .../v1/zz_generated.deepcopy.go | 116 - .../k8s.io/api/authentication/v1beta1/doc.go | 20 - .../authentication/v1beta1/generated.pb.go | 1302 - .../api/authentication/v1beta1/register.go | 51 - .../api/authentication/v1beta1/types.go | 92 - .../v1beta1/types_swagger_doc_generated.go | 72 - .../v1beta1/zz_generated.deepcopy.go | 116 - vendor/k8s.io/api/authorization/v1/doc.go | 21 - .../api/authorization/v1/generated.pb.go | 3528 -- .../k8s.io/api/authorization/v1/register.go | 55 - vendor/k8s.io/api/authorization/v1/types.go | 268 - .../v1/types_swagger_doc_generated.go | 173 - .../authorization/v1/zz_generated.deepcopy.go | 378 - .../k8s.io/api/authorization/v1beta1/doc.go | 21 - .../api/authorization/v1beta1/generated.pb.go | 3529 -- .../api/authorization/v1beta1/register.go | 55 - .../k8s.io/api/authorization/v1beta1/types.go | 268 - .../v1beta1/types_swagger_doc_generated.go | 173 - .../v1beta1/zz_generated.deepcopy.go | 378 - vendor/k8s.io/api/autoscaling/v1/doc.go | 20 - .../k8s.io/api/autoscaling/v1/generated.pb.go | 3786 -- vendor/k8s.io/api/autoscaling/v1/register.go | 53 - vendor/k8s.io/api/autoscaling/v1/types.go | 337 - .../v1/types_swagger_doc_generated.go | 218 - .../autoscaling/v1/zz_generated.deepcopy.go | 478 - 
vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 20 - .../api/autoscaling/v2beta1/generated.pb.go | 3401 -- .../api/autoscaling/v2beta1/register.go | 52 - .../k8s.io/api/autoscaling/v2beta1/types.go | 312 - .../v2beta1/types_swagger_doc_generated.go | 189 - .../v2beta1/zz_generated.deepcopy.go | 420 - vendor/k8s.io/api/batch/v1/doc.go | 20 - vendor/k8s.io/api/batch/v1/generated.pb.go | 1615 - vendor/k8s.io/api/batch/v1/register.go | 52 - vendor/k8s.io/api/batch/v1/types.go | 181 - .../batch/v1/types_swagger_doc_generated.go | 94 - .../api/batch/v1/zz_generated.deepcopy.go | 219 - vendor/k8s.io/api/batch/v1beta1/doc.go | 20 - .../k8s.io/api/batch/v1beta1/generated.pb.go | 1509 - vendor/k8s.io/api/batch/v1beta1/register.go | 53 - vendor/k8s.io/api/batch/v1beta1/types.go | 158 - .../v1beta1/types_swagger_doc_generated.go | 96 - .../batch/v1beta1/zz_generated.deepcopy.go | 219 - vendor/k8s.io/api/batch/v2alpha1/doc.go | 20 - .../k8s.io/api/batch/v2alpha1/generated.pb.go | 1510 - vendor/k8s.io/api/batch/v2alpha1/register.go | 53 - vendor/k8s.io/api/batch/v2alpha1/types.go | 156 - .../v2alpha1/types_swagger_doc_generated.go | 96 - .../batch/v2alpha1/zz_generated.deepcopy.go | 219 - vendor/k8s.io/api/certificates/v1beta1/doc.go | 21 - .../api/certificates/v1beta1/generated.pb.go | 1693 - .../api/certificates/v1beta1/register.go | 59 - .../k8s.io/api/certificates/v1beta1/types.go | 155 - .../v1beta1/types_swagger_doc_generated.go | 74 - .../v1beta1/zz_generated.deepcopy.go | 172 - .../api/core/v1/annotation_key_constants.go | 88 - vendor/k8s.io/api/core/v1/doc.go | 21 - vendor/k8s.io/api/core/v1/generated.pb.go | 49890 ---------------- vendor/k8s.io/api/core/v1/meta.go | 108 - vendor/k8s.io/api/core/v1/objectreference.go | 33 - vendor/k8s.io/api/core/v1/register.go | 102 - vendor/k8s.io/api/core/v1/resource.go | 63 - vendor/k8s.io/api/core/v1/taint.go | 33 - vendor/k8s.io/api/core/v1/toleration.go | 56 - vendor/k8s.io/api/core/v1/types.go | 5167 -- .../core/v1/types_swagger_doc_generated.go | 2231 - .../api/core/v1/zz_generated.deepcopy.go | 5864 -- vendor/k8s.io/api/events/v1beta1/doc.go | 21 - .../k8s.io/api/events/v1beta1/generated.pb.go | 1306 - vendor/k8s.io/api/events/v1beta1/register.go | 53 - vendor/k8s.io/api/events/v1beta1/types.go | 122 - .../v1beta1/types_swagger_doc_generated.go | 73 - .../events/v1beta1/zz_generated.deepcopy.go | 127 - vendor/k8s.io/api/extensions/v1beta1/doc.go | 20 - .../api/extensions/v1beta1/generated.pb.go | 12513 ---- .../k8s.io/api/extensions/v1beta1/register.go | 66 - vendor/k8s.io/api/extensions/v1beta1/types.go | 1292 - .../v1beta1/types_swagger_doc_generated.go | 644 - .../v1beta1/zz_generated.deepcopy.go | 1603 - vendor/k8s.io/api/networking/v1/doc.go | 20 - .../k8s.io/api/networking/v1/generated.pb.go | 1869 - vendor/k8s.io/api/networking/v1/register.go | 53 - vendor/k8s.io/api/networking/v1/types.go | 196 - .../v1/types_swagger_doc_generated.go | 113 - .../networking/v1/zz_generated.deepcopy.go | 284 - vendor/k8s.io/api/policy/v1beta1/doc.go | 23 - .../k8s.io/api/policy/v1beta1/generated.pb.go | 1454 - vendor/k8s.io/api/policy/v1beta1/register.go | 54 - vendor/k8s.io/api/policy/v1beta1/types.go | 115 - .../v1beta1/types_swagger_doc_generated.go | 83 - .../policy/v1beta1/zz_generated.deepcopy.go | 192 - vendor/k8s.io/api/rbac/v1/doc.go | 21 - vendor/k8s.io/api/rbac/v1/generated.pb.go | 2749 - vendor/k8s.io/api/rbac/v1/register.go | 58 - vendor/k8s.io/api/rbac/v1/types.go | 233 - .../rbac/v1/types_swagger_doc_generated.go | 158 - 
.../api/rbac/v1/zz_generated.deepcopy.go | 401 - vendor/k8s.io/api/rbac/v1alpha1/doc.go | 21 - .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 2750 - vendor/k8s.io/api/rbac/v1alpha1/register.go | 58 - vendor/k8s.io/api/rbac/v1alpha1/types.go | 235 - .../v1alpha1/types_swagger_doc_generated.go | 158 - .../rbac/v1alpha1/zz_generated.deepcopy.go | 401 - vendor/k8s.io/api/rbac/v1beta1/doc.go | 21 - .../k8s.io/api/rbac/v1beta1/generated.pb.go | 2750 - vendor/k8s.io/api/rbac/v1beta1/register.go | 58 - vendor/k8s.io/api/rbac/v1beta1/types.go | 233 - .../v1beta1/types_swagger_doc_generated.go | 158 - .../api/rbac/v1beta1/zz_generated.deepcopy.go | 401 - vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 21 - .../api/scheduling/v1alpha1/generated.pb.go | 641 - .../api/scheduling/v1alpha1/register.go | 52 - .../k8s.io/api/scheduling/v1alpha1/types.go | 63 - .../v1alpha1/types_swagger_doc_generated.go | 52 - .../v1alpha1/zz_generated.deepcopy.go | 86 - vendor/k8s.io/api/settings/v1alpha1/doc.go | 21 - .../api/settings/v1alpha1/generated.pb.go | 930 - .../k8s.io/api/settings/v1alpha1/register.go | 52 - vendor/k8s.io/api/settings/v1alpha1/types.go | 70 - .../v1alpha1/types_swagger_doc_generated.go | 61 - .../v1alpha1/zz_generated.deepcopy.go | 133 - vendor/k8s.io/api/storage/v1/doc.go | 20 - vendor/k8s.io/api/storage/v1/generated.pb.go | 926 - vendor/k8s.io/api/storage/v1/register.go | 53 - vendor/k8s.io/api/storage/v1/types.go | 98 - .../storage/v1/types_swagger_doc_generated.go | 55 - .../api/storage/v1/zz_generated.deepcopy.go | 126 - vendor/k8s.io/api/storage/v1alpha1/doc.go | 20 - .../api/storage/v1alpha1/generated.pb.go | 1523 - .../k8s.io/api/storage/v1alpha1/register.go | 50 - vendor/k8s.io/api/storage/v1alpha1/types.go | 126 - .../v1alpha1/types_swagger_doc_generated.go | 93 - .../storage/v1alpha1/zz_generated.deepcopy.go | 188 - vendor/k8s.io/api/storage/v1beta1/doc.go | 20 - .../api/storage/v1beta1/generated.pb.go | 926 - vendor/k8s.io/api/storage/v1beta1/register.go | 53 - vendor/k8s.io/api/storage/v1beta1/types.go | 98 - .../v1beta1/types_swagger_doc_generated.go | 55 - .../storage/v1beta1/zz_generated.deepcopy.go | 126 - .../apimachinery/pkg/api/equality/semantic.go | 49 - .../k8s.io/apimachinery/pkg/api/errors/doc.go | 18 - .../apimachinery/pkg/api/errors/errors.go | 546 - .../k8s.io/apimachinery/pkg/api/meta/doc.go | 19 - .../apimachinery/pkg/api/meta/errors.go | 105 - .../pkg/api/meta/firsthit_restmapper.go | 97 - .../k8s.io/apimachinery/pkg/api/meta/help.go | 205 - .../apimachinery/pkg/api/meta/interfaces.go | 149 - .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 121 - .../k8s.io/apimachinery/pkg/api/meta/meta.go | 653 - .../pkg/api/meta/multirestmapper.go | 210 - .../apimachinery/pkg/api/meta/priority.go | 222 - .../apimachinery/pkg/api/meta/restmapper.go | 548 - .../apimachinery/pkg/api/meta/unstructured.go | 47 - .../apimachinery/pkg/api/validation/doc.go | 18 - .../pkg/api/validation/generic.go | 85 - .../pkg/api/validation/objectmeta.go | 343 - .../pkg/api/validation/path/name.go | 68 - .../pkg/apimachinery/announced/announced.go | 99 - .../apimachinery/announced/group_factory.go | 255 - .../apimachinery/pkg/apimachinery/doc.go | 20 - .../pkg/apimachinery/registered/registered.go | 336 - .../apimachinery/pkg/apimachinery/types.go | 87 - .../apis/meta/internalversion/conversion.go | 77 - .../pkg/apis/meta/internalversion/doc.go | 19 - .../pkg/apis/meta/internalversion/register.go | 105 - .../pkg/apis/meta/internalversion/types.go | 70 - .../zz_generated.conversion.go | 113 - 
.../internalversion/zz_generated.deepcopy.go | 108 - .../pkg/apis/meta/v1/unstructured/helpers.go | 453 - .../apis/meta/v1/unstructured/unstructured.go | 379 - .../meta/v1/unstructured/unstructured_list.go | 189 - .../v1/unstructured/zz_generated.deepcopy.go | 57 - .../pkg/apis/meta/v1/validation/validation.go | 92 - .../pkg/apis/meta/v1alpha1/conversion.go | 27 - .../pkg/apis/meta/v1alpha1/deepcopy.go | 61 - .../pkg/apis/meta/v1alpha1/doc.go | 22 - .../pkg/apis/meta/v1alpha1/generated.pb.go | 633 - .../pkg/apis/meta/v1alpha1/register.go | 57 - .../pkg/apis/meta/v1alpha1/types.go | 161 - .../v1alpha1/types_swagger_doc_generated.go | 104 - .../meta/v1alpha1/zz_generated.deepcopy.go | 194 - .../meta/v1alpha1/zz_generated.defaults.go | 32 - .../pkg/runtime/serializer/codec_factory.go | 237 - .../pkg/runtime/serializer/json/json.go | 278 - .../pkg/runtime/serializer/json/meta.go | 63 - .../runtime/serializer/negotiated_codec.go | 43 - .../pkg/runtime/serializer/protobuf/doc.go | 18 - .../runtime/serializer/protobuf/protobuf.go | 448 - .../runtime/serializer/protobuf_extension.go | 48 - .../serializer/recognizer/recognizer.go | 127 - .../runtime/serializer/streaming/streaming.go | 137 - .../serializer/versioning/versioning.go | 259 - .../apimachinery/pkg/util/cache/cache.go | 83 - .../pkg/util/cache/lruexpirecache.go | 102 - .../apimachinery/pkg/util/clock/clock.go | 327 - .../k8s.io/apimachinery/pkg/util/diff/diff.go | 273 - .../apimachinery/pkg/util/framer/framer.go | 167 - .../apimachinery/pkg/util/httpstream/doc.go | 19 - .../pkg/util/httpstream/httpstream.go | 149 - .../pkg/util/mergepatch/errors.go | 102 - .../apimachinery/pkg/util/mergepatch/util.go | 133 - .../apimachinery/pkg/util/proxy/dial.go | 115 - .../k8s.io/apimachinery/pkg/util/proxy/doc.go | 18 - .../apimachinery/pkg/util/proxy/transport.go | 259 - .../pkg/util/proxy/upgradeaware.go | 413 - .../pkg/util/strategicpatch/errors.go | 49 - .../pkg/util/strategicpatch/meta.go | 194 - .../pkg/util/strategicpatch/patch.go | 2151 - .../pkg/util/strategicpatch/types.go | 193 - .../k8s.io/apimachinery/pkg/util/uuid/uuid.go | 43 - .../apimachinery/pkg/util/waitgroup/doc.go | 19 - .../pkg/util/waitgroup/waitgroup.go | 57 - .../apimachinery/pkg/util/yaml/decoder.go | 346 - vendor/k8s.io/apimachinery/pkg/version/doc.go | 19 - .../k8s.io/apimachinery/pkg/version/types.go | 37 - .../third_party/forked/golang/json/fields.go | 513 - .../third_party/forked/golang/netutil/addr.go | 27 - vendor/k8s.io/apiserver/LICENSE | 202 - .../apiserver/pkg/admission/attributes.go | 85 - .../k8s.io/apiserver/pkg/admission/chain.go | 68 - .../k8s.io/apiserver/pkg/admission/config.go | 197 - .../configuration/configuration_manager.go | 166 - .../configuration/initializer_manager.go | 88 - .../configuration/mutating_webhook_manager.go | 101 - .../validating_webhook_manager.go | 84 - .../k8s.io/apiserver/pkg/admission/errors.go | 72 - .../k8s.io/apiserver/pkg/admission/handler.go | 79 - .../pkg/admission/initializer/initializer.go | 70 - .../pkg/admission/initializer/interfaces.go | 49 - .../apiserver/pkg/admission/interfaces.go | 102 - .../pkg/admission/metrics/metrics.go | 217 - .../plugin/initialization/initialization.go | 368 - .../plugin/namespace/lifecycle/admission.go | 236 - .../config/apis/webhookadmission/doc.go | 19 - .../config/apis/webhookadmission/register.go | 51 - .../config/apis/webhookadmission/types.go | 29 - .../apis/webhookadmission/v1alpha1/doc.go | 23 - .../webhookadmission/v1alpha1/register.go | 50 - 
.../apis/webhookadmission/v1alpha1/types.go | 29 - .../v1alpha1/zz_generated.conversion.go | 60 - .../v1alpha1/zz_generated.deepcopy.go | 51 - .../v1alpha1/zz_generated.defaults.go | 32 - .../webhookadmission/zz_generated.deepcopy.go | 51 - .../plugin/webhook/config/authentication.go | 156 - .../admission/plugin/webhook/config/client.go | 175 - .../plugin/webhook/config/kubeconfig.go | 68 - .../plugin/webhook/config/serviceresolver.go | 45 - .../admission/plugin/webhook/errors/doc.go | 18 - .../admission/plugin/webhook/errors/errors.go | 34 - .../plugin/webhook/errors/statuserror.go | 47 - .../plugin/webhook/mutating/admission.go | 321 - .../admission/plugin/webhook/mutating/doc.go | 19 - .../admission/plugin/webhook/namespace/doc.go | 20 - .../plugin/webhook/namespace/matcher.go | 117 - .../plugin/webhook/request/admissionreview.go | 71 - .../admission/plugin/webhook/request/doc.go | 18 - .../admission/plugin/webhook/rules/rules.go | 95 - .../plugin/webhook/validating/admission.go | 326 - .../plugin/webhook/validating/doc.go | 19 - .../plugin/webhook/versioned/attributes.go | 42 - .../plugin/webhook/versioned/conversion.go | 67 - .../admission/plugin/webhook/versioned/doc.go | 19 - .../k8s.io/apiserver/pkg/admission/plugins.go | 202 - .../apiserver/pkg/apis/apiserver/doc.go | 21 - .../pkg/apis/apiserver/install/install.go | 43 - .../apiserver/pkg/apis/apiserver/register.go | 50 - .../apiserver/pkg/apis/apiserver/types.go | 50 - .../pkg/apis/apiserver/v1alpha1/conversion.go | 88 - .../pkg/apis/apiserver/v1alpha1/doc.go | 23 - .../pkg/apis/apiserver/v1alpha1/register.go | 52 - .../pkg/apis/apiserver/v1alpha1/types.go | 50 - .../v1alpha1/zz_generated.conversion.go | 110 - .../v1alpha1/zz_generated.deepcopy.go | 75 - .../v1alpha1/zz_generated.defaults.go | 32 - .../apis/apiserver/zz_generated.deepcopy.go | 79 - vendor/k8s.io/apiserver/pkg/apis/audit/doc.go | 19 - .../apiserver/pkg/apis/audit/helpers.go | 38 - .../apiserver/pkg/apis/audit/register.go | 53 - .../k8s.io/apiserver/pkg/apis/audit/types.go | 281 - .../pkg/apis/audit/v1alpha1/conversion.go | 78 - .../apiserver/pkg/apis/audit/v1alpha1/doc.go | 23 - .../pkg/apis/audit/v1alpha1/generated.pb.go | 2700 - .../pkg/apis/audit/v1alpha1/register.go | 58 - .../pkg/apis/audit/v1alpha1/types.go | 265 - .../audit/v1alpha1/zz_generated.conversion.go | 285 - .../audit/v1alpha1/zz_generated.deepcopy.go | 310 - .../audit/v1alpha1/zz_generated.defaults.go | 32 - .../pkg/apis/audit/v1beta1/conversion.go | 45 - .../apiserver/pkg/apis/audit/v1beta1/doc.go | 23 - .../pkg/apis/audit/v1beta1/generated.pb.go | 2737 - .../pkg/apis/audit/v1beta1/register.go | 58 - .../apiserver/pkg/apis/audit/v1beta1/types.go | 266 - .../audit/v1beta1/zz_generated.conversion.go | 280 - .../audit/v1beta1/zz_generated.deepcopy.go | 310 - .../audit/v1beta1/zz_generated.defaults.go | 32 - .../pkg/apis/audit/validation/validation.go | 132 - .../pkg/apis/audit/zz_generated.deepcopy.go | 336 - vendor/k8s.io/apiserver/pkg/audit/format.go | 73 - vendor/k8s.io/apiserver/pkg/audit/metrics.go | 87 - .../apiserver/pkg/audit/policy/checker.go | 206 - .../apiserver/pkg/audit/policy/reader.go | 79 - vendor/k8s.io/apiserver/pkg/audit/request.go | 207 - vendor/k8s.io/apiserver/pkg/audit/scheme.go | 36 - vendor/k8s.io/apiserver/pkg/audit/types.go | 42 - vendor/k8s.io/apiserver/pkg/audit/union.go | 57 - .../authenticator/interfaces.go | 68 - .../authenticatorfactory/delegating.go | 115 - .../authenticatorfactory/loopback.go | 29 - .../authenticatorfactory/requestheader.go | 31 - 
.../group/authenticated_group_adder.go | 60 - .../pkg/authentication/group/group_adder.go | 50 - .../authentication/group/token_group_adder.go | 48 - .../request/anonymous/anonymous.go | 36 - .../request/bearertoken/bearertoken.go | 68 - .../request/headerrequest/requestheader.go | 179 - .../pkg/authentication/request/union/union.go | 72 - .../request/websocket/protocol.go | 109 - .../pkg/authentication/request/x509/doc.go | 19 - .../pkg/authentication/request/x509/x509.go | 215 - .../pkg/authentication/serviceaccount/util.go | 73 - .../token/tokenfile/tokenfile.go | 97 - .../apiserver/pkg/authentication/user/doc.go | 19 - .../apiserver/pkg/authentication/user/user.go | 83 - .../authorization/authorizer/interfaces.go | 158 - .../pkg/authorization/authorizer/rule.go | 73 - .../authorizerfactory/builtin.go | 94 - .../authorizerfactory/delegating.go | 47 - .../pkg/authorization/union/union.go | 105 - .../apiserver/pkg/endpoints/apiserver.go | 25 - .../pkg/endpoints/discovery/addresses.go | 72 - .../pkg/endpoints/discovery/group.go | 82 - .../pkg/endpoints/discovery/legacy.go | 88 - .../apiserver/pkg/endpoints/discovery/root.go | 145 - .../apiserver/pkg/endpoints/discovery/util.go | 73 - .../pkg/endpoints/discovery/version.go | 93 - vendor/k8s.io/apiserver/pkg/endpoints/doc.go | 18 - .../apiserver/pkg/endpoints/filters/audit.go | 253 - .../pkg/endpoints/filters/authentication.go | 126 - .../pkg/endpoints/filters/authn_audit.go | 87 - .../pkg/endpoints/filters/authorization.go | 90 - .../apiserver/pkg/endpoints/filters/doc.go | 21 - .../pkg/endpoints/filters/impersonation.go | 205 - .../pkg/endpoints/filters/legacy_audit.go | 167 - .../pkg/endpoints/filters/requestinfo.go | 47 - .../apiserver/pkg/endpoints/groupversion.go | 111 - .../pkg/endpoints/handlers/create.go | 168 - .../pkg/endpoints/handlers/delete.go | 282 - .../apiserver/pkg/endpoints/handlers/doc.go | 18 - .../apiserver/pkg/endpoints/handlers/get.go | 278 - .../apiserver/pkg/endpoints/handlers/namer.go | 139 - .../pkg/endpoints/handlers/negotiation/doc.go | 18 - .../endpoints/handlers/negotiation/errors.go | 69 - .../handlers/negotiation/negotiate.go | 332 - .../apiserver/pkg/endpoints/handlers/patch.go | 479 - .../apiserver/pkg/endpoints/handlers/proxy.go | 285 - .../pkg/endpoints/handlers/response.go | 195 - .../endpoints/handlers/responsewriters/doc.go | 18 - .../handlers/responsewriters/errors.go | 97 - .../handlers/responsewriters/status.go | 76 - .../handlers/responsewriters/writers.go | 174 - .../apiserver/pkg/endpoints/handlers/rest.go | 325 - .../pkg/endpoints/handlers/update.go | 135 - .../apiserver/pkg/endpoints/handlers/watch.go | 319 - .../apiserver/pkg/endpoints/installer.go | 1124 - .../pkg/endpoints/metrics/metrics.go | 363 - .../pkg/endpoints/openapi/openapi.go | 167 - .../pkg/endpoints/request/context.go | 160 - .../apiserver/pkg/endpoints/request/doc.go | 20 - .../pkg/endpoints/request/requestcontext.go | 120 - .../pkg/endpoints/request/requestinfo.go | 248 - .../apiserver/pkg/features/kube_features.go | 81 - .../apiserver/pkg/registry/generic/doc.go | 19 - .../apiserver/pkg/registry/generic/matcher.go | 52 - .../apiserver/pkg/registry/generic/options.go | 49 - .../generic/registry/decorated_watcher.go | 97 - .../pkg/registry/generic/registry/doc.go | 19 - .../generic/registry/storage_factory.go | 119 - .../pkg/registry/generic/registry/store.go | 1377 - .../pkg/registry/generic/storage_decorator.go | 60 - .../apiserver/pkg/registry/rest/create.go | 179 - .../apiserver/pkg/registry/rest/delete.go | 137 - 
.../k8s.io/apiserver/pkg/registry/rest/doc.go | 18 - .../apiserver/pkg/registry/rest/export.go | 33 - .../apiserver/pkg/registry/rest/meta.go | 49 - .../apiserver/pkg/registry/rest/rest.go | 366 - .../apiserver/pkg/registry/rest/table.go | 99 - .../apiserver/pkg/registry/rest/update.go | 257 - .../registry/rest/zz_generated.deepcopy.go | 55 - vendor/k8s.io/apiserver/pkg/server/config.go | 588 - .../apiserver/pkg/server/config_selfclient.go | 83 - vendor/k8s.io/apiserver/pkg/server/doc.go | 18 - .../pkg/server/filters/compression.go | 183 - .../apiserver/pkg/server/filters/cors.go | 98 - .../apiserver/pkg/server/filters/doc.go | 19 - .../pkg/server/filters/longrunning.go | 37 - .../pkg/server/filters/maxinflight.go | 119 - .../apiserver/pkg/server/filters/timeout.go | 274 - .../apiserver/pkg/server/filters/waitgroup.go | 53 - .../apiserver/pkg/server/filters/wrap.go | 43 - .../apiserver/pkg/server/genericapiserver.go | 470 - vendor/k8s.io/apiserver/pkg/server/handler.go | 199 - vendor/k8s.io/apiserver/pkg/server/healthz.go | 45 - .../apiserver/pkg/server/healthz/doc.go | 21 - .../apiserver/pkg/server/healthz/healthz.go | 159 - vendor/k8s.io/apiserver/pkg/server/hooks.go | 230 - .../apiserver/pkg/server/httplog/doc.go | 19 - .../apiserver/pkg/server/httplog/httplog.go | 213 - vendor/k8s.io/apiserver/pkg/server/mux/doc.go | 18 - .../apiserver/pkg/server/mux/pathrecorder.go | 278 - vendor/k8s.io/apiserver/pkg/server/plugins.go | 34 - .../server/routes/data/swagger/datafile.go | 17087 ------ .../k8s.io/apiserver/pkg/server/routes/doc.go | 18 - .../apiserver/pkg/server/routes/index.go | 69 - .../apiserver/pkg/server/routes/metrics.go | 54 - .../apiserver/pkg/server/routes/openapi.go | 39 - .../apiserver/pkg/server/routes/profiling.go | 36 - .../apiserver/pkg/server/routes/swagger.go | 36 - .../apiserver/pkg/server/routes/swaggerui.go | 40 - .../apiserver/pkg/server/routes/version.go | 57 - vendor/k8s.io/apiserver/pkg/server/serve.go | 207 - vendor/k8s.io/apiserver/pkg/server/signal.go | 43 - .../apiserver/pkg/server/signal_posix.go | 26 - .../apiserver/pkg/server/signal_windows.go | 23 - vendor/k8s.io/apiserver/pkg/storage/cacher.go | 994 - vendor/k8s.io/apiserver/pkg/storage/doc.go | 18 - vendor/k8s.io/apiserver/pkg/storage/errors.go | 170 - .../apiserver/pkg/storage/errors/doc.go | 18 - .../apiserver/pkg/storage/errors/storage.go | 116 - .../pkg/storage/etcd/api_object_versioner.go | 109 - .../k8s.io/apiserver/pkg/storage/etcd/doc.go | 17 - .../apiserver/pkg/storage/etcd/etcd_helper.go | 649 - .../pkg/storage/etcd/etcd_watcher.go | 497 - .../pkg/storage/etcd/metrics/metrics.go | 110 - .../apiserver/pkg/storage/etcd/util/doc.go | 19 - .../pkg/storage/etcd/util/etcd_util.go | 99 - .../apiserver/pkg/storage/etcd3/compact.go | 162 - .../apiserver/pkg/storage/etcd3/errors.go | 42 - .../apiserver/pkg/storage/etcd3/event.go | 57 - .../apiserver/pkg/storage/etcd3/store.go | 807 - .../apiserver/pkg/storage/etcd3/watcher.go | 401 - .../apiserver/pkg/storage/interfaces.go | 189 - .../apiserver/pkg/storage/names/generate.go | 54 - .../pkg/storage/selection_predicate.go | 150 - .../pkg/storage/storagebackend/config.go | 77 - .../storage/storagebackend/factory/etcd2.go | 81 - .../storage/storagebackend/factory/etcd3.go | 67 - .../storage/storagebackend/factory/factory.go | 43 - .../apiserver/pkg/storage/time_budget.go | 95 - vendor/k8s.io/apiserver/pkg/storage/util.go | 161 - .../pkg/storage/value/transformer.go | 153 - .../apiserver/pkg/storage/watch_cache.go | 476 - .../pkg/util/feature/feature_gate.go | 
302 - .../apiserver/pkg/util/flushwriter/doc.go | 19 - .../apiserver/pkg/util/flushwriter/writer.go | 53 - .../k8s.io/apiserver/pkg/util/trace/trace.go | 89 - .../apiserver/pkg/util/webhook/webhook.go | 122 - .../apiserver/pkg/util/wsstream/conn.go | 352 - .../k8s.io/apiserver/pkg/util/wsstream/doc.go | 21 - .../apiserver/pkg/util/wsstream/stream.go | 177 - .../authenticator/token/webhook/webhook.go | 144 - .../plugin/pkg/authorizer/webhook/webhook.go | 269 - .../client-go/discovery/discovery_client.go | 414 - vendor/k8s.io/client-go/discovery/helper.go | 121 - .../k8s.io/client-go/discovery/restmapper.go | 331 - .../client-go/discovery/unstructured.go | 95 - .../admissionregistration/interface.go | 54 - .../v1alpha1/initializerconfiguration.go | 87 - .../v1alpha1/interface.go | 45 - .../v1beta1/interface.go | 52 - .../v1beta1/mutatingwebhookconfiguration.go | 87 - .../v1beta1/validatingwebhookconfiguration.go | 87 - .../client-go/informers/apps/interface.go | 62 - .../informers/apps/v1/controllerrevision.go | 88 - .../client-go/informers/apps/v1/daemonset.go | 88 - .../client-go/informers/apps/v1/deployment.go | 88 - .../client-go/informers/apps/v1/interface.go | 73 - .../client-go/informers/apps/v1/replicaset.go | 88 - .../informers/apps/v1/statefulset.go | 88 - .../apps/v1beta1/controllerrevision.go | 88 - .../informers/apps/v1beta1/deployment.go | 88 - .../informers/apps/v1beta1/interface.go | 59 - .../informers/apps/v1beta1/statefulset.go | 88 - .../apps/v1beta2/controllerrevision.go | 88 - .../informers/apps/v1beta2/daemonset.go | 88 - .../informers/apps/v1beta2/deployment.go | 88 - .../informers/apps/v1beta2/interface.go | 73 - .../informers/apps/v1beta2/replicaset.go | 88 - .../informers/apps/v1beta2/statefulset.go | 88 - .../informers/autoscaling/interface.go | 54 - .../autoscaling/v1/horizontalpodautoscaler.go | 88 - .../informers/autoscaling/v1/interface.go | 45 - .../v2beta1/horizontalpodautoscaler.go | 88 - .../autoscaling/v2beta1/interface.go | 45 - .../client-go/informers/batch/interface.go | 62 - .../client-go/informers/batch/v1/interface.go | 45 - .../client-go/informers/batch/v1/job.go | 88 - .../informers/batch/v1beta1/cronjob.go | 88 - .../informers/batch/v1beta1/interface.go | 45 - .../informers/batch/v2alpha1/cronjob.go | 88 - .../informers/batch/v2alpha1/interface.go | 45 - .../informers/certificates/interface.go | 46 - .../v1beta1/certificatesigningrequest.go | 87 - .../certificates/v1beta1/interface.go | 45 - .../client-go/informers/core/interface.go | 46 - .../informers/core/v1/componentstatus.go | 87 - .../client-go/informers/core/v1/configmap.go | 88 - .../client-go/informers/core/v1/endpoints.go | 88 - .../client-go/informers/core/v1/event.go | 88 - .../client-go/informers/core/v1/interface.go | 150 - .../client-go/informers/core/v1/limitrange.go | 88 - .../client-go/informers/core/v1/namespace.go | 87 - .../client-go/informers/core/v1/node.go | 87 - .../informers/core/v1/persistentvolume.go | 87 - .../core/v1/persistentvolumeclaim.go | 88 - .../k8s.io/client-go/informers/core/v1/pod.go | 88 - .../informers/core/v1/podtemplate.go | 88 - .../core/v1/replicationcontroller.go | 88 - .../informers/core/v1/resourcequota.go | 88 - .../client-go/informers/core/v1/secret.go | 88 - .../client-go/informers/core/v1/service.go | 88 - .../informers/core/v1/serviceaccount.go | 88 - .../client-go/informers/events/interface.go | 46 - .../informers/events/v1beta1/event.go | 88 - .../informers/events/v1beta1/interface.go | 45 - .../informers/extensions/interface.go | 46 - 
.../informers/extensions/v1beta1/daemonset.go | 88 - .../extensions/v1beta1/deployment.go | 88 - .../informers/extensions/v1beta1/ingress.go | 88 - .../informers/extensions/v1beta1/interface.go | 73 - .../extensions/v1beta1/podsecuritypolicy.go | 87 - .../extensions/v1beta1/replicaset.go | 88 - vendor/k8s.io/client-go/informers/factory.go | 208 - vendor/k8s.io/client-go/informers/generic.go | 254 - .../internalinterfaces/factory_interfaces.go | 37 - .../informers/networking/interface.go | 46 - .../informers/networking/v1/interface.go | 45 - .../informers/networking/v1/networkpolicy.go | 88 - .../client-go/informers/policy/interface.go | 46 - .../informers/policy/v1beta1/interface.go | 45 - .../policy/v1beta1/poddisruptionbudget.go | 88 - .../client-go/informers/rbac/interface.go | 62 - .../informers/rbac/v1/clusterrole.go | 87 - .../informers/rbac/v1/clusterrolebinding.go | 87 - .../client-go/informers/rbac/v1/interface.go | 66 - .../client-go/informers/rbac/v1/role.go | 88 - .../informers/rbac/v1/rolebinding.go | 88 - .../informers/rbac/v1alpha1/clusterrole.go | 87 - .../rbac/v1alpha1/clusterrolebinding.go | 87 - .../informers/rbac/v1alpha1/interface.go | 66 - .../client-go/informers/rbac/v1alpha1/role.go | 88 - .../informers/rbac/v1alpha1/rolebinding.go | 88 - .../informers/rbac/v1beta1/clusterrole.go | 87 - .../rbac/v1beta1/clusterrolebinding.go | 87 - .../informers/rbac/v1beta1/interface.go | 66 - .../client-go/informers/rbac/v1beta1/role.go | 88 - .../informers/rbac/v1beta1/rolebinding.go | 88 - .../informers/scheduling/interface.go | 46 - .../scheduling/v1alpha1/interface.go | 45 - .../scheduling/v1alpha1/priorityclass.go | 87 - .../client-go/informers/settings/interface.go | 46 - .../informers/settings/v1alpha1/interface.go | 45 - .../informers/settings/v1alpha1/podpreset.go | 88 - .../client-go/informers/storage/interface.go | 62 - .../informers/storage/v1/interface.go | 45 - .../informers/storage/v1/storageclass.go | 87 - .../informers/storage/v1alpha1/interface.go | 45 - .../storage/v1alpha1/volumeattachment.go | 87 - .../informers/storage/v1beta1/interface.go | 45 - .../informers/storage/v1beta1/storageclass.go | 87 - .../k8s.io/client-go/kubernetes/clientset.go | 596 - vendor/k8s.io/client-go/kubernetes/doc.go | 18 - vendor/k8s.io/client-go/kubernetes/import.go | 19 - .../k8s.io/client-go/kubernetes/scheme/doc.go | 18 - .../client-go/kubernetes/scheme/register.go | 107 - .../v1alpha1/admissionregistration_client.go | 88 - .../admissionregistration/v1alpha1/doc.go | 18 - .../v1alpha1/generated_expansion.go | 19 - .../v1alpha1/initializerconfiguration.go | 145 - .../v1beta1/admissionregistration_client.go | 93 - .../admissionregistration/v1beta1/doc.go | 18 - .../v1beta1/generated_expansion.go | 21 - .../v1beta1/mutatingwebhookconfiguration.go | 145 - .../v1beta1/validatingwebhookconfiguration.go | 145 - .../kubernetes/typed/apps/v1/apps_client.go | 108 - .../typed/apps/v1/controllerrevision.go | 155 - .../kubernetes/typed/apps/v1/daemonset.go | 172 - .../kubernetes/typed/apps/v1/deployment.go | 172 - .../client-go/kubernetes/typed/apps/v1/doc.go | 18 - .../typed/apps/v1/generated_expansion.go | 27 - .../kubernetes/typed/apps/v1/replicaset.go | 172 - .../kubernetes/typed/apps/v1/statefulset.go | 172 - .../typed/apps/v1beta1/apps_client.go | 103 - .../typed/apps/v1beta1/controllerrevision.go | 155 - .../typed/apps/v1beta1/deployment.go | 172 - .../kubernetes/typed/apps/v1beta1/doc.go | 18 - .../typed/apps/v1beta1/generated_expansion.go | 25 - 
.../kubernetes/typed/apps/v1beta1/scale.go | 46 - .../typed/apps/v1beta1/statefulset.go | 172 - .../typed/apps/v1beta2/apps_client.go | 113 - .../typed/apps/v1beta2/controllerrevision.go | 155 - .../typed/apps/v1beta2/daemonset.go | 172 - .../typed/apps/v1beta2/deployment.go | 172 - .../kubernetes/typed/apps/v1beta2/doc.go | 18 - .../typed/apps/v1beta2/generated_expansion.go | 29 - .../typed/apps/v1beta2/replicaset.go | 172 - .../kubernetes/typed/apps/v1beta2/scale.go | 46 - .../typed/apps/v1beta2/statefulset.go | 203 - .../v1/authentication_client.go | 88 - .../kubernetes/typed/authentication/v1/doc.go | 18 - .../authentication/v1/generated_expansion.go | 17 - .../typed/authentication/v1/tokenreview.go | 44 - .../v1/tokenreview_expansion.go | 35 - .../v1beta1/authentication_client.go | 88 - .../typed/authentication/v1beta1/doc.go | 18 - .../v1beta1/generated_expansion.go | 17 - .../authentication/v1beta1/tokenreview.go | 44 - .../v1beta1/tokenreview_expansion.go | 35 - .../authorization/v1/authorization_client.go | 103 - .../kubernetes/typed/authorization/v1/doc.go | 18 - .../authorization/v1/generated_expansion.go | 17 - .../v1/localsubjectaccessreview.go | 46 - .../v1/localsubjectaccessreview_expansion.go | 36 - .../v1/selfsubjectaccessreview.go | 44 - .../v1/selfsubjectaccessreview_expansion.go | 35 - .../v1/selfsubjectrulesreview.go | 44 - .../v1/selfsubjectrulesreview_expansion.go | 35 - .../authorization/v1/subjectaccessreview.go | 44 - .../v1/subjectaccessreview_expansion.go | 36 - .../v1beta1/authorization_client.go | 103 - .../typed/authorization/v1beta1/doc.go | 18 - .../v1beta1/generated_expansion.go | 17 - .../v1beta1/localsubjectaccessreview.go | 46 - .../localsubjectaccessreview_expansion.go | 36 - .../v1beta1/selfsubjectaccessreview.go | 44 - .../selfsubjectaccessreview_expansion.go | 35 - .../v1beta1/selfsubjectrulesreview.go | 44 - .../selfsubjectrulesreview_expansion.go | 35 - .../v1beta1/subjectaccessreview.go | 44 - .../v1beta1/subjectaccessreview_expansion.go | 36 - .../autoscaling/v1/autoscaling_client.go | 88 - .../kubernetes/typed/autoscaling/v1/doc.go | 18 - .../autoscaling/v1/generated_expansion.go | 19 - .../autoscaling/v1/horizontalpodautoscaler.go | 172 - .../autoscaling/v2beta1/autoscaling_client.go | 88 - .../typed/autoscaling/v2beta1/doc.go | 18 - .../v2beta1/generated_expansion.go | 19 - .../v2beta1/horizontalpodautoscaler.go | 172 - .../kubernetes/typed/batch/v1/batch_client.go | 88 - .../kubernetes/typed/batch/v1/doc.go | 18 - .../typed/batch/v1/generated_expansion.go | 19 - .../kubernetes/typed/batch/v1/job.go | 172 - .../typed/batch/v1beta1/batch_client.go | 88 - .../kubernetes/typed/batch/v1beta1/cronjob.go | 172 - .../kubernetes/typed/batch/v1beta1/doc.go | 18 - .../batch/v1beta1/generated_expansion.go | 19 - .../typed/batch/v2alpha1/batch_client.go | 88 - .../typed/batch/v2alpha1/cronjob.go | 172 - .../kubernetes/typed/batch/v2alpha1/doc.go | 18 - .../batch/v2alpha1/generated_expansion.go | 19 - .../v1beta1/certificates_client.go | 88 - .../v1beta1/certificatesigningrequest.go | 161 - .../certificatesigningrequest_expansion.go | 37 - .../typed/certificates/v1beta1/doc.go | 18 - .../v1beta1/generated_expansion.go | 17 - .../typed/core/v1/componentstatus.go | 145 - .../kubernetes/typed/core/v1/configmap.go | 155 - .../kubernetes/typed/core/v1/core_client.go | 163 - .../client-go/kubernetes/typed/core/v1/doc.go | 18 - .../kubernetes/typed/core/v1/endpoints.go | 155 - .../kubernetes/typed/core/v1/event.go | 155 - .../typed/core/v1/event_expansion.go | 
164 - .../typed/core/v1/generated_expansion.go | 39 - .../kubernetes/typed/core/v1/limitrange.go | 155 - .../kubernetes/typed/core/v1/namespace.go | 161 - .../typed/core/v1/namespace_expansion.go | 31 - .../kubernetes/typed/core/v1/node.go | 161 - .../typed/core/v1/node_expansion.go | 43 - .../typed/core/v1/persistentvolume.go | 161 - .../typed/core/v1/persistentvolumeclaim.go | 172 - .../client-go/kubernetes/typed/core/v1/pod.go | 172 - .../kubernetes/typed/core/v1/pod_expansion.go | 45 - .../kubernetes/typed/core/v1/podtemplate.go | 155 - .../typed/core/v1/replicationcontroller.go | 204 - .../kubernetes/typed/core/v1/resourcequota.go | 172 - .../kubernetes/typed/core/v1/secret.go | 155 - .../kubernetes/typed/core/v1/service.go | 172 - .../typed/core/v1/service_expansion.go | 41 - .../typed/core/v1/serviceaccount.go | 155 - .../kubernetes/typed/events/v1beta1/doc.go | 18 - .../kubernetes/typed/events/v1beta1/event.go | 155 - .../typed/events/v1beta1/events_client.go | 88 - .../events/v1beta1/generated_expansion.go | 19 - .../typed/extensions/v1beta1/daemonset.go | 172 - .../typed/extensions/v1beta1/deployment.go | 203 - .../v1beta1/deployment_expansion.go | 29 - .../typed/extensions/v1beta1/doc.go | 18 - .../extensions/v1beta1/extensions_client.go | 113 - .../extensions/v1beta1/generated_expansion.go | 25 - .../typed/extensions/v1beta1/ingress.go | 172 - .../extensions/v1beta1/podsecuritypolicy.go | 145 - .../typed/extensions/v1beta1/replicaset.go | 203 - .../typed/extensions/v1beta1/scale.go | 46 - .../extensions/v1beta1/scale_expansion.go | 65 - .../kubernetes/typed/networking/v1/doc.go | 18 - .../networking/v1/generated_expansion.go | 19 - .../typed/networking/v1/networking_client.go | 88 - .../typed/networking/v1/networkpolicy.go | 155 - .../kubernetes/typed/policy/v1beta1/doc.go | 18 - .../typed/policy/v1beta1/eviction.go | 46 - .../policy/v1beta1/eviction_expansion.go | 38 - .../policy/v1beta1/generated_expansion.go | 19 - .../policy/v1beta1/poddisruptionbudget.go | 172 - .../typed/policy/v1beta1/policy_client.go | 93 - .../kubernetes/typed/rbac/v1/clusterrole.go | 145 - .../typed/rbac/v1/clusterrolebinding.go | 145 - .../client-go/kubernetes/typed/rbac/v1/doc.go | 18 - .../typed/rbac/v1/generated_expansion.go | 25 - .../kubernetes/typed/rbac/v1/rbac_client.go | 103 - .../kubernetes/typed/rbac/v1/role.go | 155 - .../kubernetes/typed/rbac/v1/rolebinding.go | 155 - .../typed/rbac/v1alpha1/clusterrole.go | 145 - .../typed/rbac/v1alpha1/clusterrolebinding.go | 145 - .../kubernetes/typed/rbac/v1alpha1/doc.go | 18 - .../rbac/v1alpha1/generated_expansion.go | 25 - .../typed/rbac/v1alpha1/rbac_client.go | 103 - .../kubernetes/typed/rbac/v1alpha1/role.go | 155 - .../typed/rbac/v1alpha1/rolebinding.go | 155 - .../typed/rbac/v1beta1/clusterrole.go | 145 - .../typed/rbac/v1beta1/clusterrolebinding.go | 145 - .../kubernetes/typed/rbac/v1beta1/doc.go | 18 - .../typed/rbac/v1beta1/generated_expansion.go | 25 - .../typed/rbac/v1beta1/rbac_client.go | 103 - .../kubernetes/typed/rbac/v1beta1/role.go | 155 - .../typed/rbac/v1beta1/rolebinding.go | 155 - .../typed/scheduling/v1alpha1/doc.go | 18 - .../v1alpha1/generated_expansion.go | 19 - .../scheduling/v1alpha1/priorityclass.go | 145 - .../scheduling/v1alpha1/scheduling_client.go | 88 - .../kubernetes/typed/settings/v1alpha1/doc.go | 18 - .../settings/v1alpha1/generated_expansion.go | 19 - .../typed/settings/v1alpha1/podpreset.go | 155 - .../settings/v1alpha1/settings_client.go | 88 - .../kubernetes/typed/storage/v1/doc.go | 18 - 
.../typed/storage/v1/generated_expansion.go | 19 - .../typed/storage/v1/storage_client.go | 88 - .../typed/storage/v1/storageclass.go | 145 - .../kubernetes/typed/storage/v1alpha1/doc.go | 18 - .../storage/v1alpha1/generated_expansion.go | 19 - .../typed/storage/v1alpha1/storage_client.go | 88 - .../storage/v1alpha1/volumeattachment.go | 161 - .../kubernetes/typed/storage/v1beta1/doc.go | 18 - .../storage/v1beta1/generated_expansion.go | 19 - .../typed/storage/v1beta1/storage_client.go | 88 - .../typed/storage/v1beta1/storageclass.go | 145 - .../v1alpha1/expansion_generated.go | 23 - .../v1alpha1/initializerconfiguration.go | 65 - .../v1beta1/expansion_generated.go | 27 - .../v1beta1/mutatingwebhookconfiguration.go | 65 - .../v1beta1/validatingwebhookconfiguration.go | 65 - .../listers/apps/v1/controllerrevision.go | 94 - .../client-go/listers/apps/v1/daemonset.go | 94 - .../listers/apps/v1/daemonset_expansion.go | 113 - .../client-go/listers/apps/v1/deployment.go | 94 - .../listers/apps/v1/deployment_expansion.go | 70 - .../listers/apps/v1/expansion_generated.go | 27 - .../client-go/listers/apps/v1/replicaset.go | 94 - .../listers/apps/v1/replicaset_expansion.go | 73 - .../client-go/listers/apps/v1/statefulset.go | 94 - .../listers/apps/v1/statefulset_expansion.go | 77 - .../apps/v1beta1/controllerrevision.go | 94 - .../listers/apps/v1beta1/deployment.go | 94 - .../apps/v1beta1/expansion_generated.go | 43 - .../client-go/listers/apps/v1beta1/scale.go | 94 - .../listers/apps/v1beta1/statefulset.go | 94 - .../apps/v1beta1/statefulset_expansion.go | 77 - .../apps/v1beta2/controllerrevision.go | 94 - .../listers/apps/v1beta2/daemonset.go | 94 - .../apps/v1beta2/daemonset_expansion.go | 113 - .../listers/apps/v1beta2/deployment.go | 94 - .../apps/v1beta2/deployment_expansion.go | 70 - .../apps/v1beta2/expansion_generated.go | 35 - .../listers/apps/v1beta2/replicaset.go | 94 - .../apps/v1beta2/replicaset_expansion.go | 73 - .../client-go/listers/apps/v1beta2/scale.go | 94 - .../listers/apps/v1beta2/statefulset.go | 94 - .../apps/v1beta2/statefulset_expansion.go | 77 - .../autoscaling/v1/expansion_generated.go | 27 - .../autoscaling/v1/horizontalpodautoscaler.go | 94 - .../v2beta1/expansion_generated.go | 27 - .../v2beta1/horizontalpodautoscaler.go | 94 - .../listers/batch/v1/expansion_generated.go | 19 - .../k8s.io/client-go/listers/batch/v1/job.go | 94 - .../listers/batch/v1/job_expansion.go | 68 - .../listers/batch/v1beta1/cronjob.go | 94 - .../batch/v1beta1/expansion_generated.go | 27 - .../listers/batch/v2alpha1/cronjob.go | 94 - .../batch/v2alpha1/expansion_generated.go | 27 - .../v1beta1/certificatesigningrequest.go | 65 - .../v1beta1/expansion_generated.go | 23 - .../listers/core/v1/componentstatus.go | 65 - .../client-go/listers/core/v1/configmap.go | 94 - .../client-go/listers/core/v1/endpoints.go | 94 - .../k8s.io/client-go/listers/core/v1/event.go | 94 - .../listers/core/v1/expansion_generated.go | 111 - .../client-go/listers/core/v1/limitrange.go | 94 - .../client-go/listers/core/v1/namespace.go | 65 - .../k8s.io/client-go/listers/core/v1/node.go | 65 - .../listers/core/v1/node_expansion.go | 48 - .../listers/core/v1/persistentvolume.go | 65 - .../listers/core/v1/persistentvolumeclaim.go | 94 - .../k8s.io/client-go/listers/core/v1/pod.go | 94 - .../client-go/listers/core/v1/podtemplate.go | 94 - .../listers/core/v1/replicationcontroller.go | 94 - .../v1/replicationcontroller_expansion.go | 66 - .../listers/core/v1/resourcequota.go | 94 - .../client-go/listers/core/v1/secret.go | 94 
- .../client-go/listers/core/v1/service.go | 94 - .../listers/core/v1/service_expansion.go | 56 - .../listers/core/v1/serviceaccount.go | 94 - .../client-go/listers/events/v1beta1/event.go | 94 - .../events/v1beta1/expansion_generated.go | 27 - .../listers/extensions/v1beta1/daemonset.go | 94 - .../extensions/v1beta1/daemonset_expansion.go | 114 - .../listers/extensions/v1beta1/deployment.go | 94 - .../v1beta1/deployment_expansion.go | 70 - .../extensions/v1beta1/expansion_generated.go | 39 - .../listers/extensions/v1beta1/ingress.go | 94 - .../extensions/v1beta1/podsecuritypolicy.go | 65 - .../listers/extensions/v1beta1/replicaset.go | 94 - .../v1beta1/replicaset_expansion.go | 73 - .../listers/extensions/v1beta1/scale.go | 94 - .../networking/v1/expansion_generated.go | 27 - .../listers/networking/v1/networkpolicy.go | 94 - .../listers/policy/v1beta1/eviction.go | 94 - .../policy/v1beta1/expansion_generated.go | 27 - .../policy/v1beta1/poddisruptionbudget.go | 94 - .../v1beta1/poddisruptionbudget_expansion.go | 74 - .../client-go/listers/rbac/v1/clusterrole.go | 65 - .../listers/rbac/v1/clusterrolebinding.go | 65 - .../listers/rbac/v1/expansion_generated.go | 43 - .../k8s.io/client-go/listers/rbac/v1/role.go | 94 - .../client-go/listers/rbac/v1/rolebinding.go | 94 - .../listers/rbac/v1alpha1/clusterrole.go | 65 - .../rbac/v1alpha1/clusterrolebinding.go | 65 - .../rbac/v1alpha1/expansion_generated.go | 43 - .../client-go/listers/rbac/v1alpha1/role.go | 94 - .../listers/rbac/v1alpha1/rolebinding.go | 94 - .../listers/rbac/v1beta1/clusterrole.go | 65 - .../rbac/v1beta1/clusterrolebinding.go | 65 - .../rbac/v1beta1/expansion_generated.go | 43 - .../client-go/listers/rbac/v1beta1/role.go | 94 - .../listers/rbac/v1beta1/rolebinding.go | 94 - .../v1alpha1/expansion_generated.go | 23 - .../scheduling/v1alpha1/priorityclass.go | 65 - .../settings/v1alpha1/expansion_generated.go | 27 - .../listers/settings/v1alpha1/podpreset.go | 94 - .../listers/storage/v1/expansion_generated.go | 23 - .../listers/storage/v1/storageclass.go | 65 - .../storage/v1alpha1/expansion_generated.go | 23 - .../storage/v1alpha1/volumeattachment.go | 65 - .../storage/v1beta1/expansion_generated.go | 23 - .../listers/storage/v1beta1/storageclass.go | 65 - vendor/k8s.io/client-go/pkg/version/base.go | 63 - vendor/k8s.io/client-go/pkg/version/doc.go | 20 - .../k8s.io/client-go/pkg/version/version.go | 42 - vendor/k8s.io/client-go/rest/client.go | 258 - vendor/k8s.io/client-go/rest/config.go | 464 - vendor/k8s.io/client-go/rest/plugin.go | 73 - vendor/k8s.io/client-go/rest/request.go | 1132 - vendor/k8s.io/client-go/rest/transport.go | 101 - vendor/k8s.io/client-go/rest/url_utils.go | 97 - vendor/k8s.io/client-go/rest/urlbackoff.go | 107 - vendor/k8s.io/client-go/rest/versions.go | 88 - vendor/k8s.io/client-go/rest/watch/decoder.go | 72 - vendor/k8s.io/client-go/rest/watch/encoder.go | 56 - .../client-go/rest/zz_generated.deepcopy.go | 52 - .../k8s.io/client-go/tools/auth/clientauth.go | 125 - .../client-go/tools/cache/controller.go | 394 - .../client-go/tools/cache/delta_fifo.go | 685 - vendor/k8s.io/client-go/tools/cache/doc.go | 24 - .../client-go/tools/cache/expiration_cache.go | 208 - .../tools/cache/expiration_cache_fakes.go | 54 - .../tools/cache/fake_custom_store.go | 102 - vendor/k8s.io/client-go/tools/cache/fifo.go | 358 - vendor/k8s.io/client-go/tools/cache/heap.go | 323 - vendor/k8s.io/client-go/tools/cache/index.go | 87 - .../k8s.io/client-go/tools/cache/listers.go | 160 - 
.../k8s.io/client-go/tools/cache/listwatch.go | 178 - .../client-go/tools/cache/mutation_cache.go | 261 - .../tools/cache/mutation_detector.go | 127 - .../k8s.io/client-go/tools/cache/reflector.go | 449 - .../tools/cache/reflector_metrics.go | 119 - .../client-go/tools/cache/shared_informer.go | 584 - vendor/k8s.io/client-go/tools/cache/store.go | 244 - .../tools/cache/thread_safe_store.go | 304 - .../client-go/tools/cache/undelta_store.go | 83 - .../tools/clientcmd/api/latest/latest.go | 66 - .../client-go/tools/clientcmd/auth_loaders.go | 106 - .../tools/clientcmd/client_config.go | 549 - .../client-go/tools/clientcmd/config.go | 472 - .../k8s.io/client-go/tools/clientcmd/doc.go | 37 - .../k8s.io/client-go/tools/clientcmd/flag.go | 49 - .../client-go/tools/clientcmd/helpers.go | 35 - .../client-go/tools/clientcmd/loader.go | 612 - .../tools/clientcmd/merged_client_builder.go | 169 - .../client-go/tools/clientcmd/overrides.go | 247 - .../client-go/tools/clientcmd/validation.go | 275 - .../k8s.io/client-go/tools/metrics/metrics.go | 61 - vendor/k8s.io/client-go/tools/pager/pager.go | 118 - .../k8s.io/client-go/tools/reference/ref.go | 122 - vendor/k8s.io/client-go/transport/cache.go | 92 - vendor/k8s.io/client-go/transport/config.go | 105 - .../client-go/transport/round_trippers.go | 455 - .../k8s.io/client-go/transport/transport.go | 141 - .../client-go/util/buffer/ring_growing.go | 72 - vendor/k8s.io/client-go/util/cert/cert.go | 215 - vendor/k8s.io/client-go/util/cert/csr.go | 75 - vendor/k8s.io/client-go/util/cert/io.go | 158 - vendor/k8s.io/client-go/util/cert/pem.go | 269 - .../client-go/util/flowcontrol/backoff.go | 149 - .../client-go/util/flowcontrol/throttle.go | 148 - .../k8s.io/client-go/util/homedir/homedir.go | 47 - .../k8s.io/client-go/util/integer/integer.go | 67 - vendor/k8s.io/kube-openapi/pkg/builder/doc.go | 20 - .../kube-openapi/pkg/builder/openapi.go | 424 - .../k8s.io/kube-openapi/pkg/builder/util.go | 61 - .../kube-openapi/pkg/handler/handler.go | 204 - vendor/k8s.io/kube-openapi/pkg/util/trie.go | 79 - vendor/k8s.io/kube-openapi/pkg/util/util.go | 39 - .../pkg/apis/cluster/common/consts.go | 131 - .../cluster-api/pkg/apis/cluster/doc.go | 25 - .../apis/cluster/v1alpha1/cluster_types.go | 155 - .../pkg/apis/cluster/v1alpha1/common_types.go | 43 - .../pkg/apis/cluster/v1alpha1/doc.go | 29 - .../apis/cluster/v1alpha1/machine_types.go | 180 - .../v1alpha1/machinedeployment_types.go | 315 - .../apis/cluster/v1alpha1/machineset_types.go | 169 - .../v1alpha1/zz_generated.api.register.go | 210 - .../v1alpha1/zz_generated.conversion.go | 948 - .../cluster/v1alpha1/zz_generated.deepcopy.go | 760 - .../cluster/v1alpha1/zz_generated.defaults.go | 32 - .../apis/cluster/zz_generated.api.register.go | 748 - .../pkg/apis/cluster/zz_generated.deepcopy.go | 760 - .../pkg/apis/cluster/zz_generated.defaults.go | 32 - .../clientset/scheme/doc.go | 17 - .../clientset/scheme/register.go | 52 - .../typed/cluster/v1alpha1/cluster.go | 171 - .../typed/cluster/v1alpha1/cluster_client.go | 102 - .../clientset/typed/cluster/v1alpha1/doc.go | 17 - .../cluster/v1alpha1/generated_expansion.go | 24 - .../typed/cluster/v1alpha1/machine.go | 171 - .../cluster/v1alpha1/machinedeployment.go | 171 - .../typed/cluster/v1alpha1/machineset.go | 171 - 1687 files changed, 24 insertions(+), 495403 deletions(-) delete mode 100644 vendor/bitbucket.org/ww/goautoneg/autoneg.go delete mode 100644 vendor/github.com/NYTimes/gziphandler/LICENSE.md delete mode 100644 vendor/github.com/NYTimes/gziphandler/gzip.go 
delete mode 100644 vendor/github.com/NYTimes/gziphandler/gzip_go18.go delete mode 100644 vendor/github.com/beorn7/perks/LICENSE delete mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go delete mode 100644 vendor/github.com/coreos/etcd/LICENSE delete mode 100644 vendor/github.com/coreos/etcd/NOTICE delete mode 100644 vendor/github.com/coreos/etcd/alarm/alarms.go delete mode 100644 vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go delete mode 100644 vendor/github.com/coreos/etcd/auth/doc.go delete mode 100644 vendor/github.com/coreos/etcd/auth/range_perm_cache.go delete mode 100644 vendor/github.com/coreos/etcd/auth/simple_token.go delete mode 100644 vendor/github.com/coreos/etcd/auth/store.go delete mode 100644 vendor/github.com/coreos/etcd/client/auth_role.go delete mode 100644 vendor/github.com/coreos/etcd/client/auth_user.go delete mode 100644 vendor/github.com/coreos/etcd/client/cancelreq.go delete mode 100644 vendor/github.com/coreos/etcd/client/client.go delete mode 100644 vendor/github.com/coreos/etcd/client/cluster_error.go delete mode 100644 vendor/github.com/coreos/etcd/client/curl.go delete mode 100644 vendor/github.com/coreos/etcd/client/discover.go delete mode 100644 vendor/github.com/coreos/etcd/client/doc.go delete mode 100644 vendor/github.com/coreos/etcd/client/keys.generated.go delete mode 100644 vendor/github.com/coreos/etcd/client/keys.go delete mode 100644 vendor/github.com/coreos/etcd/client/members.go delete mode 100644 vendor/github.com/coreos/etcd/client/srv.go delete mode 100644 vendor/github.com/coreos/etcd/client/util.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/auth.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/balancer.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/client.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/cluster.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/compact_op.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/compare.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/config.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/doc.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/kv.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/lease.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/logger.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/maintenance.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/op.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/retry.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/sort.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/txn.go delete mode 100644 vendor/github.com/coreos/etcd/clientv3/watch.go delete mode 100644 vendor/github.com/coreos/etcd/compactor/compactor.go delete mode 100644 vendor/github.com/coreos/etcd/compactor/doc.go delete mode 100644 vendor/github.com/coreos/etcd/discovery/discovery.go delete mode 100644 vendor/github.com/coreos/etcd/discovery/srv.go delete mode 100644 vendor/github.com/coreos/etcd/error/error.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/capability.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/cluster.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/doc.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go 
delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/apply.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/apply_auth.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/apply_v2.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/auth/auth.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/cluster_util.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/config.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/consistent_index.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/doc.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/errors.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/membership/errors.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/membership/member.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/membership/store.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/metrics.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/quota.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/raft.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/server.go delete mode 100644 
vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/stats/leader.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/stats/queue.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/stats/server.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/stats/stats.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/storage.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/util.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/v2_server.go delete mode 100644 vendor/github.com/coreos/etcd/etcdserver/v3_server.go delete mode 100644 vendor/github.com/coreos/etcd/integration/bridge.go delete mode 100644 vendor/github.com/coreos/etcd/integration/cluster.go delete mode 100644 vendor/github.com/coreos/etcd/integration/cluster_direct.go delete mode 100644 vendor/github.com/coreos/etcd/integration/cluster_proxy.go delete mode 100644 vendor/github.com/coreos/etcd/integration/doc.go delete mode 100644 vendor/github.com/coreos/etcd/lease/doc.go delete mode 100644 vendor/github.com/coreos/etcd/lease/leasehttp/doc.go delete mode 100644 vendor/github.com/coreos/etcd/lease/leasehttp/http.go delete mode 100644 vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go delete mode 100644 vendor/github.com/coreos/etcd/lease/lessor.go delete mode 100644 vendor/github.com/coreos/etcd/main.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/backend.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/doc.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/backend/metrics.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/doc.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/index.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/key_index.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/kv.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/metrics.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/revision.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/util.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/watchable_store.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/watcher.go delete mode 100644 vendor/github.com/coreos/etcd/mvcc/watcher_group.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/adt/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/contention/contention.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/contention/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/crc/crc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock.go delete mode 100644 
vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/purge.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/sync.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/httputil/httputil.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/idutil/id.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/ioutil/reader.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/ioutil/util.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/monotime/issue15006.s delete mode 100644 vendor/github.com/coreos/etcd/pkg/monotime/monotime.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/monotime/nanotime.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/netutil/netutil.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/netutil/routes.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/pathutil/path.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/schedule/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/schedule/schedule.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/testutil/leak.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/testutil/pauseable_handler.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/testutil/recorder.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/testutil/testutil.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/listener.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go delete mode 100644 
vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/tls.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/transport.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/doc.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/id.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/set.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/slice.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/urls.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/types/urlsmap.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/wait/wait.go delete mode 100644 vendor/github.com/coreos/etcd/pkg/wait/wait_time.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/auth.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/doc.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcasts.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_ranges.go delete mode 100644 vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go delete mode 100644 vendor/github.com/coreos/etcd/raft/doc.go delete mode 100644 vendor/github.com/coreos/etcd/raft/log.go delete mode 100644 vendor/github.com/coreos/etcd/raft/log_unstable.go delete mode 100644 vendor/github.com/coreos/etcd/raft/logger.go delete mode 100644 vendor/github.com/coreos/etcd/raft/node.go delete mode 100644 vendor/github.com/coreos/etcd/raft/progress.go delete mode 100644 vendor/github.com/coreos/etcd/raft/raft.go delete mode 100644 vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go delete mode 100644 vendor/github.com/coreos/etcd/raft/rawnode.go delete mode 100644 vendor/github.com/coreos/etcd/raft/read_only.go delete mode 100644 vendor/github.com/coreos/etcd/raft/status.go delete mode 100644 vendor/github.com/coreos/etcd/raft/storage.go delete mode 100644 vendor/github.com/coreos/etcd/raft/util.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/coder.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/doc.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/http.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/metrics.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/msg_codec.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/peer.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/peer_status.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/pipeline.go 
delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/probing_status.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/remote.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/stream.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/transport.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/urlpick.go delete mode 100644 vendor/github.com/coreos/etcd/rafthttp/util.go delete mode 100644 vendor/github.com/coreos/etcd/snap/db.go delete mode 100644 vendor/github.com/coreos/etcd/snap/message.go delete mode 100644 vendor/github.com/coreos/etcd/snap/metrics.go delete mode 100644 vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go delete mode 100644 vendor/github.com/coreos/etcd/snap/snapshotter.go delete mode 100644 vendor/github.com/coreos/etcd/store/doc.go delete mode 100644 vendor/github.com/coreos/etcd/store/event.go delete mode 100644 vendor/github.com/coreos/etcd/store/event_history.go delete mode 100644 vendor/github.com/coreos/etcd/store/event_queue.go delete mode 100644 vendor/github.com/coreos/etcd/store/metrics.go delete mode 100644 vendor/github.com/coreos/etcd/store/node.go delete mode 100644 vendor/github.com/coreos/etcd/store/node_extern.go delete mode 100644 vendor/github.com/coreos/etcd/store/stats.go delete mode 100644 vendor/github.com/coreos/etcd/store/store.go delete mode 100644 vendor/github.com/coreos/etcd/store/ttl_key_heap.go delete mode 100644 vendor/github.com/coreos/etcd/store/watcher.go delete mode 100644 vendor/github.com/coreos/etcd/store/watcher_hub.go delete mode 100644 vendor/github.com/coreos/etcd/version/version.go delete mode 100644 vendor/github.com/coreos/etcd/wal/decoder.go delete mode 100644 vendor/github.com/coreos/etcd/wal/doc.go delete mode 100644 vendor/github.com/coreos/etcd/wal/encoder.go delete mode 100644 vendor/github.com/coreos/etcd/wal/file_pipeline.go delete mode 100644 vendor/github.com/coreos/etcd/wal/metrics.go delete mode 100644 vendor/github.com/coreos/etcd/wal/repair.go delete mode 100644 vendor/github.com/coreos/etcd/wal/util.go delete mode 100644 vendor/github.com/coreos/etcd/wal/wal.go delete mode 100644 vendor/github.com/coreos/etcd/wal/wal_unix.go delete mode 100644 vendor/github.com/coreos/etcd/wal/wal_windows.go delete mode 100644 vendor/github.com/coreos/etcd/wal/walpb/record.go delete mode 100644 vendor/github.com/coreos/etcd/wal/walpb/record.pb.go delete mode 100644 vendor/github.com/coreos/go-systemd/daemon/sdnotify.go delete mode 100644 vendor/github.com/coreos/go-systemd/daemon/watchdog.go delete mode 100644 vendor/github.com/coreos/go-systemd/journal/journal.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/cloudinit.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/config.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/config.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/disk.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/file.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/filesystem.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/group.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/networkd.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/partition.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/passwd.go delete mode 100644 
vendor/github.com/coreos/ignition/config/v1/types/path.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/raid.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/storage.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/systemd.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/unit.go delete mode 100644 vendor/github.com/coreos/ignition/config/v1/types/user.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/append.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/cloudinit.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/config.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/translate.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/compression.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/config.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/disk.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/file.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/filesystem.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/group.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/hash.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/ignition.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/networkd.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/partition.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/passwd.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/path.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/raid.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/storage.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/systemd.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/unit.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/url.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/user.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_0/types/verification.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/append.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/cloudinit.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/config.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/translate.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/config.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/directory.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/disk.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/file.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/filesystem.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/ignition.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/link.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/mode.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/node.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/partition.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/passwd.go delete mode 100644 
vendor/github.com/coreos/ignition/config/v2_1/types/path.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/raid.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/schema.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/unit.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/url.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_1/types/verification.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/append.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/cloudinit.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/config.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/translate.go delete mode 100644 vendor/github.com/coreos/ignition/config/validate/astjson/node.go delete mode 100644 vendor/github.com/coreos/ignition/config/validate/astnode/astnode.go delete mode 100644 vendor/github.com/coreos/ignition/config/validate/validate.go delete mode 100644 vendor/github.com/coreos/pkg/LICENSE delete mode 100644 vendor/github.com/coreos/pkg/NOTICE delete mode 100644 vendor/github.com/coreos/pkg/capnslog/formatters.go delete mode 100644 vendor/github.com/coreos/pkg/capnslog/glog_formatter.go delete mode 100644 vendor/github.com/coreos/pkg/capnslog/init.go delete mode 100644 vendor/github.com/coreos/pkg/capnslog/init_windows.go delete mode 100644 vendor/github.com/coreos/pkg/capnslog/journald_formatter.go delete mode 100644 vendor/github.com/coreos/pkg/capnslog/log_hijack.go delete mode 100644 vendor/github.com/coreos/pkg/capnslog/logmap.go delete mode 100644 vendor/github.com/coreos/pkg/capnslog/pkg_logger.go delete mode 100644 vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go delete mode 100644 vendor/github.com/coreos/pkg/health/health.go delete mode 100644 vendor/github.com/coreos/pkg/httputil/cookie.go delete mode 100644 vendor/github.com/coreos/pkg/httputil/json.go delete mode 100644 vendor/github.com/coreos/pkg/timeutil/backoff.go delete mode 100644 vendor/github.com/coreos/tectonic-config/config/tectonic-utility/config.go delete mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/LICENSE delete mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go delete mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/doc.go delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/LICENSE delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/config.go delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/model_builder.go delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/model_list.go delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/swagger.go delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go delete mode 100644 vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go delete mode 100644 vendor/github.com/evanphx/json-patch/LICENSE delete mode 100644 vendor/github.com/evanphx/json-patch/merge.go delete mode 100644 vendor/github.com/evanphx/json-patch/patch.go delete mode 100644 vendor/github.com/golang/protobuf/LICENSE delete 
mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go delete mode 100644 vendor/github.com/golang/protobuf/proto/decode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go delete mode 100644 vendor/github.com/golang/protobuf/proto/properties.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go delete mode 100644 vendor/github.com/googleapis/gnostic/LICENSE delete mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go delete mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/context.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/error.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/extension-handler.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/helpers.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/main.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/reader.go delete mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.pb.go delete mode 100644 vendor/github.com/googleapis/gnostic/extensions/extensions.go delete mode 100644 vendor/github.com/googleapis/gnostic/gnostic.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go delete mode 100644 
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go delete mode 100644 vendor/github.com/hashicorp/golang-lru/2q.go delete mode 100644 vendor/github.com/hashicorp/golang-lru/LICENSE delete mode 100644 vendor/github.com/hashicorp/golang-lru/arc.go delete mode 100644 vendor/github.com/hashicorp/golang-lru/lru.go delete mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go delete mode 100644 vendor/github.com/howeyc/gopass/LICENSE.txt delete mode 100644 vendor/github.com/howeyc/gopass/pass.go delete mode 100644 vendor/github.com/howeyc/gopass/terminal.go delete mode 100644 vendor/github.com/howeyc/gopass/terminal_solaris.go delete mode 100644 vendor/github.com/imdario/mergo/LICENSE delete mode 100644 vendor/github.com/imdario/mergo/doc.go delete mode 100644 vendor/github.com/imdario/mergo/map.go delete mode 100644 vendor/github.com/imdario/mergo/merge.go delete mode 100644 vendor/github.com/imdario/mergo/mergo.go delete mode 100644 vendor/github.com/json-iterator/go/LICENSE delete mode 100644 vendor/github.com/json-iterator/go/feature_adapter.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_array.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_bool.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_float.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_int32.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_int64.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_invalid.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_nil.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_number.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_object.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_string.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_uint32.go delete mode 100644 vendor/github.com/json-iterator/go/feature_any_uint64.go delete mode 100644 vendor/github.com/json-iterator/go/feature_config.go delete mode 100644 vendor/github.com/json-iterator/go/feature_iter.go delete mode 100644 vendor/github.com/json-iterator/go/feature_iter_array.go delete mode 100644 vendor/github.com/json-iterator/go/feature_iter_float.go delete mode 100644 vendor/github.com/json-iterator/go/feature_iter_int.go delete mode 100644 vendor/github.com/json-iterator/go/feature_iter_object.go delete mode 100644 vendor/github.com/json-iterator/go/feature_iter_skip.go delete mode 100644 vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go delete mode 100644 
vendor/github.com/json-iterator/go/feature_iter_skip_strict.go delete mode 100644 vendor/github.com/json-iterator/go/feature_iter_string.go delete mode 100644 vendor/github.com/json-iterator/go/feature_json_number.go delete mode 100644 vendor/github.com/json-iterator/go/feature_pool.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_array.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_extension.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_map.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_native.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_object.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_slice.go delete mode 100644 vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go delete mode 100644 vendor/github.com/json-iterator/go/feature_stream.go delete mode 100644 vendor/github.com/json-iterator/go/feature_stream_float.go delete mode 100644 vendor/github.com/json-iterator/go/feature_stream_int.go delete mode 100644 vendor/github.com/json-iterator/go/feature_stream_string.go delete mode 100644 vendor/github.com/json-iterator/go/jsoniter.go delete mode 100644 vendor/github.com/juju/ratelimit/LICENSE delete mode 100644 vendor/github.com/juju/ratelimit/ratelimit.go delete mode 100644 vendor/github.com/juju/ratelimit/reader.go delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/LICENSE delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/api_group_builder.go delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/api_unversioned_resource_builder.go delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/api_version_builder.go delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/api_versioned_resource_builder.go delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/default_controller_fns.go delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/default_scheme_fns.go delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/default_storage_strategy.go delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/resource_interfaces.go delete mode 100644 vendor/github.com/kubernetes-incubator/apiserver-builder/pkg/builders/scheme.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go delete mode 100644 vendor/github.com/mxk/go-flowrate/LICENSE delete mode 100644 vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go delete mode 100644 vendor/github.com/mxk/go-flowrate/flowrate/io.go delete mode 100644 vendor/github.com/mxk/go-flowrate/flowrate/util.go delete mode 100644 vendor/github.com/prometheus/client_golang/LICENSE delete mode 100644 vendor/github.com/prometheus/client_golang/NOTICE delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go delete mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/desc.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go delete mode 100644 vendor/github.com/prometheus/client_model/LICENSE delete mode 100644 vendor/github.com/prometheus/client_model/NOTICE delete mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go delete mode 100644 vendor/github.com/prometheus/common/LICENSE delete mode 100644 vendor/github.com/prometheus/common/NOTICE delete mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go delete mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go delete mode 100644 vendor/github.com/prometheus/common/model/alert.go delete mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go delete mode 100644 vendor/github.com/prometheus/common/model/fnv.go delete mode 100644 vendor/github.com/prometheus/common/model/labels.go delete mode 100644 vendor/github.com/prometheus/common/model/labelset.go delete mode 100644 vendor/github.com/prometheus/common/model/metric.go delete mode 100644 vendor/github.com/prometheus/common/model/model.go delete mode 100644 vendor/github.com/prometheus/common/model/signature.go delete mode 100644 vendor/github.com/prometheus/common/model/silence.go delete mode 100644 vendor/github.com/prometheus/common/model/time.go delete mode 100644 vendor/github.com/prometheus/common/model/value.go delete mode 100644 vendor/github.com/prometheus/procfs/LICENSE delete mode 100644 vendor/github.com/prometheus/procfs/NOTICE delete mode 100644 vendor/github.com/prometheus/procfs/buddyinfo.go delete mode 100644 vendor/github.com/prometheus/procfs/doc.go delete mode 100644 vendor/github.com/prometheus/procfs/fs.go delete mode 100644 vendor/github.com/prometheus/procfs/ipvs.go delete mode 100644 vendor/github.com/prometheus/procfs/mdstat.go delete mode 100644 
vendor/github.com/prometheus/procfs/mountstats.go delete mode 100644 vendor/github.com/prometheus/procfs/proc.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_io.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go delete mode 100644 vendor/github.com/prometheus/procfs/stat.go delete mode 100644 vendor/github.com/prometheus/procfs/xfrm.go delete mode 100644 vendor/github.com/prometheus/procfs/xfs/parse.go delete mode 100644 vendor/github.com/prometheus/procfs/xfs/xfs.go delete mode 100644 vendor/github.com/ugorji/go/LICENSE delete mode 100644 vendor/github.com/ugorji/go/codec/0doc.go delete mode 100644 vendor/github.com/ugorji/go/codec/binc.go delete mode 100644 vendor/github.com/ugorji/go/codec/cbor.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode_go.go delete mode 100644 vendor/github.com/ugorji/go/codec/decode_go14.go delete mode 100644 vendor/github.com/ugorji/go/codec/encode.go delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/fast-path.not.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen.generated.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen_15.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen_16.go delete mode 100644 vendor/github.com/ugorji/go/codec/gen_17.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_internal.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_not_unsafe.go delete mode 100644 vendor/github.com/ugorji/go/codec/helper_unsafe.go delete mode 100644 vendor/github.com/ugorji/go/codec/json.go delete mode 100644 vendor/github.com/ugorji/go/codec/msgpack.go delete mode 100644 vendor/github.com/ugorji/go/codec/noop.go delete mode 100644 vendor/github.com/ugorji/go/codec/prebuild.go delete mode 100644 vendor/github.com/ugorji/go/codec/rpc.go delete mode 100644 vendor/github.com/ugorji/go/codec/simple.go delete mode 100644 vendor/github.com/ugorji/go/codec/time.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go delete mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go delete mode 100644 vendor/golang.org/x/net/context/context.go delete mode 100644 vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/golang.org/x/net/context/go19.go delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go delete mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go delete mode 100644 vendor/golang.org/x/net/trace/events.go delete mode 100644 vendor/golang.org/x/net/trace/histogram.go delete mode 100644 vendor/golang.org/x/net/trace/trace.go delete mode 100644 vendor/golang.org/x/net/trace/trace_go16.go delete mode 100644 vendor/golang.org/x/net/trace/trace_go17.go delete mode 100644 
vendor/golang.org/x/sys/windows/asm_windows_386.s delete mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s delete mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/env_unset.go delete mode 100644 vendor/golang.org/x/sys/windows/env_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/eventlog.go delete mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/windows/race.go delete mode 100644 vendor/golang.org/x/sys/windows/race0.go delete mode 100644 vendor/golang.org/x/sys/windows/security_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/service.go delete mode 100644 vendor/golang.org/x/sys/windows/str.go delete mode 100644 vendor/golang.org/x/sys/windows/syscall.go delete mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go delete mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go delete mode 100644 vendor/google.golang.org/genproto/LICENSE delete mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go delete mode 100644 vendor/google.golang.org/genproto/regen.go delete mode 100644 vendor/google.golang.org/grpc/LICENSE delete mode 100644 vendor/google.golang.org/grpc/PATENTS delete mode 100644 vendor/google.golang.org/grpc/backoff.go delete mode 100644 vendor/google.golang.org/grpc/balancer.go delete mode 100644 vendor/google.golang.org/grpc/call.go delete mode 100644 vendor/google.golang.org/grpc/clientconn.go delete mode 100644 vendor/google.golang.org/grpc/codec.go delete mode 100644 vendor/google.golang.org/grpc/codes/code_string.go delete mode 100644 vendor/google.golang.org/grpc/codes/codes.go delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go17.go delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go18.go delete mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go delete mode 100644 vendor/google.golang.org/grpc/doc.go delete mode 100644 vendor/google.golang.org/grpc/go16.go delete mode 100644 vendor/google.golang.org/grpc/go17.go delete mode 100644 vendor/google.golang.org/grpc/grpclb.go delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/grpclb.pb.go delete mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go delete mode 100644 vendor/google.golang.org/grpc/interceptor.go delete mode 100644 vendor/google.golang.org/grpc/internal/internal.go delete mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go delete mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go delete mode 100644 vendor/google.golang.org/grpc/naming/naming.go delete mode 100644 vendor/google.golang.org/grpc/peer/peer.go delete mode 100644 vendor/google.golang.org/grpc/proxy.go delete mode 100644 vendor/google.golang.org/grpc/rpc_util.go delete mode 100644 vendor/google.golang.org/grpc/server.go delete mode 100644 vendor/google.golang.org/grpc/stats/handlers.go delete mode 100644 vendor/google.golang.org/grpc/stats/stats.go delete mode 100644 
vendor/google.golang.org/grpc/status/status.go delete mode 100644 vendor/google.golang.org/grpc/stream.go delete mode 100644 vendor/google.golang.org/grpc/tap/tap.go delete mode 100644 vendor/google.golang.org/grpc/trace.go delete mode 100644 vendor/google.golang.org/grpc/transport/control.go delete mode 100644 vendor/google.golang.org/grpc/transport/go16.go delete mode 100644 vendor/google.golang.org/grpc/transport/go17.go delete mode 100644 vendor/google.golang.org/grpc/transport/handler_server.go delete mode 100644 vendor/google.golang.org/grpc/transport/http2_client.go delete mode 100644 vendor/google.golang.org/grpc/transport/http2_server.go delete mode 100644 vendor/google.golang.org/grpc/transport/http_util.go delete mode 100644 vendor/google.golang.org/grpc/transport/transport.go delete mode 100644 vendor/k8s.io/api/LICENSE delete mode 100644 vendor/k8s.io/api/admission/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/admission/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/admission/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/admission/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/apps/v1/doc.go delete mode 100644 vendor/k8s.io/api/apps/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/apps/v1/register.go delete mode 100644 vendor/k8s.io/api/apps/v1/types.go delete mode 100644 vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/doc.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.pb.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/register.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/types.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authentication/v1/doc.go delete mode 100644 
vendor/k8s.io/api/authentication/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authentication/v1/register.go delete mode 100644 vendor/k8s.io/api/authentication/v1/types.go delete mode 100644 vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authorization/v1/doc.go delete mode 100644 vendor/k8s.io/api/authorization/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authorization/v1/register.go delete mode 100644 vendor/k8s.io/api/authorization/v1/types.go delete mode 100644 vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/doc.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/register.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/types.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/doc.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/register.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v1/doc.go delete mode 100644 vendor/k8s.io/api/batch/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/batch/v1/register.go delete mode 100644 vendor/k8s.io/api/batch/v1/types.go delete mode 100644 vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/register.go delete mode 100644 
vendor/k8s.io/api/batch/v2alpha1/types.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/core/v1/annotation_key_constants.go delete mode 100644 vendor/k8s.io/api/core/v1/doc.go delete mode 100644 vendor/k8s.io/api/core/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/core/v1/meta.go delete mode 100644 vendor/k8s.io/api/core/v1/objectreference.go delete mode 100644 vendor/k8s.io/api/core/v1/register.go delete mode 100644 vendor/k8s.io/api/core/v1/resource.go delete mode 100644 vendor/k8s.io/api/core/v1/taint.go delete mode 100644 vendor/k8s.io/api/core/v1/toleration.go delete mode 100644 vendor/k8s.io/api/core/v1/types.go delete mode 100644 vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/networking/v1/doc.go delete mode 100644 vendor/k8s.io/api/networking/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/networking/v1/register.go delete mode 100644 vendor/k8s.io/api/networking/v1/types.go delete mode 100644 vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/rbac/v1/doc.go delete mode 100644 vendor/k8s.io/api/rbac/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/rbac/v1/register.go delete mode 100644 vendor/k8s.io/api/rbac/v1/types.go delete mode 100644 vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go delete mode 
100644 vendor/k8s.io/api/rbac/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/storage/v1/doc.go delete mode 100644 vendor/k8s.io/api/storage/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/storage/v1/register.go delete mode 100644 vendor/k8s.io/api/storage/v1/types.go delete mode 100644 vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/errors.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/errors.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/help.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/meta.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go 
delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/priority.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/generic.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/types.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/conversion.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/register.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go delete mode 100644 
vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/cache.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/clock/clock.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/diff/diff.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/framer/framer.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/waitgroup/waitgroup.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/version/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/version/types.go delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go delete mode 100644 vendor/k8s.io/apiserver/LICENSE delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/attributes.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/chain.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/config.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/configuration_manager.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/errors.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/handler.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/initializer/initializer.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/initializer/interfaces.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/interfaces.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/register.go delete mode 100644 
vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/types.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/register.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/types.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/authentication.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/client.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/serviceresolver.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/errors.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors/statuserror.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/admission.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace/matcher.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/request/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/rules/rules.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/admission.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/attributes.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/conversion.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/versioned/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugins.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go delete mode 100644 
vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/register.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/types.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/conversion.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/register.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/types.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1alpha1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/conversion.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/register.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/types.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/validation/validation.go delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apiserver/pkg/audit/format.go delete mode 100644 vendor/k8s.io/apiserver/pkg/audit/metrics.go delete mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/checker.go delete mode 100644 vendor/k8s.io/apiserver/pkg/audit/policy/reader.go delete mode 100644 vendor/k8s.io/apiserver/pkg/audit/request.go delete mode 100644 vendor/k8s.io/apiserver/pkg/audit/scheme.go delete mode 100644 vendor/k8s.io/apiserver/pkg/audit/types.go delete mode 100644 vendor/k8s.io/apiserver/pkg/audit/union.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/loopback.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go delete mode 100644 
vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/user.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go delete mode 100644 vendor/k8s.io/apiserver/pkg/authorization/union/union.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/apiserver.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/addresses.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/group.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/legacy.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/root.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/util.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/discovery/version.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/audit.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/authentication.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/authn_audit.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/authorization.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/legacy_audit.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/filters/requestinfo.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/groupversion.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/create.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/delete.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/namer.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/proxy.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/errors.go delete mode 100755 vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/status.go delete mode 100644 
vendor/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters/writers.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/rest.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/update.go delete mode 100755 vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/installer.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/openapi/openapi.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/context.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/requestcontext.go delete mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go delete mode 100644 vendor/k8s.io/apiserver/pkg/features/kube_features.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/matcher.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/options.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/decorated_watcher.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/registry/store.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/create.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/delete.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/export.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/meta.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/rest.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/table.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/update.go delete mode 100644 vendor/k8s.io/apiserver/pkg/registry/rest/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/config.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/config_selfclient.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/compression.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/cors.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/longrunning.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/timeout.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/waitgroup.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/wrap.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/genericapiserver.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/handler.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/hooks.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/httplog/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go delete 
mode 100644 vendor/k8s.io/apiserver/pkg/server/mux/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/mux/pathrecorder.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/plugins.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/data/swagger/datafile.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/index.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/metrics.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/openapi.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/profiling.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/swagger.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/swaggerui.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/version.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/serve.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/signal.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/signal_posix.go delete mode 100644 vendor/k8s.io/apiserver/pkg/server/signal_windows.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors/storage.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/api_object_versioner.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/metrics.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/util/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/util/etcd_util.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/interfaces.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/names/generate.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd2.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/time_budget.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/util.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/transformer.go delete mode 100644 vendor/k8s.io/apiserver/pkg/storage/watch_cache.go delete mode 100644 vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go delete mode 100644 vendor/k8s.io/apiserver/pkg/util/flushwriter/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/util/flushwriter/writer.go delete mode 100644 vendor/k8s.io/apiserver/pkg/util/trace/trace.go delete mode 100755 vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go delete 
mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go delete mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go delete mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go delete mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go delete mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go delete mode 100644 vendor/k8s.io/client-go/discovery/discovery_client.go delete mode 100644 vendor/k8s.io/client-go/discovery/helper.go delete mode 100644 vendor/k8s.io/client-go/discovery/restmapper.go delete mode 100644 vendor/k8s.io/client-go/discovery/unstructured.go delete mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go delete mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1/daemonset.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1/deployment.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1/replicaset.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1/statefulset.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go delete mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go delete mode 100644 vendor/k8s.io/client-go/informers/autoscaling/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/batch/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/batch/v1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/batch/v1/job.go delete mode 100644 vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go delete mode 100644 vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go delete mode 100644 
vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/certificates/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go delete mode 100644 vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/core/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/componentstatus.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/configmap.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/endpoints.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/event.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/limitrange.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/namespace.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/node.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/pod.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/podtemplate.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/resourcequota.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/secret.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/service.go delete mode 100644 vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go delete mode 100644 vendor/k8s.io/client-go/informers/events/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/events/v1beta1/event.go delete mode 100644 vendor/k8s.io/client-go/informers/events/v1beta1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/extensions/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go delete mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go delete mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go delete mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go delete mode 100644 vendor/k8s.io/client-go/informers/factory.go delete mode 100644 vendor/k8s.io/client-go/informers/generic.go delete mode 100644 vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go delete mode 100644 vendor/k8s.io/client-go/informers/networking/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/networking/v1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go delete mode 100644 vendor/k8s.io/client-go/informers/policy/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/interface.go delete mode 100644 
vendor/k8s.io/client-go/informers/rbac/v1/role.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go delete mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/informers/scheduling/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go delete mode 100644 vendor/k8s.io/client-go/informers/settings/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go delete mode 100644 vendor/k8s.io/client-go/informers/storage/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go delete mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/clientset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/import.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/register.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go delete mode 
100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go delete mode 
100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go delete 
mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go delete mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/daemonset.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/deployment.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/replicaset.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/statefulset.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go delete mode 100644 
vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/deployment_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go delete mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/batch/v1/job.go delete mode 100644 vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go delete mode 100644 vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go delete mode 100644 vendor/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go delete mode 100644 vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/componentstatus.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/configmap.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/endpoints.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/event.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/limitrange.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/namespace.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/node.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/node_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/pod.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/podtemplate.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/resourcequota.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/secret.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/service.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/service_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go delete mode 100644 vendor/k8s.io/client-go/listers/events/v1beta1/event.go delete mode 100644 vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go delete mode 100644 
vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go delete mode 100644 vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go delete mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go delete mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go delete mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/role.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go delete mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go delete mode 100644 vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go delete mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/pkg/version/base.go delete mode 100644 vendor/k8s.io/client-go/pkg/version/doc.go delete mode 100644 vendor/k8s.io/client-go/pkg/version/version.go 
delete mode 100644 vendor/k8s.io/client-go/rest/client.go delete mode 100644 vendor/k8s.io/client-go/rest/config.go delete mode 100644 vendor/k8s.io/client-go/rest/plugin.go delete mode 100644 vendor/k8s.io/client-go/rest/request.go delete mode 100644 vendor/k8s.io/client-go/rest/transport.go delete mode 100644 vendor/k8s.io/client-go/rest/url_utils.go delete mode 100644 vendor/k8s.io/client-go/rest/urlbackoff.go delete mode 100644 vendor/k8s.io/client-go/rest/versions.go delete mode 100644 vendor/k8s.io/client-go/rest/watch/decoder.go delete mode 100644 vendor/k8s.io/client-go/rest/watch/encoder.go delete mode 100644 vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/client-go/tools/auth/clientauth.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/controller.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/delta_fifo.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/doc.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/expiration_cache.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/fake_custom_store.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/fifo.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/heap.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/index.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/listers.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/listwatch.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_cache.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/mutation_detector.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/reflector.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/reflector_metrics.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/shared_informer.go delete mode 100755 vendor/k8s.io/client-go/tools/cache/store.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/thread_safe_store.go delete mode 100644 vendor/k8s.io/client-go/tools/cache/undelta_store.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/client_config.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/config.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/doc.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/flag.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/helpers.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/loader.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/overrides.go delete mode 100644 vendor/k8s.io/client-go/tools/clientcmd/validation.go delete mode 100644 vendor/k8s.io/client-go/tools/metrics/metrics.go delete mode 100644 vendor/k8s.io/client-go/tools/pager/pager.go delete mode 100644 vendor/k8s.io/client-go/tools/reference/ref.go delete mode 100644 vendor/k8s.io/client-go/transport/cache.go delete mode 100644 vendor/k8s.io/client-go/transport/config.go delete mode 100644 vendor/k8s.io/client-go/transport/round_trippers.go delete mode 100644 vendor/k8s.io/client-go/transport/transport.go delete mode 100644 vendor/k8s.io/client-go/util/buffer/ring_growing.go delete mode 100644 vendor/k8s.io/client-go/util/cert/cert.go delete mode 100644 vendor/k8s.io/client-go/util/cert/csr.go delete mode 100644 
vendor/k8s.io/client-go/util/cert/io.go delete mode 100644 vendor/k8s.io/client-go/util/cert/pem.go delete mode 100644 vendor/k8s.io/client-go/util/flowcontrol/backoff.go delete mode 100644 vendor/k8s.io/client-go/util/flowcontrol/throttle.go delete mode 100644 vendor/k8s.io/client-go/util/homedir/homedir.go delete mode 100644 vendor/k8s.io/client-go/util/integer/integer.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/doc.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/openapi.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/util.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/handler/handler.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/util/trie.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/util/util.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/common/consts.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/doc.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/cluster_types.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/common_types.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/doc.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machine_types.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machinedeployment_types.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/machineset_types.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.api.register.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.conversion.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/zz_generated.defaults.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/zz_generated.api.register.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/zz_generated.deepcopy.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/apis/cluster/zz_generated.defaults.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/doc.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/scheme/register.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/cluster_client.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/doc.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/generated_expansion.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machine.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machinedeployment.go delete mode 100644 vendor/sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1/machineset.go diff --git a/glide.lock b/glide.lock index 351c83596b8..a14280591b2 100644 --- a/glide.lock +++ b/glide.lock @@ -1,8 +1,6 @@ hash: 89c59afb258e5da417740d181008c08a7e0ac024c537b04b879cee13fe419fa3 -updated: 2018-09-25T21:12:26.59053433-07:00 +updated: 
2018-09-26T15:23:10.801068675-07:00 imports: -- name: bitbucket.org/ww/goautoneg - version: "" - name: github.com/ajeddeloh/go-json version: 73d058cf8437a1989030afe571eeab9f90eebbbd - name: github.com/AlecAivazis/survey @@ -18,7 +16,7 @@ imports: subpackages: - cidr - name: github.com/aws/aws-sdk-go - version: cbd7ed8b82dac1de35d67e576f0b3faf215e1ae9 + version: 85d9dfd77e6d694e83c3ac054141cb5e81eecc7f subpackages: - aws - aws/awserr @@ -59,69 +57,6 @@ imports: - service/s3/s3iface - service/s3/s3manager - service/sts -- name: github.com/beorn7/perks - version: 3ac7bf7a47d159a033b107610db8a1b6575507a4 - subpackages: - - quantile -- name: github.com/coreos/etcd - version: 0520cb9304cb2385f7e72b8bc02d6e4d3257158a - subpackages: - - alarm - - auth - - auth/authpb - - client - - clientv3 - - compactor - - discovery - - error - - etcdserver - - etcdserver/api - - etcdserver/api/v2http - - etcdserver/api/v2http/httptypes - - etcdserver/api/v3rpc - - etcdserver/api/v3rpc/rpctypes - - etcdserver/auth - - etcdserver/etcdserverpb - - etcdserver/membership - - etcdserver/stats - - integration - - lease - - lease/leasehttp - - lease/leasepb - - mvcc - - mvcc/backend - - mvcc/mvccpb - - pkg/adt - - pkg/contention - - pkg/cpuutil - - pkg/crc - - pkg/fileutil - - pkg/httputil - - pkg/idutil - - pkg/ioutil - - pkg/logutil - - pkg/monotime - - pkg/netutil - - pkg/pathutil - - pkg/pbutil - - pkg/runtime - - pkg/schedule - - pkg/testutil - - pkg/tlsutil - - pkg/transport - - pkg/types - - pkg/wait - - proxy/grpcproxy - - proxy/grpcproxy/cache - - raft - - raft/raftpb - - rafthttp - - snap - - snap/snappb - - store - - version - - wal - - wal/walpb - name: github.com/coreos/go-semver version: 294930c1e79c64e7dbe360054274fdad492c8cf5 subpackages: @@ -129,9 +64,7 @@ imports: - name: github.com/coreos/go-systemd version: 39ca1b05acc7ad1220e09f133283b8859a8b71ab subpackages: - - daemon - dbus - - journal - unit - name: github.com/coreos/ignition version: 76107251acd117c6d3e5b4dae2b47f82f944984b @@ -139,47 +72,19 @@ imports: - config/shared/errors - config/shared/validations - config/util - - config/v1 - - config/v1/types - - config/v2_0 - - config/v2_0/types - - config/v2_1 - - config/v2_1/types - - config/v2_2 - config/v2_2/types - config/v2_3_experimental/types - - config/validate - - config/validate/astjson - - config/validate/astnode - config/validate/report -- name: github.com/coreos/pkg - version: fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 - subpackages: - - capnslog - - health - - httputil - - timeutil - name: github.com/coreos/tectonic-config version: 0d649ebfd3552dfa5c6cc2cf053e17ba924b7024 subpackages: - config/kube-addon - config/kube-core - config/tectonic-network - - config/tectonic-utility -- name: github.com/davecgh/go-spew - version: 782f4967f2dc4564575ca782fe2d04090b5faca8 - subpackages: - - spew -- name: github.com/elazarl/go-bindata-assetfs - version: 3dcc96556217539f50599357fb481ac0dc7439b9 - name: github.com/emicklei/go-restful version: ff4f55a206334ef123e4f79bbf348980da81ca46 subpackages: - log -- name: github.com/emicklei/go-restful-swagger12 - version: dcef7f55730566d41eae5db10e7d6981829720f6 -- name: github.com/evanphx/json-patch - version: 944e07253867aacae43c04b2e6a239005443f33a - name: github.com/ghodss/yaml version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee - name: github.com/go-ini/ini @@ -199,60 +104,20 @@ imports: - sortkeys - name: github.com/golang/glog version: 44145f04b68cf362d9c4df2182967c2275eaefed -- name: github.com/golang/protobuf - version: 
1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 - subpackages: - - jsonpb - - proto - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/struct - - ptypes/timestamp - name: github.com/google/btree version: 7d79101e329e5a3adf994758c578dab82b90c017 - name: github.com/google/gofuzz version: 44d81051d367757e1c7c6a5a86423ece9afcf63c -- name: github.com/googleapis/gnostic - version: 0c5108395e2debce0d731cf0287ddf7242066aba - subpackages: - - OpenAPIv2 - - compiler - - extensions - name: github.com/gregjones/httpcache version: 787624de3eb7bd915c329cba748687a3b22666a6 subpackages: - diskcache -- name: github.com/grpc-ecosystem/go-grpc-prometheus - version: 2500245aa6110c562d17020fb31a2c133d737799 -- name: github.com/grpc-ecosystem/grpc-gateway - version: 84398b94e188ee336f307779b57b3aa91af7063c - subpackages: - - runtime - - runtime/internal - - utilities -- name: github.com/hashicorp/golang-lru - version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 - subpackages: - - simplelru -- name: github.com/howeyc/gopass - version: bf9dde6d0d2c004a008c27aaee91170c786f6db8 -- name: github.com/imdario/mergo - version: 6633656539c1639d9d78127b7d47c622b5d7b6dc - name: github.com/inconshreveable/mousetrap version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 - name: github.com/jmespath/go-jmespath version: c2b33e8439af944379acbdd9c3a5fe0bc44bd8a5 -- name: github.com/json-iterator/go - version: 36b14963da70d11297d313183d7e6388c8510e1e -- name: github.com/juju/ratelimit - version: 5b9ff866471762aa2ab2dced63c9fb6f53921342 - name: github.com/kballard/go-shellquote version: 95032a82bc518f77982ea72343cc1ade730072f0 -- name: github.com/kubernetes-incubator/apiserver-builder - version: 14201e804a58a140011a0044b15331f477535f91 - subpackages: - - pkg/builders - name: github.com/libvirt/libvirt-go version: d5e0adac44d4365a2d97e259f51ab76cc3c01b20 - name: github.com/mailru/easyjson @@ -265,18 +130,8 @@ imports: version: efa589957cd060542a26d2dd7832fd6a6c6c3ade - name: github.com/mattn/go-isatty version: 3fb116b820352b7f0c281308a4d6250c22d94e27 -- name: github.com/matttproud/golang_protobuf_extensions - version: fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a - subpackages: - - pbutil - name: github.com/mgutz/ansi version: 9520e82c474b0a04dd04f8a40959027271bab992 -- name: github.com/mxk/go-flowrate - version: cca7078d478f8520f85629ad7c68962d31ed7682 - subpackages: - - flowrate -- name: github.com/NYTimes/gziphandler - version: 56545f4a5d46df9a6648819d1664c3a03a13ffdb - name: github.com/openshift/hive version: 296cb990171c6da62e134f3590c120bd16ef4d43 subpackages: @@ -285,24 +140,6 @@ imports: version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 - name: github.com/peterbourgon/diskv version: 5f041e8faa004a95c88a202771f4cc3e991971e6 -- name: github.com/prometheus/client_golang - version: e7e903064f5e9eb5da98208bae10b475d4db0f8c - subpackages: - - prometheus -- name: github.com/prometheus/client_model - version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6 - subpackages: - - go -- name: github.com/prometheus/common - version: 13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207 - subpackages: - - expfmt - - internal/bitbucket.org/ww/goautoneg - - model -- name: github.com/prometheus/procfs - version: 65c1f6f8f0fc1e2185eb9863a3bc751496404259 - subpackages: - - xfs - name: github.com/PuerkitoBio/purell version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 - name: github.com/PuerkitoBio/urlesc @@ -313,10 +150,6 @@ imports: version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/pflag version: e57e3eeb33f795204c1ca35f56c44f83227c6e66 -- name: 
github.com/ugorji/go - version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74 - subpackages: - - codec - name: github.com/vincent-petithory/dataurl version: 9a301d65acbb728fcc3ace14f45f511a4cfeea9c - name: go4.org @@ -325,26 +158,20 @@ imports: - errorutil - name: golang.org/x/crypto version: df8d4716b3472e4a531c33cedbe537dae921a1a9 - subpackages: - - ssh/terminal - name: golang.org/x/net version: 1c05540f6879653db88113bc4a2b70aec4bd491f subpackages: - - context - html - html/atom - http2 - http2/hpack - idna - - internal/timeseries - lex/httplex - - trace - websocket - name: golang.org/x/sys version: 95c6576299259db960f6c5b9b69ea52422860fce subpackages: - unix - - windows - name: golang.org/x/text version: b19bf474d317b857955b12035d2c5acb57ce8b01 subpackages: @@ -359,26 +186,6 @@ imports: - unicode/bidi - unicode/norm - width -- name: google.golang.org/genproto - version: 09f6ed296fc66555a25fe4ce95173148778dfa85 - subpackages: - - googleapis/rpc/status -- name: google.golang.org/grpc - version: d2e1b51f33ff8c5e4a15560ff049d200e83726c5 - subpackages: - - codes - - credentials - - grpclb/grpc_lb_v1 - - grpclog - - internal - - keepalive - - metadata - - naming - - peer - - stats - - status - - tap - - transport - name: gopkg.in/AlecAivazis/survey.v1 version: f30c5d1830c892f533140f29a1de89141dc217f5 subpackages: @@ -390,309 +197,50 @@ imports: version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 - name: gopkg.in/yaml.v2 version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 -- name: k8s.io/api - version: af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a - subpackages: - - admission/v1beta1 - - admissionregistration/v1alpha1 - - admissionregistration/v1beta1 - - apps/v1 - - apps/v1beta1 - - apps/v1beta2 - - authentication/v1 - - authentication/v1beta1 - - authorization/v1 - - authorization/v1beta1 - - autoscaling/v1 - - autoscaling/v2beta1 - - batch/v1 - - batch/v1beta1 - - batch/v2alpha1 - - certificates/v1beta1 - - core/v1 - - events/v1beta1 - - extensions/v1beta1 - - networking/v1 - - policy/v1beta1 - - rbac/v1 - - rbac/v1alpha1 - - rbac/v1beta1 - - scheduling/v1alpha1 - - settings/v1alpha1 - - storage/v1 - - storage/v1alpha1 - - storage/v1beta1 - name: k8s.io/apimachinery version: 180eddb345a5be3a157cea1c624700ad5bd27b8f subpackages: - - pkg/api/equality - - pkg/api/errors - - pkg/api/meta - pkg/api/resource - - pkg/api/validation - - pkg/api/validation/path - - pkg/apimachinery - - pkg/apimachinery/announced - - pkg/apimachinery/registered - - pkg/apis/meta/internalversion - pkg/apis/meta/v1 - - pkg/apis/meta/v1/unstructured - - pkg/apis/meta/v1/validation - - pkg/apis/meta/v1alpha1 - pkg/conversion - pkg/conversion/queryparams - pkg/fields - pkg/labels - pkg/runtime - pkg/runtime/schema - - pkg/runtime/serializer - - pkg/runtime/serializer/json - - pkg/runtime/serializer/protobuf - - pkg/runtime/serializer/recognizer - - pkg/runtime/serializer/streaming - - pkg/runtime/serializer/versioning - pkg/selection - pkg/types - - pkg/util/cache - - pkg/util/clock - - pkg/util/diff - pkg/util/errors - - pkg/util/framer - - pkg/util/httpstream - pkg/util/intstr - pkg/util/json - - pkg/util/mergepatch - pkg/util/net - - pkg/util/proxy - pkg/util/rand - pkg/util/runtime - pkg/util/sets - - pkg/util/strategicpatch - - pkg/util/uuid - pkg/util/validation - pkg/util/validation/field - pkg/util/wait - - pkg/util/waitgroup - - pkg/util/yaml - - pkg/version - pkg/watch - - third_party/forked/golang/json - - third_party/forked/golang/netutil - third_party/forked/golang/reflect -- name: k8s.io/apiserver - version: 
91e14f394e4796abf5a994a349a222e7081d86b6 - subpackages: - - pkg/admission - - pkg/admission/configuration - - pkg/admission/initializer - - pkg/admission/metrics - - pkg/admission/plugin/initialization - - pkg/admission/plugin/namespace/lifecycle - - pkg/admission/plugin/webhook/config - - pkg/admission/plugin/webhook/config/apis/webhookadmission - - pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1 - - pkg/admission/plugin/webhook/errors - - pkg/admission/plugin/webhook/mutating - - pkg/admission/plugin/webhook/namespace - - pkg/admission/plugin/webhook/request - - pkg/admission/plugin/webhook/rules - - pkg/admission/plugin/webhook/validating - - pkg/admission/plugin/webhook/versioned - - pkg/apis/apiserver - - pkg/apis/apiserver/install - - pkg/apis/apiserver/v1alpha1 - - pkg/apis/audit - - pkg/apis/audit/v1alpha1 - - pkg/apis/audit/v1beta1 - - pkg/apis/audit/validation - - pkg/audit - - pkg/audit/policy - - pkg/authentication/authenticator - - pkg/authentication/authenticatorfactory - - pkg/authentication/group - - pkg/authentication/request/anonymous - - pkg/authentication/request/bearertoken - - pkg/authentication/request/headerrequest - - pkg/authentication/request/union - - pkg/authentication/request/websocket - - pkg/authentication/request/x509 - - pkg/authentication/serviceaccount - - pkg/authentication/token/tokenfile - - pkg/authentication/user - - pkg/authorization/authorizer - - pkg/authorization/authorizerfactory - - pkg/authorization/union - - pkg/endpoints - - pkg/endpoints/discovery - - pkg/endpoints/filters - - pkg/endpoints/handlers - - pkg/endpoints/handlers/negotiation - - pkg/endpoints/handlers/responsewriters - - pkg/endpoints/metrics - - pkg/endpoints/openapi - - pkg/endpoints/request - - pkg/features - - pkg/registry/generic - - pkg/registry/generic/registry - - pkg/registry/rest - - pkg/server - - pkg/server/filters - - pkg/server/healthz - - pkg/server/httplog - - pkg/server/mux - - pkg/server/routes - - pkg/server/routes/data/swagger - - pkg/storage - - pkg/storage/errors - - pkg/storage/etcd - - pkg/storage/etcd/metrics - - pkg/storage/etcd/util - - pkg/storage/etcd3 - - pkg/storage/names - - pkg/storage/storagebackend - - pkg/storage/storagebackend/factory - - pkg/storage/value - - pkg/util/feature - - pkg/util/flushwriter - - pkg/util/trace - - pkg/util/webhook - - pkg/util/wsstream - - plugin/pkg/authenticator/token/webhook - - plugin/pkg/authorizer/webhook - name: k8s.io/client-go version: 78700dec6369ba22221b72770783300f143df150 subpackages: - - discovery - - informers - - informers/admissionregistration - - informers/admissionregistration/v1alpha1 - - informers/admissionregistration/v1beta1 - - informers/apps - - informers/apps/v1 - - informers/apps/v1beta1 - - informers/apps/v1beta2 - - informers/autoscaling - - informers/autoscaling/v1 - - informers/autoscaling/v2beta1 - - informers/batch - - informers/batch/v1 - - informers/batch/v1beta1 - - informers/batch/v2alpha1 - - informers/certificates - - informers/certificates/v1beta1 - - informers/core - - informers/core/v1 - - informers/events - - informers/events/v1beta1 - - informers/extensions - - informers/extensions/v1beta1 - - informers/internalinterfaces - - informers/networking - - informers/networking/v1 - - informers/policy - - informers/policy/v1beta1 - - informers/rbac - - informers/rbac/v1 - - informers/rbac/v1alpha1 - - informers/rbac/v1beta1 - - informers/scheduling - - informers/scheduling/v1alpha1 - - informers/settings - - informers/settings/v1alpha1 - - informers/storage - 
- informers/storage/v1 - - informers/storage/v1alpha1 - - informers/storage/v1beta1 - - kubernetes - - kubernetes/scheme - - kubernetes/typed/admissionregistration/v1alpha1 - - kubernetes/typed/admissionregistration/v1beta1 - - kubernetes/typed/apps/v1 - - kubernetes/typed/apps/v1beta1 - - kubernetes/typed/apps/v1beta2 - - kubernetes/typed/authentication/v1 - - kubernetes/typed/authentication/v1beta1 - - kubernetes/typed/authorization/v1 - - kubernetes/typed/authorization/v1beta1 - - kubernetes/typed/autoscaling/v1 - - kubernetes/typed/autoscaling/v2beta1 - - kubernetes/typed/batch/v1 - - kubernetes/typed/batch/v1beta1 - - kubernetes/typed/batch/v2alpha1 - - kubernetes/typed/certificates/v1beta1 - - kubernetes/typed/core/v1 - - kubernetes/typed/events/v1beta1 - - kubernetes/typed/extensions/v1beta1 - - kubernetes/typed/networking/v1 - - kubernetes/typed/policy/v1beta1 - - kubernetes/typed/rbac/v1 - - kubernetes/typed/rbac/v1alpha1 - - kubernetes/typed/rbac/v1beta1 - - kubernetes/typed/scheduling/v1alpha1 - - kubernetes/typed/settings/v1alpha1 - - kubernetes/typed/storage/v1 - - kubernetes/typed/storage/v1alpha1 - - kubernetes/typed/storage/v1beta1 - - listers/admissionregistration/v1alpha1 - - listers/admissionregistration/v1beta1 - - listers/apps/v1 - - listers/apps/v1beta1 - - listers/apps/v1beta2 - - listers/autoscaling/v1 - - listers/autoscaling/v2beta1 - - listers/batch/v1 - - listers/batch/v1beta1 - - listers/batch/v2alpha1 - - listers/certificates/v1beta1 - - listers/core/v1 - - listers/events/v1beta1 - - listers/extensions/v1beta1 - - listers/networking/v1 - - listers/policy/v1beta1 - - listers/rbac/v1 - - listers/rbac/v1alpha1 - - listers/rbac/v1beta1 - - listers/scheduling/v1alpha1 - - listers/settings/v1alpha1 - - listers/storage/v1 - - listers/storage/v1alpha1 - - listers/storage/v1beta1 - - pkg/version - - rest - - rest/watch - - tools/auth - - tools/cache - - tools/clientcmd - tools/clientcmd/api - - tools/clientcmd/api/latest - tools/clientcmd/api/v1 - - tools/metrics - - tools/pager - - tools/reference - - transport - - util/buffer - - util/cert - - util/flowcontrol - - util/homedir - - util/integer - name: k8s.io/kube-openapi version: 39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1 subpackages: - - pkg/builder - pkg/common - - pkg/handler - - pkg/util - pkg/util/proto - name: sigs.k8s.io/cluster-api version: fbc85d97affbf5c9ad97054c66b23ad2bc0ca097 subpackages: - - pkg/apis/cluster - - pkg/apis/cluster/common - - pkg/apis/cluster/v1alpha1 - pkg/client/clientset_generated/clientset - - pkg/client/clientset_generated/clientset/scheme - - pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1 testImports: +- name: github.com/davecgh/go-spew + version: 782f4967f2dc4564575ca782fe2d04090b5faca8 + subpackages: + - spew - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: diff --git a/vendor/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 648b38cb654..00000000000 --- a/vendor/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. 
-func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE.md b/vendor/github.com/NYTimes/gziphandler/LICENSE.md deleted file mode 100644 index b7e2ecb63f9..00000000000 --- a/vendor/github.com/NYTimes/gziphandler/LICENSE.md +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2015 The New York Times Company - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this library except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go deleted file mode 100644 index ea6dba1e79d..00000000000 --- a/vendor/github.com/NYTimes/gziphandler/gzip.go +++ /dev/null @@ -1,332 +0,0 @@ -package gziphandler - -import ( - "bufio" - "compress/gzip" - "fmt" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" -) - -const ( - vary = "Vary" - acceptEncoding = "Accept-Encoding" - contentEncoding = "Content-Encoding" - contentType = "Content-Type" - contentLength = "Content-Length" -) - -type codings map[string]float64 - -const ( - // DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set. - // This is actually kind of ambiguous in RFC 2616, so hopefully it's correct. - // The examples seem to indicate that it is. - DefaultQValue = 1.0 - - // DefaultMinSize defines the minimum size to reach to enable compression. - // It's 512 bytes. - DefaultMinSize = 512 -) - -// gzipWriterPools stores a sync.Pool for each compression level for reuse of -// gzip.Writers. Use poolIndex to covert a compression level to an index into -// gzipWriterPools. -var gzipWriterPools [gzip.BestCompression - gzip.BestSpeed + 2]*sync.Pool - -func init() { - for i := gzip.BestSpeed; i <= gzip.BestCompression; i++ { - addLevelPool(i) - } - addLevelPool(gzip.DefaultCompression) -} - -// poolIndex maps a compression level to its index into gzipWriterPools. It -// assumes that level is a valid gzip compression level. -func poolIndex(level int) int { - // gzip.DefaultCompression == -1, so we need to treat it special. - if level == gzip.DefaultCompression { - return gzip.BestCompression - gzip.BestSpeed + 1 - } - return level - gzip.BestSpeed -} - -func addLevelPool(level int) { - gzipWriterPools[poolIndex(level)] = &sync.Pool{ - New: func() interface{} { - // NewWriterLevel only returns error on a bad level, we are guaranteeing - // that this will be a valid level so it is okay to ignore the returned - // error. 
- w, _ := gzip.NewWriterLevel(nil, level) - return w - }, - } -} - -// GzipResponseWriter provides an http.ResponseWriter interface, which gzips -// bytes before writing them to the underlying response. This doesn't close the -// writers, so don't forget to do that. -// It can be configured to skip response smaller than minSize. -type GzipResponseWriter struct { - http.ResponseWriter - index int // Index for gzipWriterPools. - gw *gzip.Writer - - code int // Saves the WriteHeader value. - - minSize int // Specifed the minimum response size to gzip. If the response length is bigger than this value, it is compressed. - buf []byte // Holds the first part of the write before reaching the minSize or the end of the write. -} - -// Write appends data to the gzip writer. -func (w *GzipResponseWriter) Write(b []byte) (int, error) { - // If content type is not set. - if _, ok := w.Header()[contentType]; !ok { - // It infer it from the uncompressed body. - w.Header().Set(contentType, http.DetectContentType(b)) - } - - // GZIP responseWriter is initialized. Use the GZIP responseWriter. - if w.gw != nil { - n, err := w.gw.Write(b) - return n, err - } - - // Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter. - // On the first write, w.buf changes from nil to a valid slice - w.buf = append(w.buf, b...) - - // If the global writes are bigger than the minSize, compression is enable. - if len(w.buf) >= w.minSize { - err := w.startGzip() - if err != nil { - return 0, err - } - } - - return len(b), nil -} - -// startGzip initialize any GZIP specific informations. -func (w *GzipResponseWriter) startGzip() error { - - // Set the GZIP header. - w.Header().Set(contentEncoding, "gzip") - - // if the Content-Length is already set, then calls to Write on gzip - // will fail to set the Content-Length header since its already set - // See: https://github.com/golang/go/issues/14975. - w.Header().Del(contentLength) - - // Write the header to gzip response. - if w.code != 0 { - w.ResponseWriter.WriteHeader(w.code) - } - - // Initialize the GZIP response. - w.init() - - // Flush the buffer into the gzip reponse. - n, err := w.gw.Write(w.buf) - - // This should never happen (per io.Writer docs), but if the write didn't - // accept the entire buffer but returned no specific error, we have no clue - // what's going on, so abort just to be safe. - if err == nil && n < len(w.buf) { - return io.ErrShortWrite - } - - w.buf = nil - return err -} - -// WriteHeader just saves the response code until close or GZIP effective writes. -func (w *GzipResponseWriter) WriteHeader(code int) { - w.code = code -} - -// init graps a new gzip writer from the gzipWriterPool and writes the correct -// content encoding header. -func (w *GzipResponseWriter) init() { - // Bytes written during ServeHTTP are redirected to this gzip writer - // before being written to the underlying response. - gzw := gzipWriterPools[w.index].Get().(*gzip.Writer) - gzw.Reset(w.ResponseWriter) - w.gw = gzw -} - -// Close will close the gzip.Writer and will put it back in the gzipWriterPool. -func (w *GzipResponseWriter) Close() error { - if w.gw == nil { - // Gzip not trigged yet, write out regular response. - if w.code != 0 { - w.ResponseWriter.WriteHeader(w.code) - } - if w.buf != nil { - _, writeErr := w.ResponseWriter.Write(w.buf) - // Returns the error if any at write. 
- if writeErr != nil { - return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error()) - } - } - return nil - } - - err := w.gw.Close() - gzipWriterPools[w.index].Put(w.gw) - w.gw = nil - return err -} - -// Flush flushes the underlying *gzip.Writer and then the underlying -// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter -// an http.Flusher. -func (w *GzipResponseWriter) Flush() { - if w.gw != nil { - w.gw.Flush() - } - - if fw, ok := w.ResponseWriter.(http.Flusher); ok { - fw.Flush() - } -} - -// Hijack implements http.Hijacker. If the underlying ResponseWriter is a -// Hijacker, its Hijack method is returned. Otherwise an error is returned. -func (w *GzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if hj, ok := w.ResponseWriter.(http.Hijacker); ok { - return hj.Hijack() - } - return nil, nil, fmt.Errorf("http.Hijacker interface is not supported") -} - -// verify Hijacker interface implementation -var _ http.Hijacker = &GzipResponseWriter{} - -// MustNewGzipLevelHandler behaves just like NewGzipLevelHandler except that in -// an error case it panics rather than returning an error. -func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler { - wrap, err := NewGzipLevelHandler(level) - if err != nil { - panic(err) - } - return wrap -} - -// NewGzipLevelHandler returns a wrapper function (often known as middleware) -// which can be used to wrap an HTTP handler to transparently gzip the response -// body if the client supports it (via the Accept-Encoding header). Responses will -// be encoded at the given gzip compression level. An error will be returned only -// if an invalid gzip compression level is given, so if one can ensure the level -// is valid, the returned error can be safely ignored. -func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) { - return NewGzipLevelAndMinSize(level, DefaultMinSize) -} - -// NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller -// specify the minimum size before compression. -func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) { - if level != gzip.DefaultCompression && (level < gzip.BestSpeed || level > gzip.BestCompression) { - return nil, fmt.Errorf("invalid compression level requested: %d", level) - } - if minSize < 0 { - return nil, fmt.Errorf("minimum size must be more than zero") - } - return func(h http.Handler) http.Handler { - index := poolIndex(level) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add(vary, acceptEncoding) - - if acceptsGzip(r) { - gw := &GzipResponseWriter{ - ResponseWriter: w, - index: index, - minSize: minSize, - } - defer gw.Close() - - h.ServeHTTP(gw, r) - } else { - h.ServeHTTP(w, r) - } - }) - }, nil -} - -// GzipHandler wraps an HTTP handler, to transparently gzip the response body if -// the client supports it (via the Accept-Encoding header). This will compress at -// the default compression level. -func GzipHandler(h http.Handler) http.Handler { - wrapper, _ := NewGzipLevelHandler(gzip.DefaultCompression) - return wrapper(h) -} - -// acceptsGzip returns true if the given HTTP request indicates that it will -// accept a gzipped response. 
-func acceptsGzip(r *http.Request) bool { - acceptedEncodings, _ := parseEncodings(r.Header.Get(acceptEncoding)) - return acceptedEncodings["gzip"] > 0.0 -} - -// parseEncodings attempts to parse a list of codings, per RFC 2616, as might -// appear in an Accept-Encoding header. It returns a map of content-codings to -// quality values, and an error containing the errors encountered. It's probably -// safe to ignore those, because silently ignoring errors is how the internet -// works. -// -// See: http://tools.ietf.org/html/rfc2616#section-14.3. -func parseEncodings(s string) (codings, error) { - c := make(codings) - var e []string - - for _, ss := range strings.Split(s, ",") { - coding, qvalue, err := parseCoding(ss) - - if err != nil { - e = append(e, err.Error()) - } else { - c[coding] = qvalue - } - } - - // TODO (adammck): Use a proper multi-error struct, so the individual errors - // can be extracted if anyone cares. - if len(e) > 0 { - return c, fmt.Errorf("errors while parsing encodings: %s", strings.Join(e, ", ")) - } - - return c, nil -} - -// parseCoding parses a single conding (content-coding with an optional qvalue), -// as might appear in an Accept-Encoding header. It attempts to forgive minor -// formatting errors. -func parseCoding(s string) (coding string, qvalue float64, err error) { - for n, part := range strings.Split(s, ";") { - part = strings.TrimSpace(part) - qvalue = DefaultQValue - - if n == 0 { - coding = strings.ToLower(part) - } else if strings.HasPrefix(part, "q=") { - qvalue, err = strconv.ParseFloat(strings.TrimPrefix(part, "q="), 64) - - if qvalue < 0.0 { - qvalue = 0.0 - } else if qvalue > 1.0 { - qvalue = 1.0 - } - } - } - - if coding == "" { - err = fmt.Errorf("empty content-coding") - } - - return -} diff --git a/vendor/github.com/NYTimes/gziphandler/gzip_go18.go b/vendor/github.com/NYTimes/gziphandler/gzip_go18.go deleted file mode 100644 index fa9665b7e80..00000000000 --- a/vendor/github.com/NYTimes/gziphandler/gzip_go18.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build go1.8 - -package gziphandler - -import "net/http" - -// Push initiates an HTTP/2 server push. -// Push returns ErrNotSupported if the client has disabled push or if push -// is not supported on the underlying connection. -func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error { - pusher, ok := w.ResponseWriter.(http.Pusher) - if ok && pusher != nil { - return pusher.Push(target, setAcceptEncodingForPushOptions(opts)) - } - return http.ErrNotSupported -} - -// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers. 
-func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions { - - if opts == nil { - opts = &http.PushOptions{ - Header: http.Header{ - acceptEncoding: []string{"gzip"}, - }, - } - return opts - } - - if opts.Header == nil { - opts.Header = http.Header{ - acceptEncoding: []string{"gzip"}, - } - return opts - } - - if encoding := opts.Header.Get(acceptEncoding); encoding == "" { - opts.Header.Add(acceptEncoding, "gzip") - return opts - } - - return opts -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 005bde0daaf..4bd182ab2e5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -1415,10 +1415,11 @@ var awsPartition = partition{ "iotanalytics": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "kinesis": service{ @@ -2109,11 +2110,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -2618,6 +2624,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "codedeploy": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 4ae7a5c4d7d..be84dd516e4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.15.42" +const SDKVersion = "1.15.43" diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177be663..00000000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index 587b1fc5ba8..00000000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,292 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. 
-func NewTargeted(targets map[float64]float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for quantile, epsilon := range targets { - if quantile*s.n <= r { - f = (2 * epsilon * r) / quantile - } else { - f = (2 * epsilon * (s.n - r)) / (1 - quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(float64(l) * q) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? 
- } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/github.com/coreos/etcd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE deleted file mode 100644 index b39ddfa5cbd..00000000000 --- a/vendor/github.com/coreos/etcd/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/etcd/alarm/alarms.go b/vendor/github.com/coreos/etcd/alarm/alarms.go deleted file mode 100644 index 4f0ebe93f3b..00000000000 --- a/vendor/github.com/coreos/etcd/alarm/alarms.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package alarm manages health status alarms in etcd. -package alarm - -import ( - "sync" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/pkg/capnslog" -) - -var ( - alarmBucketName = []byte("alarm") - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "alarm") -) - -type BackendGetter interface { - Backend() backend.Backend -} - -type alarmSet map[types.ID]*pb.AlarmMember - -// AlarmStore persists alarms to the backend. -type AlarmStore struct { - mu sync.Mutex - types map[pb.AlarmType]alarmSet - - bg BackendGetter -} - -func NewAlarmStore(bg BackendGetter) (*AlarmStore, error) { - ret := &AlarmStore{types: make(map[pb.AlarmType]alarmSet), bg: bg} - err := ret.restore() - return ret, err -} - -func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember { - a.mu.Lock() - defer a.mu.Unlock() - - newAlarm := &pb.AlarmMember{MemberID: uint64(id), Alarm: at} - if m := a.addToMap(newAlarm); m != newAlarm { - return m - } - - v, err := newAlarm.Marshal() - if err != nil { - plog.Panicf("failed to marshal alarm member") - } - - b := a.bg.Backend() - b.BatchTx().Lock() - b.BatchTx().UnsafePut(alarmBucketName, v, nil) - b.BatchTx().Unlock() - - return newAlarm -} - -func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember { - a.mu.Lock() - defer a.mu.Unlock() - - t := a.types[at] - if t == nil { - t = make(alarmSet) - a.types[at] = t - } - m := t[id] - if m == nil { - return nil - } - - delete(t, id) - - v, err := m.Marshal() - if err != nil { - plog.Panicf("failed to marshal alarm member") - } - - b := a.bg.Backend() - b.BatchTx().Lock() - b.BatchTx().UnsafeDelete(alarmBucketName, v) - b.BatchTx().Unlock() - - return m -} - -func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) { - a.mu.Lock() - defer a.mu.Unlock() - if at == pb.AlarmType_NONE { - for _, t := range a.types { - for _, m := range t { - ret = append(ret, m) - } - } - return ret - } - for _, m := range a.types[at] { - ret = append(ret, m) - } - return ret -} - -func (a *AlarmStore) restore() error { - b := a.bg.Backend() - tx := b.BatchTx() - - tx.Lock() - tx.UnsafeCreateBucket(alarmBucketName) - err := tx.UnsafeForEach(alarmBucketName, func(k, v []byte) error { - var m pb.AlarmMember - if err := m.Unmarshal(k); err != nil { - return err - } - a.addToMap(&m) - return nil - }) - tx.Unlock() - - b.ForceCommit() - return err -} - -func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember { - t := a.types[newAlarm.Alarm] - if t == nil { - t = make(alarmSet) - a.types[newAlarm.Alarm] = t - } - m := t[types.ID(newAlarm.MemberID)] - if m != nil { - return m - } - t[types.ID(newAlarm.MemberID)] = newAlarm - return newAlarm -} diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go deleted file mode 100644 index c6e2a12a7fa..00000000000 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go +++ /dev/null @@ -1,824 +0,0 @@ -// Code generated by protoc-gen-gogo. 
-// source: auth.proto -// DO NOT EDIT! - -/* - Package authpb is a generated protocol buffer package. - - It is generated from these files: - auth.proto - - It has these top-level messages: - User - Permission - Role -*/ -package authpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Permission_Type int32 - -const ( - READ Permission_Type = 0 - WRITE Permission_Type = 1 - READWRITE Permission_Type = 2 -) - -var Permission_Type_name = map[int32]string{ - 0: "READ", - 1: "WRITE", - 2: "READWRITE", -} -var Permission_Type_value = map[string]int32{ - "READ": 0, - "WRITE": 1, - "READWRITE": 2, -} - -func (x Permission_Type) String() string { - return proto.EnumName(Permission_Type_name, int32(x)) -} -func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} } - -// User is a single entry in the bucket authUsers -type User struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } - -// Permission is a single entity -type Permission struct { - PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` -} - -func (m *Permission) Reset() { *m = Permission{} } -func (m *Permission) String() string { return proto.CompactTextString(m) } -func (*Permission) ProtoMessage() {} -func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } - -// Role is a single entry in the bucket authRoles -type Role struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"` -} - -func (m *Role) Reset() { *m = Role{} } -func (m *Role) String() string { return proto.CompactTextString(m) } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} } - -func init() { - proto.RegisterType((*User)(nil), "authpb.User") - proto.RegisterType((*Permission)(nil), "authpb.Permission") - proto.RegisterType((*Role)(nil), "authpb.Role") - proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) -} -func (m *User) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *User) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l 
- if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *Permission) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Permission) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PermType != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) - } - if len(m.Key) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if len(m.RangeEnd) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) - i += copy(dAtA[i:], m.RangeEnd) - } - return i, nil -} - -func (m *Role) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.KeyPermission) > 0 { - for _, msg := range m.KeyPermission { - dAtA[i] = 0x12 - i++ - i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Auth(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Auth(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *User) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovAuth(uint64(l)) - } - } - return n -} - -func (m *Permission) Size() (n int) { - var l int - _ = l - if m.PermType != 0 { - n += 1 + sovAuth(uint64(m.PermType)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func (m *Role) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.KeyPermission) > 0 { - for _, e := range m.KeyPermission { - l = e.Size() - n += 1 + l + sovAuth(uint64(l)) - } - } - return n -} - -func sovAuth(x 
uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozAuth(x uint64) (n int) { - return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *User) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: User: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) - if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...) 
- if m.Password == nil { - m.Password = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Permission) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Permission: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType) - } - m.PermType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PermType |= (Permission_Type(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
- if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Role) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Role: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) - if m.Name == nil { - m.Name = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KeyPermission = append(m.KeyPermission, &Permission{}) - if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAuth(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthAuth - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipAuth(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } - -var fileDescriptorAuth = []byte{ - // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, - 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, - 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, - 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, - 0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d, - 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd, - 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51, - 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef, - 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00, - 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc, - 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70, - 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41, - 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc, - 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b, - 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1, - 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee, - 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4, - 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/auth/doc.go b/vendor/github.com/coreos/etcd/auth/doc.go deleted file mode 100644 index 72741a10774..00000000000 --- a/vendor/github.com/coreos/etcd/auth/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package auth provides client role authentication for accessing keys in etcd. -package auth diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go deleted file mode 100644 index 3cd1ad2a411..00000000000 --- a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "bytes" - "sort" - - "github.com/coreos/etcd/auth/authpb" - "github.com/coreos/etcd/mvcc/backend" -) - -// isSubset returns true if a is a subset of b. -// If a is a prefix of b, then a is a subset of b. -// Given intervals [a1,a2) and [b1,b2), is -// the a interval a subset of b? -func isSubset(a, b *rangePerm) bool { - switch { - case len(a.end) == 0 && len(b.end) == 0: - // a, b are both keys - return bytes.Equal(a.begin, b.begin) - case len(b.end) == 0: - // b is a key, a is a range - return false - case len(a.end) == 0: - // a is a key, b is a range. need b1 <= a1 and a1 < b2 - return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.begin, b.end) < 0 - default: - // both are ranges. need b1 <= a1 and a2 <= b2 - return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.end, b.end) <= 0 - } -} - -func isRangeEqual(a, b *rangePerm) bool { - return bytes.Equal(a.begin, b.begin) && bytes.Equal(a.end, b.end) -} - -// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms. -// If there are equal ranges, removeSubsetRangePerms only keeps one of them. -// It returns a sorted rangePerm slice. -func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) { - sort.Sort(RangePermSliceByBegin(perms)) - var prev *rangePerm - for i := range perms { - if i == 0 { - prev = perms[i] - newp = append(newp, perms[i]) - continue - } - if isRangeEqual(perms[i], prev) { - continue - } - if isSubset(perms[i], prev) { - continue - } - if isSubset(prev, perms[i]) { - prev = perms[i] - newp[len(newp)-1] = perms[i] - continue - } - prev = perms[i] - newp = append(newp, perms[i]) - } - return newp -} - -// mergeRangePerms merges adjacent rangePerms. -func mergeRangePerms(perms []*rangePerm) []*rangePerm { - var merged []*rangePerm - perms = removeSubsetRangePerms(perms) - - i := 0 - for i < len(perms) { - begin, next := i, i - for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) >= 0 { - next++ - } - // don't merge ["a", "b") with ["b", ""), because perms[next+1].end is empty. 
- if next != begin && len(perms[next].end) > 0 { - merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end}) - } else { - merged = append(merged, perms[begin]) - if next != begin { - merged = append(merged, perms[next]) - } - } - i = next + 1 - } - - return merged -} - -func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { - user := getUser(tx, userName) - if user == nil { - plog.Errorf("invalid user name %s", userName) - return nil - } - - var readPerms, writePerms []*rangePerm - - for _, roleName := range user.Roles { - role := getRole(tx, roleName) - if role == nil { - continue - } - - for _, perm := range role.KeyPermission { - rp := &rangePerm{begin: perm.Key, end: perm.RangeEnd} - - switch perm.PermType { - case authpb.READWRITE: - readPerms = append(readPerms, rp) - writePerms = append(writePerms, rp) - - case authpb.READ: - readPerms = append(readPerms, rp) - - case authpb.WRITE: - writePerms = append(writePerms, rp) - } - } - } - - return &unifiedRangePermissions{ - readPerms: mergeRangePerms(readPerms), - writePerms: mergeRangePerms(writePerms), - } -} - -func checkKeyPerm(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - var tocheck []*rangePerm - - switch permtyp { - case authpb.READ: - tocheck = cachedPerms.readPerms - case authpb.WRITE: - tocheck = cachedPerms.writePerms - default: - plog.Panicf("unknown auth type: %v", permtyp) - } - - requiredPerm := &rangePerm{begin: key, end: rangeEnd} - - for _, perm := range tocheck { - if isSubset(requiredPerm, perm) { - return true - } - } - - return false -} - -func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - // assumption: tx is Lock()ed - _, ok := as.rangePermCache[userName] - if !ok { - perms := getMergedPerms(tx, userName) - if perms == nil { - plog.Errorf("failed to create a unified permission of user %s", userName) - return false - } - as.rangePermCache[userName] = perms - } - - return checkKeyPerm(as.rangePermCache[userName], key, rangeEnd, permtyp) -} - -func (as *authStore) clearCachedPerm() { - as.rangePermCache = make(map[string]*unifiedRangePermissions) -} - -func (as *authStore) invalidateCachedPerm(userName string) { - delete(as.rangePermCache, userName) -} - -type unifiedRangePermissions struct { - // readPerms[i] and readPerms[j] (i != j) don't overlap - readPerms []*rangePerm - // writePerms[i] and writePerms[j] (i != j) don't overlap, too - writePerms []*rangePerm -} - -type rangePerm struct { - begin, end []byte -} - -type RangePermSliceByBegin []*rangePerm - -func (slice RangePermSliceByBegin) Len() int { - return len(slice) -} - -func (slice RangePermSliceByBegin) Less(i, j int) bool { - switch bytes.Compare(slice[i].begin, slice[j].begin) { - case 0: // begin(i) == begin(j) - return bytes.Compare(slice[i].end, slice[j].end) == -1 - - case -1: // begin(i) < begin(j) - return true - - default: - return false - } -} - -func (slice RangePermSliceByBegin) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go deleted file mode 100644 index a39f3927685..00000000000 --- a/vendor/github.com/coreos/etcd/auth/simple_token.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -// CAUTION: This randum number based token mechanism is only for testing purpose. -// JWT based mechanism will be added in the near future. - -import ( - "crypto/rand" - "math/big" - "strings" - "sync" - "time" -) - -const ( - letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - defaultSimpleTokenLength = 16 -) - -// var for testing purposes -var ( - simpleTokenTTL = 5 * time.Minute - simpleTokenTTLResolution = 1 * time.Second -) - -type simpleTokenTTLKeeper struct { - tokens map[string]time.Time - donec chan struct{} - stopc chan struct{} - deleteTokenFunc func(string) - mu *sync.Mutex -} - -func (tm *simpleTokenTTLKeeper) stop() { - select { - case tm.stopc <- struct{}{}: - case <-tm.donec: - } - <-tm.donec -} - -func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) { - tm.tokens[token] = time.Now().Add(simpleTokenTTL) -} - -func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) { - if _, ok := tm.tokens[token]; ok { - tm.tokens[token] = time.Now().Add(simpleTokenTTL) - } -} - -func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) { - delete(tm.tokens, token) -} - -func (tm *simpleTokenTTLKeeper) run() { - tokenTicker := time.NewTicker(simpleTokenTTLResolution) - defer func() { - tokenTicker.Stop() - close(tm.donec) - }() - for { - select { - case <-tokenTicker.C: - nowtime := time.Now() - tm.mu.Lock() - for t, tokenendtime := range tm.tokens { - if nowtime.After(tokenendtime) { - tm.deleteTokenFunc(t) - delete(tm.tokens, t) - } - } - tm.mu.Unlock() - case <-tm.stopc: - return - } - } -} - -func (as *authStore) enable() { - delf := func(tk string) { - if username, ok := as.simpleTokens[tk]; ok { - plog.Infof("deleting token %s for user %s", tk, username) - delete(as.simpleTokens, tk) - } - } - as.simpleTokenKeeper = &simpleTokenTTLKeeper{ - tokens: make(map[string]time.Time), - donec: make(chan struct{}), - stopc: make(chan struct{}), - deleteTokenFunc: delf, - mu: &as.simpleTokensMu, - } - go as.simpleTokenKeeper.run() -} - -func (as *authStore) GenSimpleToken() (string, error) { - ret := make([]byte, defaultSimpleTokenLength) - - for i := 0; i < defaultSimpleTokenLength; i++ { - bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) - if err != nil { - return "", err - } - - ret[i] = letters[bInt.Int64()] - } - - return string(ret), nil -} - -func (as *authStore) assignSimpleTokenToUser(username, token string) { - as.simpleTokensMu.Lock() - _, ok := as.simpleTokens[token] - if ok { - plog.Panicf("token %s is alredy used", token) - } - - as.simpleTokens[token] = username - as.simpleTokenKeeper.addSimpleToken(token) - as.simpleTokensMu.Unlock() -} - -func (as *authStore) invalidateUser(username string) { - if as.simpleTokenKeeper == nil { - return - } - as.simpleTokensMu.Lock() - for token, name := range as.simpleTokens { - if strings.Compare(name, username) == 0 { - delete(as.simpleTokens, token) - as.simpleTokenKeeper.deleteSimpleToken(token) - } - } - as.simpleTokensMu.Unlock() -} diff --git a/vendor/github.com/coreos/etcd/auth/store.go 
b/vendor/github.com/coreos/etcd/auth/store.go deleted file mode 100644 index 236bb2c529d..00000000000 --- a/vendor/github.com/coreos/etcd/auth/store.go +++ /dev/null @@ -1,999 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "sort" - "strconv" - "strings" - "sync" - - "github.com/coreos/etcd/auth/authpb" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/pkg/capnslog" - "golang.org/x/crypto/bcrypt" - "golang.org/x/net/context" - "google.golang.org/grpc/metadata" -) - -var ( - enableFlagKey = []byte("authEnabled") - authEnabled = []byte{1} - authDisabled = []byte{0} - - revisionKey = []byte("authRevision") - - authBucketName = []byte("auth") - authUsersBucketName = []byte("authUsers") - authRolesBucketName = []byte("authRoles") - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth") - - ErrRootUserNotExist = errors.New("auth: root user does not exist") - ErrRootRoleNotExist = errors.New("auth: root user does not have root role") - ErrUserAlreadyExist = errors.New("auth: user already exists") - ErrUserEmpty = errors.New("auth: user name is empty") - ErrUserNotFound = errors.New("auth: user not found") - ErrRoleAlreadyExist = errors.New("auth: role already exists") - ErrRoleNotFound = errors.New("auth: role not found") - ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password") - ErrPermissionDenied = errors.New("auth: permission denied") - ErrRoleNotGranted = errors.New("auth: role is not granted to the user") - ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role") - ErrAuthNotEnabled = errors.New("auth: authentication is not enabled") - ErrAuthOldRevision = errors.New("auth: revision in header is old") - ErrInvalidAuthToken = errors.New("auth: invalid auth token") - - // BcryptCost is the algorithm cost / strength for hashing auth passwords - BcryptCost = bcrypt.DefaultCost -) - -const ( - rootUser = "root" - rootRole = "root" - - revBytesLen = 8 -) - -type AuthInfo struct { - Username string - Revision uint64 -} - -type AuthStore interface { - // AuthEnable turns on the authentication feature - AuthEnable() error - - // AuthDisable turns off the authentication feature - AuthDisable() - - // Authenticate does authentication based on given user name and password - Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) - - // Recover recovers the state of auth store from the given backend - Recover(b backend.Backend) - - // UserAdd adds a new user - UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - - // UserDelete deletes a user - UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - - // UserChangePassword changes a password of a user - UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - - // 
UserGrantRole grants a role to the user - UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - - // UserGet gets the detailed information of a users - UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - - // UserRevokeRole revokes a role of a user - UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - - // RoleAdd adds a new role - RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - - // RoleGrantPermission grants a permission to a role - RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - - // RoleGet gets the detailed information of a role - RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - - // RoleRevokePermission gets the detailed information of a role - RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - - // RoleDelete gets the detailed information of a role - RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - - // UserList gets a list of all users - UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - - // RoleList gets a list of all roles - RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) - - // AuthInfoFromToken gets a username from the given Token and current revision number - // (The revision number is used for preventing the TOCTOU problem) - AuthInfoFromToken(token string) (*AuthInfo, bool) - - // IsPutPermitted checks put permission of the user - IsPutPermitted(authInfo *AuthInfo, key []byte) error - - // IsRangePermitted checks range permission of the user - IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error - - // IsDeleteRangePermitted checks delete-range permission of the user - IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error - - // IsAdminPermitted checks admin permission of the user - IsAdminPermitted(authInfo *AuthInfo) error - - // GenSimpleToken produces a simple random string - GenSimpleToken() (string, error) - - // Revision gets current revision of authStore - Revision() uint64 - - // CheckPassword checks a given pair of username and password is correct - CheckPassword(username, password string) (uint64, error) - - // Close does cleanup of AuthStore - Close() error - - // AuthInfoFromCtx gets AuthInfo from gRPC's context - AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) -} - -type authStore struct { - be backend.Backend - enabled bool - enabledMu sync.RWMutex - - rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions - - revision uint64 - - // tokenSimple in v3.2+ - indexWaiter func(uint64) <-chan struct{} - simpleTokenKeeper *simpleTokenTTLKeeper - simpleTokensMu sync.Mutex - simpleTokens map[string]string // token -> username -} - -func newDeleterFunc(as *authStore) func(string) { - return func(t string) { - as.simpleTokensMu.Lock() - defer as.simpleTokensMu.Unlock() - if username, ok := as.simpleTokens[t]; ok { - plog.Infof("deleting token %s for user %s", t, username) - delete(as.simpleTokens, t) - } - } -} - -func (as *authStore) AuthEnable() error { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if as.enabled { - plog.Noticef("Authentication already enabled") - return nil - } - b := as.be - tx := b.BatchTx() - tx.Lock() - defer func() { - tx.Unlock() - b.ForceCommit() - }() - - u := getUser(tx, rootUser) - if u == nil { - return ErrRootUserNotExist - } - - if !hasRootRole(u) { 
- return ErrRootRoleNotExist - } - - tx.UnsafePut(authBucketName, enableFlagKey, authEnabled) - - as.enabled = true - as.enable() - - as.rangePermCache = make(map[string]*unifiedRangePermissions) - - as.revision = getRevision(tx) - - plog.Noticef("Authentication enabled") - - return nil -} - -func (as *authStore) AuthDisable() { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if !as.enabled { - return - } - b := as.be - tx := b.BatchTx() - tx.Lock() - tx.UnsafePut(authBucketName, enableFlagKey, authDisabled) - as.commitRevision(tx) - tx.Unlock() - b.ForceCommit() - - as.enabled = false - - as.simpleTokensMu.Lock() - tk := as.simpleTokenKeeper - as.simpleTokenKeeper = nil - as.simpleTokens = make(map[string]string) // invalidate all tokens - as.simpleTokensMu.Unlock() - if tk != nil { - tk.stop() - } - - plog.Noticef("Authentication disabled") -} - -func (as *authStore) Close() error { - as.enabledMu.Lock() - defer as.enabledMu.Unlock() - if !as.enabled { - return nil - } - if as.simpleTokenKeeper != nil { - as.simpleTokenKeeper.stop() - as.simpleTokenKeeper = nil - } - return nil -} - -func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) { - if !as.isAuthEnabled() { - return nil, ErrAuthNotEnabled - } - - // TODO(mitake): after adding jwt support, branching based on values of ctx is required - index := ctx.Value("index").(uint64) - simpleToken := ctx.Value("simpleToken").(string) - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, username) - if user == nil { - return nil, ErrAuthFailed - } - - token := fmt.Sprintf("%s.%d", simpleToken, index) - as.assignSimpleTokenToUser(username, token) - - plog.Infof("authorized %s, token is %s", username, token) - return &pb.AuthenticateResponse{Token: token}, nil -} - -func (as *authStore) CheckPassword(username, password string) (uint64, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, username) - if user == nil { - return 0, ErrAuthFailed - } - - if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil { - plog.Noticef("authentication failed, invalid password for user %s", username) - return 0, ErrAuthFailed - } - - return getRevision(tx), nil -} - -func (as *authStore) Recover(be backend.Backend) { - enabled := false - as.be = be - tx := be.BatchTx() - tx.Lock() - _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - enabled = true - } - } - - as.revision = getRevision(tx) - - tx.Unlock() - - as.enabledMu.Lock() - as.enabled = enabled - as.enabledMu.Unlock() -} - -func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - if len(r.Name) == 0 { - return nil, ErrUserEmpty - } - - hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) - if err != nil { - plog.Errorf("failed to hash password: %s", err) - return nil, err - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user != nil { - return nil, ErrUserAlreadyExist - } - - newUser := &authpb.User{ - Name: []byte(r.Name), - Password: hashed, - } - - putUser(tx, newUser) - - as.commitRevision(tx) - - plog.Noticef("added a new user: %s", r.Name) - - return &pb.AuthUserAddResponse{}, nil -} - -func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if 
user == nil { - return nil, ErrUserNotFound - } - - delUser(tx, r.Name) - - as.commitRevision(tx) - - as.invalidateCachedPerm(r.Name) - as.invalidateUser(r.Name) - - plog.Noticef("deleted a user: %s", r.Name) - - return &pb.AuthUserDeleteResponse{}, nil -} - -func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - // TODO(mitake): measure the cost of bcrypt.GenerateFromPassword() - // If the cost is too high, we should move the encryption to outside of the raft - hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) - if err != nil { - plog.Errorf("failed to hash password: %s", err) - return nil, err - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - - updatedUser := &authpb.User{ - Name: []byte(r.Name), - Roles: user.Roles, - Password: hashed, - } - - putUser(tx, updatedUser) - - as.commitRevision(tx) - - as.invalidateCachedPerm(r.Name) - as.invalidateUser(r.Name) - - plog.Noticef("changed a password of a user: %s", r.Name) - - return &pb.AuthUserChangePasswordResponse{}, nil -} - -func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.User) - if user == nil { - return nil, ErrUserNotFound - } - - if r.Role != rootRole { - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - } - - idx := sort.SearchStrings(user.Roles, r.Role) - if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 { - plog.Warningf("user %s is already granted role %s", r.User, r.Role) - return &pb.AuthUserGrantRoleResponse{}, nil - } - - user.Roles = append(user.Roles, r.Role) - sort.Sort(sort.StringSlice(user.Roles)) - - putUser(tx, user) - - as.invalidateCachedPerm(r.User) - - as.commitRevision(tx) - - plog.Noticef("granted role %s to user %s", r.Role, r.User) - return &pb.AuthUserGrantRoleResponse{}, nil -} - -func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthUserGetResponse - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - resp.Roles = append(resp.Roles, user.Roles...) 
- return &resp, nil -} - -func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthUserListResponse - - users := getAllUsers(tx) - - for _, u := range users { - resp.Users = append(resp.Users, string(u.Name)) - } - - return &resp, nil -} - -func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, r.Name) - if user == nil { - return nil, ErrUserNotFound - } - - updatedUser := &authpb.User{ - Name: user.Name, - Password: user.Password, - } - - for _, role := range user.Roles { - if strings.Compare(role, r.Role) != 0 { - updatedUser.Roles = append(updatedUser.Roles, role) - } - } - - if len(updatedUser.Roles) == len(user.Roles) { - return nil, ErrRoleNotGranted - } - - putUser(tx, updatedUser) - - as.invalidateCachedPerm(r.Name) - - as.commitRevision(tx) - - plog.Noticef("revoked role %s from user %s", r.Role, r.Name) - return &pb.AuthUserRevokeRoleResponse{}, nil -} - -func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthRoleGetResponse - - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - resp.Perm = append(resp.Perm, role.KeyPermission...) - return &resp, nil -} - -func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var resp pb.AuthRoleListResponse - - roles := getAllRoles(tx) - - for _, r := range roles { - resp.Roles = append(resp.Roles, string(r.Name)) - } - - return &resp, nil -} - -func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - - updatedRole := &authpb.Role{ - Name: role.Name, - } - - for _, perm := range role.KeyPermission { - if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) { - updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm) - } - } - - if len(role.KeyPermission) == len(updatedRole.KeyPermission) { - return nil, ErrPermissionNotGranted - } - - putRole(tx, updatedRole) - - // TODO(mitake): currently single role update invalidates every cache - // It should be optimized. - as.clearCachedPerm() - - as.commitRevision(tx) - - plog.Noticef("revoked key %s from role %s", r.Key, r.Role) - return &pb.AuthRoleRevokePermissionResponse{}, nil -} - -func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - // TODO(mitake): current scheme of role deletion allows existing users to have the deleted roles - // - // Assume a case like below: - // create a role r1 - // create a user u1 and grant r1 to u1 - // delete r1 - // - // After this sequence, u1 is still granted the role r1. So if admin create a new role with the name r1, - // the new r1 is automatically granted u1. - // In some cases, it would be confusing. So we need to provide an option for deleting the grant relation - // from all users. 
- - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Role) - if role == nil { - return nil, ErrRoleNotFound - } - - delRole(tx, r.Role) - - as.commitRevision(tx) - - plog.Noticef("deleted role %s", r.Role) - return &pb.AuthRoleDeleteResponse{}, nil -} - -func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Name) - if role != nil { - return nil, ErrRoleAlreadyExist - } - - newRole := &authpb.Role{ - Name: []byte(r.Name), - } - - putRole(tx, newRole) - - as.commitRevision(tx) - - plog.Noticef("Role %s is created", r.Name) - - return &pb.AuthRoleAddResponse{}, nil -} - -func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) { - // same as '(t *tokenSimple) info' in v3.2+ - as.simpleTokensMu.Lock() - username, ok := as.simpleTokens[token] - if ok && as.simpleTokenKeeper != nil { - as.simpleTokenKeeper.resetSimpleToken(token) - } - as.simpleTokensMu.Unlock() - return &AuthInfo{Username: username, Revision: as.revision}, ok -} - -type permSlice []*authpb.Permission - -func (perms permSlice) Len() int { - return len(perms) -} - -func (perms permSlice) Less(i, j int) bool { - return bytes.Compare(perms[i].Key, perms[j].Key) < 0 -} - -func (perms permSlice) Swap(i, j int) { - perms[i], perms[j] = perms[j], perms[i] -} - -func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - role := getRole(tx, r.Name) - if role == nil { - return nil, ErrRoleNotFound - } - - idx := sort.Search(len(role.KeyPermission), func(i int) bool { - return bytes.Compare(role.KeyPermission[i].Key, []byte(r.Perm.Key)) >= 0 - }) - - if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) { - // update existing permission - role.KeyPermission[idx].PermType = r.Perm.PermType - } else { - // append new permission to the role - newPerm := &authpb.Permission{ - Key: []byte(r.Perm.Key), - RangeEnd: []byte(r.Perm.RangeEnd), - PermType: r.Perm.PermType, - } - - role.KeyPermission = append(role.KeyPermission, newPerm) - sort.Sort(permSlice(role.KeyPermission)) - } - - putRole(tx, role) - - // TODO(mitake): currently single role update invalidates every cache - // It should be optimized. 
- as.clearCachedPerm() - - as.commitRevision(tx) - - plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)]) - - return &pb.AuthRoleGrantPermissionResponse{}, nil -} - -func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error { - // TODO(mitake): this function would be costly so we need a caching mechanism - if !as.isAuthEnabled() { - return nil - } - - // only gets rev == 0 when passed AuthInfo{}; no user given - if revision == 0 { - return ErrUserEmpty - } - - if revision < as.revision { - return ErrAuthOldRevision - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(tx, userName) - if user == nil { - plog.Errorf("invalid user name %s for permission checking", userName) - return ErrPermissionDenied - } - - // root role should have permission on all ranges - if hasRootRole(user) { - return nil - } - - if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) { - return nil - } - - return ErrPermissionDenied -} - -func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE) -} - -func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ) -} - -func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error { - return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE) -} - -func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { - if !as.isAuthEnabled() { - return nil - } - if authInfo == nil { - return ErrUserEmpty - } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - u := getUser(tx, authInfo.Username) - if u == nil { - return ErrUserNotFound - } - - if !hasRootRole(u) { - return ErrPermissionDenied - } - - return nil -} - -func getUser(tx backend.BatchTx, username string) *authpb.User { - _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0) - if len(vs) == 0 { - return nil - } - - user := &authpb.User{} - err := user.Unmarshal(vs[0]) - if err != nil { - plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err) - } - return user -} - -func getAllUsers(tx backend.BatchTx) []*authpb.User { - _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1) - if len(vs) == 0 { - return nil - } - - var users []*authpb.User - - for _, v := range vs { - user := &authpb.User{} - err := user.Unmarshal(v) - if err != nil { - plog.Panicf("failed to unmarshal user struct: %s", err) - } - - users = append(users, user) - } - - return users -} - -func putUser(tx backend.BatchTx, user *authpb.User) { - b, err := user.Marshal() - if err != nil { - plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err) - } - tx.UnsafePut(authUsersBucketName, user.Name, b) -} - -func delUser(tx backend.BatchTx, username string) { - tx.UnsafeDelete(authUsersBucketName, []byte(username)) -} - -func getRole(tx backend.BatchTx, rolename string) *authpb.Role { - _, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0) - if len(vs) == 0 { - return nil - } - - role := &authpb.Role{} - err := role.Unmarshal(vs[0]) - if err != nil { - plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err) - } - return role -} - -func getAllRoles(tx 
backend.BatchTx) []*authpb.Role { - _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1) - if len(vs) == 0 { - return nil - } - - var roles []*authpb.Role - - for _, v := range vs { - role := &authpb.Role{} - err := role.Unmarshal(v) - if err != nil { - plog.Panicf("failed to unmarshal role struct: %s", err) - } - - roles = append(roles, role) - } - - return roles -} - -func putRole(tx backend.BatchTx, role *authpb.Role) { - b, err := role.Marshal() - if err != nil { - plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err) - } - - tx.UnsafePut(authRolesBucketName, []byte(role.Name), b) -} - -func delRole(tx backend.BatchTx, rolename string) { - tx.UnsafeDelete(authRolesBucketName, []byte(rolename)) -} - -func (as *authStore) isAuthEnabled() bool { - as.enabledMu.RLock() - defer as.enabledMu.RUnlock() - return as.enabled -} - -func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore { - tx := be.BatchTx() - tx.Lock() - - tx.UnsafeCreateBucket(authBucketName) - tx.UnsafeCreateBucket(authUsersBucketName) - tx.UnsafeCreateBucket(authRolesBucketName) - - enabled := false - _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - enabled = true - } - } - - as := &authStore{ - be: be, - simpleTokens: make(map[string]string), - revision: getRevision(tx), - indexWaiter: indexWaiter, - enabled: enabled, - rangePermCache: make(map[string]*unifiedRangePermissions), - } - - if enabled { - as.enable() - } - - if as.revision == 0 { - as.commitRevision(tx) - } - - tx.Unlock() - be.ForceCommit() - - return as -} - -func hasRootRole(u *authpb.User) bool { - for _, r := range u.Roles { - if r == rootRole { - return true - } - } - return false -} - -func (as *authStore) commitRevision(tx backend.BatchTx) { - as.revision++ - revBytes := make([]byte, revBytesLen) - binary.BigEndian.PutUint64(revBytes, as.revision) - tx.UnsafePut(authBucketName, revisionKey, revBytes) -} - -func getRevision(tx backend.BatchTx) uint64 { - _, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0) - if len(vs) != 1 { - // this can happen in the initialization phase - return 0 - } - - return binary.BigEndian.Uint64(vs[0]) -} - -func (as *authStore) Revision() uint64 { - return as.revision -} - -func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool { - splitted := strings.Split(token, ".") - if len(splitted) != 2 { - return false - } - index, err := strconv.Atoi(splitted[1]) - if err != nil { - return false - } - - select { - case <-as.indexWaiter(uint64(index)): - return true - case <-ctx.Done(): - } - - return false -} - -func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { - md, ok := metadata.FromContext(ctx) - if !ok { - return nil, nil - } - - ts, tok := md["token"] - if !tok { - return nil, nil - } - - token := ts[0] - if !as.isValidSimpleToken(token, ctx) { - return nil, ErrInvalidAuthToken - } - - authInfo, uok := as.AuthInfoFromToken(token) - if !uok { - plog.Warningf("invalid auth token: %s", token) - return nil, ErrInvalidAuthToken - } - return authInfo, nil -} diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go deleted file mode 100644 index d15e00dd716..00000000000 --- a/vendor/github.com/coreos/etcd/client/auth_role.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - - "golang.org/x/net/context" -) - -type Role struct { - Role string `json:"role"` - Permissions Permissions `json:"permissions"` - Grant *Permissions `json:"grant,omitempty"` - Revoke *Permissions `json:"revoke,omitempty"` -} - -type Permissions struct { - KV rwPermission `json:"kv"` -} - -type rwPermission struct { - Read []string `json:"read"` - Write []string `json:"write"` -} - -type PermissionType int - -const ( - ReadPermission PermissionType = iota - WritePermission - ReadWritePermission -) - -// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to -// interact with etcd's role creation and modification features. -func NewAuthRoleAPI(c Client) AuthRoleAPI { - return &httpAuthRoleAPI{ - client: c, - } -} - -type AuthRoleAPI interface { - // AddRole adds a role. - AddRole(ctx context.Context, role string) error - - // RemoveRole removes a role. - RemoveRole(ctx context.Context, role string) error - - // GetRole retrieves role details. - GetRole(ctx context.Context, role string) (*Role, error) - - // GrantRoleKV grants a role some permission prefixes for the KV store. - GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) - - // RevokeRoleKV revokes some permission prefixes for a role on the KV store. - RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) - - // ListRoles lists roles. 
- ListRoles(ctx context.Context) ([]string, error) -} - -type httpAuthRoleAPI struct { - client httpClient -} - -type authRoleAPIAction struct { - verb string - name string - role *Role -} - -type authRoleAPIList struct{} - -func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", l.name) - if l.role == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.role) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { - resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - var roleList struct { - Roles []Role `json:"roles"` - } - if err = json.Unmarshal(body, &roleList); err != nil { - return nil, err - } - ret := make([]string, 0, len(roleList.Roles)) - for _, r := range roleList.Roles { - ret = append(ret, r.Role) - } - return ret, nil -} - -func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { - role := &Role{ - Role: rolename, - } - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "DELETE", - name: rolename, - }) -} - -func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return err - } - if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err := json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { - return r.modRole(ctx, &authRoleAPIAction{ - verb: "GET", - name: rolename, - }) -} - -func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { - var out rwPermission - switch permType { - case ReadPermission: - out.Read = prefixes - case WritePermission: - out.Write = prefixes - case ReadWritePermission: - out.Read = prefixes - out.Write = prefixes - } - return out -} - -func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Grant: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Revoke: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { - resp, body, err 
:= r.client.Do(ctx, req) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var role Role - if err = json.Unmarshal(body, &role); err != nil { - return nil, err - } - return &role, nil -} diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go deleted file mode 100644 index 97c3f31813b..00000000000 --- a/vendor/github.com/coreos/etcd/client/auth_user.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "path" - - "golang.org/x/net/context" -) - -var ( - defaultV2AuthPrefix = "/v2/auth" -) - -type User struct { - User string `json:"user"` - Password string `json:"password,omitempty"` - Roles []string `json:"roles"` - Grant []string `json:"grant,omitempty"` - Revoke []string `json:"revoke,omitempty"` -} - -// userListEntry is the user representation given by the server for ListUsers -type userListEntry struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -type UserRoles struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -func v2AuthURL(ep url.URL, action string, name string) *url.URL { - if name != "" { - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) - return &ep - } - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) - return &ep -} - -// NewAuthAPI constructs a new AuthAPI that uses HTTP to -// interact with etcd's general auth features. -func NewAuthAPI(c Client) AuthAPI { - return &httpAuthAPI{ - client: c, - } -} - -type AuthAPI interface { - // Enable auth. - Enable(ctx context.Context) error - - // Disable auth. 
- Disable(ctx context.Context) error -} - -type httpAuthAPI struct { - client httpClient -} - -func (s *httpAuthAPI) Enable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"PUT"}) -} - -func (s *httpAuthAPI) Disable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"DELETE"}) -} - -func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { - resp, body, err := s.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -type authAPIAction struct { - verb string -} - -func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "enable", "") - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req -} - -type authError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e authError) Error() string { - return e.Message -} - -// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to -// interact with etcd's user creation and modification features. -func NewAuthUserAPI(c Client) AuthUserAPI { - return &httpAuthUserAPI{ - client: c, - } -} - -type AuthUserAPI interface { - // AddUser adds a user. - AddUser(ctx context.Context, username string, password string) error - - // RemoveUser removes a user. - RemoveUser(ctx context.Context, username string) error - - // GetUser retrieves user details. - GetUser(ctx context.Context, username string) (*User, error) - - // GrantUser grants a user some permission roles. - GrantUser(ctx context.Context, username string, roles []string) (*User, error) - - // RevokeUser revokes some permission roles from a user. - RevokeUser(ctx context.Context, username string, roles []string) (*User, error) - - // ChangePassword changes the user's password. - ChangePassword(ctx context.Context, username string, password string) (*User, error) - - // ListUsers lists the users. 
- ListUsers(ctx context.Context) ([]string, error) -} - -type httpAuthUserAPI struct { - client httpClient -} - -type authUserAPIAction struct { - verb string - username string - user *User -} - -type authUserAPIList struct{} - -func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", l.username) - if l.user == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.user) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { - resp, body, err := u.client.Do(ctx, &authUserAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - - var userList struct { - Users []userListEntry `json:"users"` - } - - if err = json.Unmarshal(body, &userList); err != nil { - return nil, err - } - - ret := make([]string, 0, len(userList.Users)) - for _, u := range userList.Users { - ret = append(ret, u.User) - } - return ret, nil -} - -func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { - user := &User{ - User: username, - Password: password, - } - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "DELETE", - username: username, - }) -} - -func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { - return u.modUser(ctx, &authUserAPIAction{ - verb: "GET", - username: username, - }) -} - -func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Grant: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Revoke: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { - user := &User{ - User: username, - Password: password, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return nil, 
err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var user User - if err = json.Unmarshal(body, &user); err != nil { - var userR UserRoles - if urerr := json.Unmarshal(body, &userR); urerr != nil { - return nil, err - } - user.User = userR.User - for _, r := range userR.Roles { - user.Roles = append(user.Roles, r.Role) - } - } - return &user, nil -} diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go deleted file mode 100644 index 76d1f040198..00000000000 --- a/vendor/github.com/coreos/etcd/client/cancelreq.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq.go - -package client - -import "net/http" - -func requestCanceler(tr CancelableTransport, req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go deleted file mode 100644 index f9131b4725c..00000000000 --- a/vendor/github.com/coreos/etcd/client/client.go +++ /dev/null @@ -1,670 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "errors" - "fmt" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "sync" - "time" - - "golang.org/x/net/context" -) - -var ( - ErrNoEndpoints = errors.New("client: no endpoints available") - ErrTooManyRedirects = errors.New("client: too many redirects") - ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") - ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") - errTooManyRedirectChecks = errors.New("client: too many redirect checks") - - // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so - // that Do() will not retry a request - oneShotCtxValue interface{} -) - -var DefaultRequestTimeout = 5 * time.Second - -var DefaultTransport CancelableTransport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, -} - -type EndpointSelectionMode int - -const ( - // EndpointSelectionRandom is the default value of the 'SelectionMode'. - // As the name implies, the client object will pick a node from the members - // of the cluster in a random fashion. If the cluster has three members, A, B, - // and C, the client picks any node from its three members as its request - // destination. 
- EndpointSelectionRandom EndpointSelectionMode = iota - - // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', - // requests are sent directly to the cluster leader. This reduces - // forwarding roundtrips compared to making requests to etcd followers - // who then forward them to the cluster leader. In the event of a leader - // failure, however, clients configured this way cannot prioritize among - // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' - // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to - // maintain its knowledge of current cluster state. - // - // This mode should be used with Client.AutoSync(). - EndpointSelectionPrioritizeLeader -) - -type Config struct { - // Endpoints defines a set of URLs (schemes, hosts and ports only) - // that can be used to communicate with a logical etcd cluster. For - // example, a three-node cluster could be provided like so: - // - // Endpoints: []string{ - // "http://node1.example.com:2379", - // "http://node2.example.com:2379", - // "http://node3.example.com:2379", - // } - // - // If multiple endpoints are provided, the Client will attempt to - // use them all in the event that one or more of them are unusable. - // - // If Client.Sync is ever called, the Client may cache an alternate - // set of endpoints to continue operation. - Endpoints []string - - // Transport is used by the Client to drive HTTP requests. If not - // provided, DefaultTransport will be used. - Transport CancelableTransport - - // CheckRedirect specifies the policy for handling HTTP redirects. - // If CheckRedirect is not nil, the Client calls it before - // following an HTTP redirect. The sole argument is the number of - // requests that have already been made. If CheckRedirect returns - // an error, Client.Do will not make any further requests and return - // the error back it to the caller. - // - // If CheckRedirect is nil, the Client uses its default policy, - // which is to stop after 10 consecutive requests. - CheckRedirect CheckRedirectFunc - - // Username specifies the user credential to add as an authorization header - Username string - - // Password is the password for the specified user to add as an authorization header - // to the request. - Password string - - // HeaderTimeoutPerRequest specifies the time limit to wait for response - // header in a single request made by the Client. The timeout includes - // connection time, any redirects, and header wait time. - // - // For non-watch GET request, server returns the response body immediately. - // For PUT/POST/DELETE request, server will attempt to commit request - // before responding, which is expected to take `100ms + 2 * RTT`. - // For watch request, server returns the header immediately to notify Client - // watch start. But if server is behind some kind of proxy, the response - // header may be cached at proxy, and Client cannot rely on this behavior. - // - // Especially, wait request will ignore this timeout. - // - // One API call may send multiple requests to different etcd servers until it - // succeeds. Use context of the API to specify the overall timeout. - // - // A HeaderTimeoutPerRequest of zero means no timeout. - HeaderTimeoutPerRequest time.Duration - - // SelectionMode is an EndpointSelectionMode enum that specifies the - // policy for choosing the etcd cluster node to which requests are sent. 
- SelectionMode EndpointSelectionMode -} - -func (cfg *Config) transport() CancelableTransport { - if cfg.Transport == nil { - return DefaultTransport - } - return cfg.Transport -} - -func (cfg *Config) checkRedirect() CheckRedirectFunc { - if cfg.CheckRedirect == nil { - return DefaultCheckRedirect - } - return cfg.CheckRedirect -} - -// CancelableTransport mimics net/http.Transport, but requires that -// the object also support request cancellation. -type CancelableTransport interface { - http.RoundTripper - CancelRequest(req *http.Request) -} - -type CheckRedirectFunc func(via int) error - -// DefaultCheckRedirect follows up to 10 redirects, but no more. -var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { - if via > 10 { - return ErrTooManyRedirects - } - return nil -} - -type Client interface { - // Sync updates the internal cache of the etcd cluster's membership. - Sync(context.Context) error - - // AutoSync periodically calls Sync() every given interval. - // The recommended sync interval is 10 seconds to 1 minute, which does - // not bring too much overhead to server and makes client catch up the - // cluster change in time. - // - // The example to use it: - // - // for { - // err := client.AutoSync(ctx, 10*time.Second) - // if err == context.DeadlineExceeded || err == context.Canceled { - // break - // } - // log.Print(err) - // } - AutoSync(context.Context, time.Duration) error - - // Endpoints returns a copy of the current set of API endpoints used - // by Client to resolve HTTP requests. If Sync has ever been called, - // this may differ from the initial Endpoints provided in the Config. - Endpoints() []string - - // SetEndpoints sets the set of API endpoints used by Client to resolve - // HTTP requests. If the given endpoints are not valid, an error will be - // returned - SetEndpoints(eps []string) error - - httpClient -} - -func New(cfg Config) (Client, error) { - c := &httpClusterClient{ - clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), - rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - selectionMode: cfg.SelectionMode, - } - if cfg.Username != "" { - c.credentials = &credentials{ - username: cfg.Username, - password: cfg.Password, - } - } - if err := c.SetEndpoints(cfg.Endpoints); err != nil { - return nil, err - } - return c, nil -} - -type httpClient interface { - Do(context.Context, httpAction) (*http.Response, []byte, error) -} - -func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { - return func(ep url.URL) httpClient { - return &redirectFollowingHTTPClient{ - checkRedirect: cr, - client: &simpleHTTPClient{ - transport: tr, - endpoint: ep, - headerTimeout: headerTimeout, - }, - } - } -} - -type credentials struct { - username string - password string -} - -type httpClientFactory func(url.URL) httpClient - -type httpAction interface { - HTTPRequest(url.URL) *http.Request -} - -type httpClusterClient struct { - clientFactory httpClientFactory - endpoints []url.URL - pinned int - credentials *credentials - sync.RWMutex - rand *rand.Rand - selectionMode EndpointSelectionMode -} - -func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { - ceps := make([]url.URL, len(eps)) - copy(ceps, eps) - - // To perform a lookup on the new endpoint list without using the current - // client, we'll copy it - clientCopy := &httpClusterClient{ - clientFactory: c.clientFactory, - credentials: 
c.credentials, - rand: c.rand, - - pinned: 0, - endpoints: ceps, - } - - mAPI := NewMembersAPI(clientCopy) - leader, err := mAPI.Leader(ctx) - if err != nil { - return "", err - } - if len(leader.ClientURLs) == 0 { - return "", ErrNoLeaderEndpoint - } - - return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? -} - -func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { - if len(eps) == 0 { - return []url.URL{}, ErrNoEndpoints - } - - neps := make([]url.URL, len(eps)) - for i, ep := range eps { - u, err := url.Parse(ep) - if err != nil { - return []url.URL{}, err - } - neps[i] = *u - } - return neps, nil -} - -func (c *httpClusterClient) SetEndpoints(eps []string) error { - neps, err := c.parseEndpoints(eps) - if err != nil { - return err - } - - c.Lock() - defer c.Unlock() - - c.endpoints = shuffleEndpoints(c.rand, neps) - // We're not doing anything for PrioritizeLeader here. This is - // due to not having a context meaning we can't call getLeaderEndpoint - // However, if you're using PrioritizeLeader, you've already been told - // to regularly call sync, where we do have a ctx, and can figure the - // leader. PrioritizeLeader is also quite a loose guarantee, so deal - // with it - c.pinned = 0 - - return nil -} - -func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - action := act - c.RLock() - leps := len(c.endpoints) - eps := make([]url.URL, leps) - n := copy(eps, c.endpoints) - pinned := c.pinned - - if c.credentials != nil { - action = &authedAction{ - act: act, - credentials: *c.credentials, - } - } - c.RUnlock() - - if leps == 0 { - return nil, nil, ErrNoEndpoints - } - - if leps != n { - return nil, nil, errors.New("unable to pick endpoint: copy failed") - } - - var resp *http.Response - var body []byte - var err error - cerr := &ClusterError{} - isOneShot := ctx.Value(&oneShotCtxValue) != nil - - for i := pinned; i < leps+pinned; i++ { - k := i % leps - hc := c.clientFactory(eps[k]) - resp, body, err = hc.Do(ctx, action) - if err != nil { - cerr.Errors = append(cerr.Errors, err) - if err == ctx.Err() { - return nil, nil, ctx.Err() - } - if err == context.Canceled || err == context.DeadlineExceeded { - return nil, nil, err - } - if isOneShot { - return nil, nil, err - } - continue - } - if resp.StatusCode/100 == 5 { - switch resp.StatusCode { - case http.StatusInternalServerError, http.StatusServiceUnavailable: - // TODO: make sure this is a no leader response - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) - default: - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) - } - if isOneShot { - return nil, nil, cerr.Errors[0] - } - continue - } - if k != pinned { - c.Lock() - c.pinned = k - c.Unlock() - } - return resp, body, nil - } - - return nil, nil, cerr -} - -func (c *httpClusterClient) Endpoints() []string { - c.RLock() - defer c.RUnlock() - - eps := make([]string, len(c.endpoints)) - for i, ep := range c.endpoints { - eps[i] = ep.String() - } - - return eps -} - -func (c *httpClusterClient) Sync(ctx context.Context) error { - mAPI := NewMembersAPI(c) - ms, err := mAPI.List(ctx) - if err != nil { - return err - } - - var eps []string - for _, m := range ms { - eps = append(eps, m.ClientURLs...) 
- } - - neps, err := c.parseEndpoints(eps) - if err != nil { - return err - } - - npin := 0 - - switch c.selectionMode { - case EndpointSelectionRandom: - c.RLock() - eq := endpointsEqual(c.endpoints, neps) - c.RUnlock() - - if eq { - return nil - } - // When items in the endpoint list changes, we choose a new pin - neps = shuffleEndpoints(c.rand, neps) - case EndpointSelectionPrioritizeLeader: - nle, err := c.getLeaderEndpoint(ctx, neps) - if err != nil { - return ErrNoLeaderEndpoint - } - - for i, n := range neps { - if n.String() == nle { - npin = i - break - } - } - default: - return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) - } - - c.Lock() - defer c.Unlock() - c.endpoints = neps - c.pinned = npin - - return nil -} - -func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - err := c.Sync(ctx) - if err != nil { - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - } - } -} - -type roundTripResponse struct { - resp *http.Response - err error -} - -type simpleHTTPClient struct { - transport CancelableTransport - endpoint url.URL - headerTimeout time.Duration -} - -func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - req := act.HTTPRequest(c.endpoint) - - if err := printcURL(req); err != nil { - return nil, nil, err - } - - isWait := false - if req != nil && req.URL != nil { - ws := req.URL.Query().Get("wait") - if len(ws) != 0 { - var err error - isWait, err = strconv.ParseBool(ws) - if err != nil { - return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) - } - } - } - - var hctx context.Context - var hcancel context.CancelFunc - if !isWait && c.headerTimeout > 0 { - hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) - } else { - hctx, hcancel = context.WithCancel(ctx) - } - defer hcancel() - - reqcancel := requestCanceler(c.transport, req) - - rtchan := make(chan roundTripResponse, 1) - go func() { - resp, err := c.transport.RoundTrip(req) - rtchan <- roundTripResponse{resp: resp, err: err} - close(rtchan) - }() - - var resp *http.Response - var err error - - select { - case rtresp := <-rtchan: - resp, err = rtresp.resp, rtresp.err - case <-hctx.Done(): - // cancel and wait for request to actually exit before continuing - reqcancel() - rtresp := <-rtchan - resp = rtresp.resp - switch { - case ctx.Err() != nil: - err = ctx.Err() - case hctx.Err() != nil: - err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) - default: - panic("failed to get error from context") - } - } - - // always check for resp nil-ness to deal with possible - // race conditions between channels above - defer func() { - if resp != nil { - resp.Body.Close() - } - }() - - if err != nil { - return nil, nil, err - } - - var body []byte - done := make(chan struct{}) - go func() { - body, err = ioutil.ReadAll(resp.Body) - done <- struct{}{} - }() - - select { - case <-ctx.Done(): - resp.Body.Close() - <-done - return nil, nil, ctx.Err() - case <-done: - } - - return resp, body, err -} - -type authedAction struct { - act httpAction - credentials credentials -} - -func (a *authedAction) HTTPRequest(url url.URL) *http.Request { - r := a.act.HTTPRequest(url) - r.SetBasicAuth(a.credentials.username, a.credentials.password) - return r -} - -type redirectFollowingHTTPClient struct { - client httpClient - checkRedirect CheckRedirectFunc -} - -func (r 
*redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - next := act - for i := 0; i < 100; i++ { - if i > 0 { - if err := r.checkRedirect(i); err != nil { - return nil, nil, err - } - } - resp, body, err := r.client.Do(ctx, next) - if err != nil { - return nil, nil, err - } - if resp.StatusCode/100 == 3 { - hdr := resp.Header.Get("Location") - if hdr == "" { - return nil, nil, fmt.Errorf("Location header not set") - } - loc, err := url.Parse(hdr) - if err != nil { - return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) - } - next = &redirectedHTTPAction{ - action: act, - location: *loc, - } - continue - } - return resp, body, nil - } - - return nil, nil, errTooManyRedirectChecks -} - -type redirectedHTTPAction struct { - action httpAction - location url.URL -} - -func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { - orig := r.action.HTTPRequest(ep) - orig.URL = &r.location - return orig -} - -func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { - p := r.Perm(len(eps)) - neps := make([]url.URL, len(eps)) - for i, k := range p { - neps[i] = eps[k] - } - return neps -} - -func endpointsEqual(left, right []url.URL) bool { - if len(left) != len(right) { - return false - } - - sLeft := make([]string, len(left)) - sRight := make([]string, len(right)) - for i, l := range left { - sLeft[i] = l.String() - } - for i, r := range right { - sRight[i] = r.String() - } - - sort.Strings(sLeft) - sort.Strings(sRight) - for i := range sLeft { - if sLeft[i] != sRight[i] { - return false - } - } - return true -} diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go deleted file mode 100644 index 34618cdbd9e..00000000000 --- a/vendor/github.com/coreos/etcd/client/cluster_error.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import "fmt" - -type ClusterError struct { - Errors []error -} - -func (ce *ClusterError) Error() string { - s := ErrClusterUnavailable.Error() - for i, e := range ce.Errors { - s += fmt.Sprintf("; error #%d: %s\n", i, e) - } - return s -} - -func (ce *ClusterError) Detail() string { - s := "" - for i, e := range ce.Errors { - s += fmt.Sprintf("error #%d: %s\n", i, e) - } - return s -} diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go deleted file mode 100644 index c8bc9fba20e..00000000000 --- a/vendor/github.com/coreos/etcd/client/curl.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "os" -) - -var ( - cURLDebug = false -) - -func EnablecURLDebug() { - cURLDebug = true -} - -func DisablecURLDebug() { - cURLDebug = false -} - -// printcURL prints the cURL equivalent request to stderr. -// It returns an error if the body of the request cannot -// be read. -// The caller MUST cancel the request if there is an error. -func printcURL(req *http.Request) error { - if !cURLDebug { - return nil - } - var ( - command string - b []byte - err error - ) - - if req.URL != nil { - command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) - } - - if req.Body != nil { - b, err = ioutil.ReadAll(req.Body) - if err != nil { - return err - } - command += fmt.Sprintf(" -d %q", string(b)) - } - - fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) - - // reset body - body := bytes.NewBuffer(b) - req.Body = ioutil.NopCloser(body) - - return nil -} diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go deleted file mode 100644 index bfd7aec93f5..00000000000 --- a/vendor/github.com/coreos/etcd/client/discover.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -// Discoverer is an interface that wraps the Discover method. -type Discoverer interface { - // Discover looks up the etcd servers for the domain. - Discover(domain string) ([]string, error) -} diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go deleted file mode 100644 index 32fdfb52c67..00000000000 --- a/vendor/github.com/coreos/etcd/client/doc.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package client provides bindings for the etcd APIs. 
- -Create a Config and exchange it for a Client: - - import ( - "net/http" - - "github.com/coreos/etcd/client" - "golang.org/x/net/context" - ) - - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: DefaultTransport, - } - - c, err := client.New(cfg) - if err != nil { - // handle error - } - -Clients are safe for concurrent use by multiple goroutines. - -Create a KeysAPI using the Client, then use it to interact with etcd: - - kAPI := client.NewKeysAPI(c) - - // create a new key /foo with the value "bar" - _, err = kAPI.Create(context.Background(), "/foo", "bar") - if err != nil { - // handle error - } - - // delete the newly created key only if the value is still "bar" - _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) - if err != nil { - // handle error - } - -Use a custom context to set timeouts on your operations: - - import "time" - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // set a new key, ignoring it's previous state - _, err := kAPI.Set(ctx, "/ping", "pong", nil) - if err != nil { - if err == context.DeadlineExceeded { - // request took longer than 5s - } else { - // handle error - } - } - -*/ -package client diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go deleted file mode 100644 index 216139c9cc7..00000000000 --- a/vendor/github.com/coreos/etcd/client/keys.generated.go +++ /dev/null @@ -1,1087 +0,0 @@ -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package client - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81819 = 1 - codecSelferC_RAW1819 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1819 = 10 - codecSelferValueTypeMap1819 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1819 = 2 - codecSelfer_containerMapValue1819 = 3 - codecSelfer_containerMapEnd1819 = 4 - codecSelfer_containerArrayElem1819 = 6 - codecSelfer_containerArrayEnd1819 = 7 -) - -var ( - codecSelferBitsize1819 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1819 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1819 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 time.Time - _ = v0 - } -} - -func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("action")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("node")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("prevNode")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1819 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1819 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch yys3 { - case "action": - if r.TryDecodeAsNil() { - x.Action = "" - } else { - yyv4 := &x.Action - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "node": - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - case "prevNode": - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Action = "" - } else { - yyv9 := &x.Action - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*string)(yyv9)) = r.DecodeString() - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Dir != false - yyq2[6] = x.Expiration != nil - yyq2[7] = x.TTL != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("dir")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("nodes")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("createdIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("modifiedIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[6] { - if x.Expiration == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { - r.EncodeBuiltin(yym23, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym22 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("expiration")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if 
x.Expiration == nil { - r.EncodeNil() - } else { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { - r.EncodeBuiltin(yym25, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym24 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq2[7] { - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("ttl")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1819 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1819 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv4 := &x.Key - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*string)(yyv4)) = r.DecodeString() - } - } - case "dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv6 := &x.Dir - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*bool)(yyv6)) = r.DecodeBool() - } - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv8 := &x.Value - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*string)(yyv8)) = r.DecodeString() - } - } - case "nodes": - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv10 := &x.Nodes - yyv10.CodecDecodeSelf(d) - } - case "createdIndex": - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - yyv11 := &x.CreatedIndex - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) - } - } - case "modifiedIndex": - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - yyv13 := &x.ModifiedIndex - yym14 := z.DecBinary() - _ = yym14 
- if false { - } else { - *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) - } - } - case "expiration": - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { - r.DecodeBuiltin(yym17, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym16 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - case "ttl": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv18 := &x.TTL - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*int64)(yyv18)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj20 int - var yyb20 bool - var yyhl20 bool = l >= 0 - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - yyv21 := &x.Key - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*string)(yyv21)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Dir = false - } else { - yyv23 := &x.Dir - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(yyv23)) = r.DecodeBool() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - yyv25 := &x.Value - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - *((*string)(yyv25)) = r.DecodeString() - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv27 := &x.Nodes - yyv27.CodecDecodeSelf(d) - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - yyv28 := &x.CreatedIndex - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - yyv30 := &x.ModifiedIndex - yym31 := 
z.DecBinary() - _ = yym31 - if false { - } else { - *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym33 := z.DecBinary() - _ = yym33 - if false { - } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { - r.DecodeBuiltin(yym34, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym33 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym33 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - yyv35 := &x.TTL - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*int64)(yyv35)) = int64(r.DecodeInt(64)) - } - } - for { - yyj20++ - if yyhl20 { - yyb20 = yyj20 > l - } else { - yyb20 = r.CheckBreak() - } - if yyb20 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - z.DecStructFieldNotFound(yyj20-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encNodes((Nodes)(x), e) - } - } -} - -func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decNodes((*Nodes)(x), d) - } -} - -func (x codecSelfer1819) encNodes(v Nodes, e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyv1 == nil { - r.EncodeNil() - } else { - yyv1.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []*Node{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]*Node, yyrl1) - } - } else { - yyv1 = make([]*Node, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 
< yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw2 := yyv1[yyj1] - yyw2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, nil) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw3 := yyv1[yyj1] - yyw3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) // var yyz1 *Node - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - if yyv1[yyj1] != nil { - *yyv1[yyj1] = Node{} - } - } else { - if yyv1[yyj1] == nil { - yyv1[yyj1] = new(Node) - } - yyw4 := yyv1[yyj1] - yyw4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []*Node{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go deleted file mode 100644 index 4a6c41a78bc..00000000000 --- a/vendor/github.com/coreos/etcd/client/keys.go +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/coreos/etcd/pkg/pathutil" - "github.com/ugorji/go/codec" - "golang.org/x/net/context" -) - -const ( - ErrorCodeKeyNotFound = 100 - ErrorCodeTestFailed = 101 - ErrorCodeNotFile = 102 - ErrorCodeNotDir = 104 - ErrorCodeNodeExist = 105 - ErrorCodeRootROnly = 107 - ErrorCodeDirNotEmpty = 108 - ErrorCodeUnauthorized = 110 - - ErrorCodePrevValueRequired = 201 - ErrorCodeTTLNaN = 202 - ErrorCodeIndexNaN = 203 - ErrorCodeInvalidField = 209 - ErrorCodeInvalidForm = 210 - - ErrorCodeRaftInternal = 300 - ErrorCodeLeaderElect = 301 - - ErrorCodeWatcherCleared = 400 - ErrorCodeEventIndexCleared = 401 -) - -type Error struct { - Code int `json:"errorCode"` - Message string `json:"message"` - Cause string `json:"cause"` - Index uint64 `json:"index"` -} - -func (e Error) Error() string { - return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) -} - -var ( - ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") - ErrEmptyBody = errors.New("client: response body is empty") -) - -// PrevExistType is used to define an existence condition when setting -// or deleting Nodes. 
-type PrevExistType string - -const ( - PrevIgnore = PrevExistType("") - PrevExist = PrevExistType("true") - PrevNoExist = PrevExistType("false") -) - -var ( - defaultV2KeysPrefix = "/v2/keys" -) - -// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value -// API over HTTP. -func NewKeysAPI(c Client) KeysAPI { - return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) -} - -// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller -// to provide a custom base URL path. This should only be used in -// very rare cases. -func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { - return &httpKeysAPI{ - client: c, - prefix: p, - } -} - -type KeysAPI interface { - // Get retrieves a set of Nodes from etcd - Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) - - // Set assigns a new value to a Node identified by a given key. The caller - // may define a set of conditions in the SetOptions. If SetOptions.Dir=true - // then value is ignored. - Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) - - // Delete removes a Node identified by the given key, optionally destroying - // all of its children as well. The caller may define a set of required - // conditions in an DeleteOptions object. - Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) - - // Create is an alias for Set w/ PrevExist=false - Create(ctx context.Context, key, value string) (*Response, error) - - // CreateInOrder is used to atomically create in-order keys within the given directory. - CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) - - // Update is an alias for Set w/ PrevExist=true - Update(ctx context.Context, key, value string) (*Response, error) - - // Watcher builds a new Watcher targeted at a specific Node identified - // by the given key. The Watcher may be configured at creation time - // through a WatcherOptions object. The returned Watcher is designed - // to emit events that happen to a Node, and optionally to its children. - Watcher(key string, opts *WatcherOptions) Watcher -} - -type WatcherOptions struct { - // AfterIndex defines the index after-which the Watcher should - // start emitting events. For example, if a value of 5 is - // provided, the first event will have an index >= 6. - // - // Setting AfterIndex to 0 (default) means that the Watcher - // should start watching for events starting at the current - // index, whatever that may be. - AfterIndex uint64 - - // Recursive specifies whether or not the Watcher should emit - // events that occur in children of the given keyspace. If set - // to false (default), events will be limited to those that - // occur for the exact key. - Recursive bool -} - -type CreateInOrderOptions struct { - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration -} - -type SetOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Set operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. 
- // - // PrevValue is ignored if Dir=true - PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Set operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // PrevExist specifies whether the Node must currently exist - // (PrevExist) or not (PrevNoExist). If the caller does not - // care about existence, set PrevExist to PrevIgnore, or simply - // leave it unset. - PrevExist PrevExistType - - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration - - // Refresh set to true means a TTL value can be updated - // without firing a watch or changing the node value. A - // value must not be provided when refreshing a key. - Refresh bool - - // Dir specifies whether or not this Node should be created as a directory. - Dir bool - - // NoValueOnSuccess specifies whether the response contains the current value of the Node. - // If set, the response will only contain the current value when the request fails. - NoValueOnSuccess bool -} - -type GetOptions struct { - // Recursive defines whether or not all children of the Node - // should be returned. - Recursive bool - - // Sort instructs the server whether or not to sort the Nodes. - // If true, the Nodes are sorted alphabetically by key in - // ascending order (A to z). If false (default), the Nodes will - // not be sorted and the ordering used should not be considered - // predictable. - Sort bool - - // Quorum specifies whether it gets the latest committed value that - // has been applied in quorum of members, which ensures external - // consistency (or linearizability). - Quorum bool -} - -type DeleteOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Delete operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. - PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Delete operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // Recursive defines whether or not all children of the Node - // should be deleted. If set to true, all children of the Node - // identified by the given key will be deleted. If left unset - // or explicitly set to false, only a single Node will be - // deleted. - Recursive bool - - // Dir specifies whether or not this Node should be removed as a directory. - Dir bool -} - -type Watcher interface { - // Next blocks until an etcd event occurs, then returns a Response - // representing that event. The behavior of Next depends on the - // WatcherOptions used to construct the Watcher. Next is designed to - // be called repeatedly, each time blocking until a subsequent event - // is available. - // - // If the provided context is cancelled, Next will return a non-nil - // error. Any other failures encountered while waiting for the next - // event (connection issues, deserialization failures, etc) will - // also result in a non-nil error. - Next(context.Context) (*Response, error) -} - -type Response struct { - // Action is the name of the operation that occurred. 
Possible values - // include get, set, delete, update, create, compareAndSwap, - // compareAndDelete and expire. - Action string `json:"action"` - - // Node represents the state of the relevant etcd Node. - Node *Node `json:"node"` - - // PrevNode represents the previous state of the Node. PrevNode is non-nil - // only if the Node existed before the action occurred and the action - // caused a change to the Node. - PrevNode *Node `json:"prevNode"` - - // Index holds the cluster-level index at the time the Response was generated. - // This index is not tied to the Node(s) contained in this Response. - Index uint64 `json:"-"` - - // ClusterID holds the cluster-level ID reported by the server. This - // should be different for different etcd clusters. - ClusterID string `json:"-"` -} - -type Node struct { - // Key represents the unique location of this Node (e.g. "/foo/bar"). - Key string `json:"key"` - - // Dir reports whether node describes a directory. - Dir bool `json:"dir,omitempty"` - - // Value is the current data stored on this Node. If this Node - // is a directory, Value will be empty. - Value string `json:"value"` - - // Nodes holds the children of this Node, only if this Node is a directory. - // This slice of will be arbitrarily deep (children, grandchildren, great- - // grandchildren, etc.) if a recursive Get or Watch request were made. - Nodes Nodes `json:"nodes"` - - // CreatedIndex is the etcd index at-which this Node was created. - CreatedIndex uint64 `json:"createdIndex"` - - // ModifiedIndex is the etcd index at-which this Node was last modified. - ModifiedIndex uint64 `json:"modifiedIndex"` - - // Expiration is the server side expiration time of the key. - Expiration *time.Time `json:"expiration,omitempty"` - - // TTL is the time to live of the key in second. 
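Taken together, KeysAPI, the option structs, and the Response/Node types describe the whole v2 key-value surface being removed here. As a rough usage sketch, assuming only a client.Client built via the package's constructor in client.go (not shown in this hunk):

package example // illustrative only

import (
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

// putAndGet writes a key with a TTL and then performs a quorum read,
// using the KeysAPI, SetOptions, and GetOptions shown above.
func putAndGet(ctx context.Context, c client.Client) (*client.Response, error) {
	kapi := client.NewKeysAPI(c)

	// TTL <= 0 is ignored by SetOptions, so the 30s TTL is passed explicitly.
	if _, err := kapi.Set(ctx, "/foo", "bar", &client.SetOptions{TTL: 30 * time.Second}); err != nil {
		return nil, err
	}

	// Quorum asks for the latest committed (linearizable) value.
	return kapi.Get(ctx, "/foo", &client.GetOptions{Quorum: true})
}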
- TTL int64 `json:"ttl,omitempty"` -} - -func (n *Node) String() string { - return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) -} - -// TTLDuration returns the Node's TTL as a time.Duration object -func (n *Node) TTLDuration() time.Duration { - return time.Duration(n.TTL) * time.Second -} - -type Nodes []*Node - -// interfaces for sorting - -func (ns Nodes) Len() int { return len(ns) } -func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } -func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } - -type httpKeysAPI struct { - client httpClient - prefix string -} - -func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { - act := &setAction{ - Prefix: k.prefix, - Key: key, - Value: val, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.PrevExist = opts.PrevExist - act.TTL = opts.TTL - act.Refresh = opts.Refresh - act.Dir = opts.Dir - act.NoValueOnSuccess = opts.NoValueOnSuccess - } - - doCtx := ctx - if act.PrevExist == PrevNoExist { - doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) - } - resp, body, err := k.client.Do(doCtx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) -} - -func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { - act := &createInOrderAction{ - Prefix: k.prefix, - Dir: dir, - Value: val, - } - - if opts != nil { - act.TTL = opts.TTL - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) -} - -func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { - act := &deleteAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.Dir = opts.Dir - act.Recursive = opts.Recursive - } - - doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) - resp, body, err := k.client.Do(doCtx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { - act := &getAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - act.Sorted = opts.Sort - act.Quorum = opts.Quorum - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { - act := waitAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - if opts.AfterIndex > 0 { - act.WaitIndex = opts.AfterIndex + 1 - } - } - - return &httpWatcher{ - client: k.client, - nextWait: act, - } -} - -type httpWatcher struct { - client httpClient - nextWait waitAction -} - -func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { - for { - 
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) - if err != nil { - return nil, err - } - - resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) - if err != nil { - if err == ErrEmptyBody { - continue - } - return nil, err - } - - hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 - return resp, nil - } -} - -// v2KeysURL forms a URL representing the location of a key. -// The endpoint argument represents the base URL of an etcd -// server. The prefix is the path needed to route from the -// provided endpoint's path to the root of the keys API -// (typically "/v2/keys"). -func v2KeysURL(ep url.URL, prefix, key string) *url.URL { - // We concatenate all parts together manually. We cannot use - // path.Join because it does not reserve trailing slash. - // We call CanonicalURLPath to further cleanup the path. - if prefix != "" && prefix[0] != '/' { - prefix = "/" + prefix - } - if key != "" && key[0] != '/' { - key = "/" + key - } - ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) - return &ep -} - -type getAction struct { - Prefix string - Key string - Recursive bool - Sorted bool - Quorum bool -} - -func (g *getAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, g.Prefix, g.Key) - - params := u.Query() - params.Set("recursive", strconv.FormatBool(g.Recursive)) - params.Set("sorted", strconv.FormatBool(g.Sorted)) - params.Set("quorum", strconv.FormatBool(g.Quorum)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type waitAction struct { - Prefix string - Key string - WaitIndex uint64 - Recursive bool -} - -func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, w.Prefix, w.Key) - - params := u.Query() - params.Set("wait", "true") - params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) - params.Set("recursive", strconv.FormatBool(w.Recursive)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type setAction struct { - Prefix string - Key string - Value string - PrevValue string - PrevIndex uint64 - PrevExist PrevExistType - TTL time.Duration - Refresh bool - Dir bool - NoValueOnSuccess bool -} - -func (a *setAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - form := url.Values{} - - // we're either creating a directory or setting a key - if a.Dir { - params.Set("dir", strconv.FormatBool(a.Dir)) - } else { - // These options are only valid for setting a key - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - form.Add("value", a.Value) - } - - // Options which apply to both setting a key and creating a dir - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.PrevExist != PrevIgnore { - params.Set("prevExist", string(a.PrevExist)) - } - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - - if a.Refresh { - form.Add("refresh", "true") - } - if a.NoValueOnSuccess { - params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) - } - - u.RawQuery = params.Encode() - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("PUT", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type deleteAction struct { - Prefix string - Key string - PrevValue string - PrevIndex uint64 - Dir bool - Recursive bool -} - -func (a *deleteAction) HTTPRequest(ep url.URL) 
*http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.Dir { - params.Set("dir", "true") - } - if a.Recursive { - params.Set("recursive", "true") - } - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("DELETE", u.String(), nil) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type createInOrderAction struct { - Prefix string - Dir string - Value string - TTL time.Duration -} - -func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Dir) - - form := url.Values{} - form.Add("value", a.Value) - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("POST", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return req -} - -func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { - switch code { - case http.StatusOK, http.StatusCreated: - if len(body) == 0 { - return nil, ErrEmptyBody - } - res, err = unmarshalSuccessfulKeysResponse(header, body) - default: - err = unmarshalFailedKeysResponse(body) - } - - return -} - -func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { - var res Response - err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) - if err != nil { - return nil, ErrInvalidJSON - } - if header.Get("X-Etcd-Index") != "" { - res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) - if err != nil { - return nil, err - } - } - res.ClusterID = header.Get("X-Etcd-Cluster-ID") - return &res, nil -} - -func unmarshalFailedKeysResponse(body []byte) error { - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return ErrInvalidJSON - } - return etcdErr -} diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go deleted file mode 100644 index 23adf07ad91..00000000000 --- a/vendor/github.com/coreos/etcd/client/members.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" - "path" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - defaultV2MembersPrefix = "/v2/members" - defaultLeaderSuffix = "/leader" -) - -type Member struct { - // ID is the unique identifier of this Member. - ID string `json:"id"` - - // Name is a human-readable, non-unique identifier of this Member. - Name string `json:"name"` - - // PeerURLs represents the HTTP(S) endpoints this Member uses to - // participate in etcd's consensus protocol. 
- PeerURLs []string `json:"peerURLs"` - - // ClientURLs represents the HTTP(S) endpoints on which this Member - // serves it's client-facing APIs. - ClientURLs []string `json:"clientURLs"` -} - -type memberCollection []Member - -func (c *memberCollection) UnmarshalJSON(data []byte) error { - d := struct { - Members []Member - }{} - - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - if d.Members == nil { - *c = make([]Member, 0) - return nil - } - - *c = d.Members - return nil -} - -type memberCreateOrUpdateRequest struct { - PeerURLs types.URLs -} - -func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { - s := struct { - PeerURLs []string `json:"peerURLs"` - }{ - PeerURLs: make([]string, len(m.PeerURLs)), - } - - for i, u := range m.PeerURLs { - s.PeerURLs[i] = u.String() - } - - return json.Marshal(&s) -} - -// NewMembersAPI constructs a new MembersAPI that uses HTTP to -// interact with etcd's membership API. -func NewMembersAPI(c Client) MembersAPI { - return &httpMembersAPI{ - client: c, - } -} - -type MembersAPI interface { - // List enumerates the current cluster membership. - List(ctx context.Context) ([]Member, error) - - // Add instructs etcd to accept a new Member into the cluster. - Add(ctx context.Context, peerURL string) (*Member, error) - - // Remove demotes an existing Member out of the cluster. - Remove(ctx context.Context, mID string) error - - // Update instructs etcd to update an existing Member in the cluster. - Update(ctx context.Context, mID string, peerURLs []string) error - - // Leader gets current leader of the cluster - Leader(ctx context.Context) (*Member, error) -} - -type httpMembersAPI struct { - client httpClient -} - -func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { - req := &membersAPIActionList{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var mCollection memberCollection - if err := json.Unmarshal(body, &mCollection); err != nil { - return nil, err - } - - return []Member(mCollection), nil -} - -func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { - urls, err := types.NewURLs([]string{peerURL}) - if err != nil { - return nil, err - } - - req := &membersAPIActionAdd{peerURLs: urls} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusCreated { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return nil, err - } - return nil, merr - } - - var memb Member - if err := json.Unmarshal(body, &memb); err != nil { - return nil, err - } - - return &memb, nil -} - -func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { - urls, err := types.NewURLs(peerURLs) - if err != nil { - return err - } - - req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { - return err - } - - if resp.StatusCode != http.StatusNoContent { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return err - } - return merr - } - - return nil -} - -func (m 
*httpMembersAPI) Remove(ctx context.Context, memberID string) error { - req := &membersAPIActionRemove{memberID: memberID} - resp, _, err := m.client.Do(ctx, req) - if err != nil { - return err - } - - return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) -} - -func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { - req := &membersAPIActionLeader{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var leader Member - if err := json.Unmarshal(body, &leader); err != nil { - return nil, err - } - - return &leader, nil -} - -type membersAPIActionList struct{} - -func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type membersAPIActionRemove struct { - memberID string -} - -func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, d.memberID) - req, _ := http.NewRequest("DELETE", u.String(), nil) - return req -} - -type membersAPIActionAdd struct { - peerURLs types.URLs -} - -func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -type membersAPIActionUpdate struct { - memberID string - peerURLs types.URLs -} - -func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - u.Path = path.Join(u.Path, a.memberID) - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -func assertStatusCode(got int, want ...int) (err error) { - for _, w := range want { - if w == got { - return nil - } - } - return fmt.Errorf("unexpected status code %d", got) -} - -type membersAPIActionLeader struct{} - -func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, defaultLeaderSuffix) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -// v2MembersURL add the necessary path to the provided endpoint -// to route requests to the default v2 members API. -func v2MembersURL(ep url.URL) *url.URL { - ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) - return &ep -} - -type membersError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e membersError) Error() string { - return e.Message -} diff --git a/vendor/github.com/coreos/etcd/client/srv.go b/vendor/github.com/coreos/etcd/client/srv.go deleted file mode 100644 index fdfa3435921..00000000000 --- a/vendor/github.com/coreos/etcd/client/srv.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
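The MembersAPI in members.go is the v2 membership client. A minimal, illustrative driver for it, again assuming a pre-built client.Client:

package example // illustrative only

import (
	"log"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

// rollMember lists the current members, adds a new one by its peer URL,
// and removes it again, exercising the MembersAPI shown above.
func rollMember(ctx context.Context, c client.Client, peerURL string) error {
	mapi := client.NewMembersAPI(c)

	members, err := mapi.List(ctx)
	if err != nil {
		return err
	}
	for _, m := range members {
		log.Printf("member %s: name=%s peerURLs=%v", m.ID, m.Name, m.PeerURLs)
	}

	added, err := mapi.Add(ctx, peerURL)
	if err != nil {
		return err
	}
	return mapi.Remove(ctx, added.ID)
}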
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "fmt" - "net" - "net/url" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV -) - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. -func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -// Discover looks up the etcd servers for the domain. -func (d *srvDiscover) Discover(domain string) ([]string, error) { - var urls []*url.URL - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - return nil - } - - errHTTPS := updateURLs("etcd-client-ssl", "https") - errHTTP := updateURLs("etcd-client", "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return endpoints, nil -} diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go deleted file mode 100644 index 15a8babff4d..00000000000 --- a/vendor/github.com/coreos/etcd/client/util.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "regexp" -) - -var ( - roleNotFoundRegExp *regexp.Regexp - userNotFoundRegExp *regexp.Regexp -) - -func init() { - roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") - userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") -} - -// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. -func IsKeyNotFound(err error) bool { - if cErr, ok := err.(Error); ok { - return cErr.Code == ErrorCodeKeyNotFound - } - return false -} - -// IsRoleNotFound returns true if the error means role not found of v2 API. -func IsRoleNotFound(err error) bool { - if ae, ok := err.(authError); ok { - return roleNotFoundRegExp.MatchString(ae.Message) - } - return false -} - -// IsUserNotFound returns true if the error means user not found of v2 API. 
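srv.go and util.go are small helpers: DNS SRV endpoint discovery and error classification. A hedged sketch of typical use (the Discoverer interface and the KeysAPI construction live outside this hunk):

package example // illustrative only

import (
	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

// endpointsFromSRV resolves etcd endpoints from the etcd-client-ssl/etcd-client
// SRV records under the given domain, as implemented by srv.go above.
func endpointsFromSRV(domain string) ([]string, error) {
	return client.NewSRVDiscover().Discover(domain)
}

// getOrDefault treats a missing key as a non-error and returns a fallback,
// using IsKeyNotFound from util.go above.
func getOrDefault(ctx context.Context, kapi client.KeysAPI, key, def string) (string, error) {
	resp, err := kapi.Get(ctx, key, nil)
	if client.IsKeyNotFound(err) {
		return def, nil
	}
	if err != nil {
		return "", err
	}
	return resp.Node.Value, nil
}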
-func IsUserNotFound(err error) bool { - if ae, ok := err.(authError); ok { - return userNotFoundRegExp.MatchString(ae.Message) - } - return false -} diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go deleted file mode 100644 index b995bce8e3f..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "fmt" - "strings" - - "github.com/coreos/etcd/auth/authpb" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -type ( - AuthEnableResponse pb.AuthEnableResponse - AuthDisableResponse pb.AuthDisableResponse - AuthenticateResponse pb.AuthenticateResponse - AuthUserAddResponse pb.AuthUserAddResponse - AuthUserDeleteResponse pb.AuthUserDeleteResponse - AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse - AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse - AuthUserGetResponse pb.AuthUserGetResponse - AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse - AuthRoleAddResponse pb.AuthRoleAddResponse - AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse - AuthRoleGetResponse pb.AuthRoleGetResponse - AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse - AuthRoleDeleteResponse pb.AuthRoleDeleteResponse - AuthUserListResponse pb.AuthUserListResponse - AuthRoleListResponse pb.AuthRoleListResponse - - PermissionType authpb.Permission_Type - Permission authpb.Permission -) - -const ( - PermRead = authpb.READ - PermWrite = authpb.WRITE - PermReadWrite = authpb.READWRITE -) - -type Auth interface { - // AuthEnable enables auth of an etcd cluster. - AuthEnable(ctx context.Context) (*AuthEnableResponse, error) - - // AuthDisable disables auth of an etcd cluster. - AuthDisable(ctx context.Context) (*AuthDisableResponse, error) - - // UserAdd adds a new user to an etcd cluster. - UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) - - // UserDelete deletes a user from an etcd cluster. - UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) - - // UserChangePassword changes a password of a user. - UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) - - // UserGrantRole grants a role to a user. - UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) - - // UserGet gets a detailed information of a user. - UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) - - // UserList gets a list of all users. - UserList(ctx context.Context) (*AuthUserListResponse, error) - - // UserRevokeRole revokes a role of a user. - UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) - - // RoleAdd adds a new role to an etcd cluster. 
- RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) - - // RoleGrantPermission grants a permission to a role. - RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) - - // RoleGet gets a detailed information of a role. - RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) - - // RoleList gets a list of all roles. - RoleList(ctx context.Context) (*AuthRoleListResponse, error) - - // RoleRevokePermission revokes a permission from a role. - RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) - - // RoleDelete deletes a role. - RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) -} - -type auth struct { - c *Client - - conn *grpc.ClientConn // conn in-use - remote pb.AuthClient -} - -func NewAuth(c *Client) Auth { - conn := c.ActiveConnection() - return &auth{ - conn: c.ActiveConnection(), - remote: pb.NewAuthClient(conn), - c: c, - } -} - -func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false)) - return (*AuthEnableResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false)) - return (*AuthDisableResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}) - return (*AuthUserAddResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { - resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}) - return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { - resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}) - return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { - resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}) - return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { - resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false)) - return (*AuthUserGetResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { - resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false)) - return (*AuthUserListResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { - resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}) - return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { - resp, err := 
auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}) - return (*AuthRoleAddResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { - perm := &authpb.Permission{ - Key: []byte(key), - RangeEnd: []byte(rangeEnd), - PermType: authpb.Permission_Type(permType), - } - resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}) - return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { - resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false)) - return (*AuthRoleGetResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { - resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false)) - return (*AuthRoleListResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}) - return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) -} - -func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { - resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}) - return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) -} - -func StrToPermissionType(s string) (PermissionType, error) { - val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] - if ok { - return PermissionType(val), nil - } - return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) -} - -type authenticator struct { - conn *grpc.ClientConn // conn in-use - remote pb.AuthClient -} - -func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { - resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false)) - return (*AuthenticateResponse)(resp), toErr(ctx, err) -} - -func (auth *authenticator) close() { - auth.conn.Close() -} - -func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return nil, err - } - - return &authenticator{ - conn: conn, - remote: pb.NewAuthClient(conn), - }, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go deleted file mode 100644 index 0fef9c54934..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/balancer.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
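The Auth interface in clientv3/auth.go maps one-to-one onto etcd's v3 auth RPCs. A rough bootstrap sequence, assuming a *clientv3.Client obtained from New (defined in client.go and config.go of the same package):

package example // illustrative only

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// bootstrapAuth creates the root user/role, a scoped application role, and
// then switches auth on, using the Auth methods shown above.
func bootstrapAuth(ctx context.Context, cli *clientv3.Client) error {
	// etcd expects a "root" user holding the "root" role before auth is enabled.
	if _, err := cli.UserAdd(ctx, "root", "rootpw"); err != nil {
		return err
	}
	if _, err := cli.RoleAdd(ctx, "root"); err != nil {
		return err
	}
	if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
		return err
	}

	// A scoped role: read-write on the key range ["app/", "app0").
	if _, err := cli.RoleAdd(ctx, "app"); err != nil {
		return err
	}
	if _, err := cli.RoleGrantPermission(ctx, "app", "app/", "app0",
		clientv3.PermissionType(clientv3.PermReadWrite)); err != nil {
		return err
	}

	_, err := cli.AuthEnable(ctx)
	return err
}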
- -package clientv3 - -import ( - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -// ErrNoAddrAvilable is returned by Get() when the balancer does not have -// any active connection to endpoints at the time. -// This error is returned only when opts.BlockingWait is true. -var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available") - -// simpleBalancer does the bare minimum to expose multiple eps -// to the grpc reconnection code path -type simpleBalancer struct { - // addrs are the client's endpoints for grpc - addrs []grpc.Address - // notifyCh notifies grpc of the set of addresses for connecting - notifyCh chan []grpc.Address - - // readyc closes once the first connection is up - readyc chan struct{} - readyOnce sync.Once - - // mu protects upEps, pinAddr, and connectingAddr - mu sync.RWMutex - // upEps holds the current endpoints that have an active connection - upEps map[string]struct{} - // upc closes when upEps transitions from empty to non-zero or the balancer closes. - upc chan struct{} - - // grpc issues TLS cert checks using the string passed into dial so - // that string must be the host. To recover the full scheme://host URL, - // have a map from hosts to the original endpoint. - host2ep map[string]string - - // pinAddr is the currently pinned address; set to the empty string on - // intialization and shutdown. - pinAddr string - - closed bool -} - -func newSimpleBalancer(eps []string) *simpleBalancer { - notifyCh := make(chan []grpc.Address, 1) - addrs := make([]grpc.Address, len(eps)) - for i := range eps { - addrs[i].Addr = getHost(eps[i]) - } - notifyCh <- addrs - sb := &simpleBalancer{ - addrs: addrs, - notifyCh: notifyCh, - readyc: make(chan struct{}), - upEps: make(map[string]struct{}), - upc: make(chan struct{}), - host2ep: getHost2ep(eps), - } - return sb -} - -func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } - -func (b *simpleBalancer) ConnectNotify() <-chan struct{} { - b.mu.Lock() - defer b.mu.Unlock() - return b.upc -} - -func (b *simpleBalancer) getEndpoint(host string) string { - b.mu.Lock() - defer b.mu.Unlock() - return b.host2ep[host] -} - -func getHost2ep(eps []string) map[string]string { - hm := make(map[string]string, len(eps)) - for i := range eps { - _, host, _ := parseEndpoint(eps[i]) - hm[host] = eps[i] - } - return hm -} - -func (b *simpleBalancer) updateAddrs(eps []string) { - np := getHost2ep(eps) - - b.mu.Lock() - defer b.mu.Unlock() - - match := len(np) == len(b.host2ep) - for k, v := range np { - if b.host2ep[k] != v { - match = false - break - } - } - if match { - // same endpoints, so no need to update address - return - } - - b.host2ep = np - - addrs := make([]grpc.Address, 0, len(eps)) - for i := range eps { - addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])}) - } - b.addrs = addrs - b.notifyCh <- addrs -} - -func (b *simpleBalancer) Up(addr grpc.Address) func(error) { - b.mu.Lock() - defer b.mu.Unlock() - - // gRPC might call Up after it called Close. We add this check - // to "fix" it up at application layer. Or our simplerBalancer - // might panic since b.upc is closed. 
- if b.closed { - return func(err error) {} - } - - if len(b.upEps) == 0 { - // notify waiting Get()s and pin first connected address - close(b.upc) - b.pinAddr = addr.Addr - } - b.upEps[addr.Addr] = struct{}{} - - // notify client that a connection is up - b.readyOnce.Do(func() { close(b.readyc) }) - - return func(err error) { - b.mu.Lock() - delete(b.upEps, addr.Addr) - if len(b.upEps) == 0 && b.pinAddr != "" { - b.upc = make(chan struct{}) - } else if b.pinAddr == addr.Addr { - // choose new random up endpoint - for k := range b.upEps { - b.pinAddr = k - break - } - } - b.mu.Unlock() - } -} - -func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { - var addr string - - // If opts.BlockingWait is false (for fail-fast RPCs), it should return - // an address it has notified via Notify immediately instead of blocking. - if !opts.BlockingWait { - b.mu.RLock() - closed := b.closed - addr = b.pinAddr - upEps := len(b.upEps) - b.mu.RUnlock() - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - - if upEps == 0 { - return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable - } - return grpc.Address{Addr: addr}, func() {}, nil - } - - for { - b.mu.RLock() - ch := b.upc - b.mu.RUnlock() - select { - case <-ch: - case <-ctx.Done(): - return grpc.Address{Addr: ""}, nil, ctx.Err() - } - b.mu.RLock() - addr = b.pinAddr - upEps := len(b.upEps) - b.mu.RUnlock() - if addr == "" { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if upEps > 0 { - break - } - } - return grpc.Address{Addr: addr}, func() {}, nil -} - -func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } - -func (b *simpleBalancer) Close() error { - b.mu.Lock() - defer b.mu.Unlock() - // In case gRPC calls close twice. TODO: remove the checking - // when we are sure that gRPC wont call close twice. - if b.closed { - return nil - } - b.closed = true - close(b.notifyCh) - // terminate all waiting Get()s - b.pinAddr = "" - if len(b.upEps) == 0 { - close(b.upc) - } - return nil -} - -func getHost(ep string) string { - url, uerr := url.Parse(ep) - if uerr != nil || !strings.Contains(ep, "://") { - return ep - } - return url.Host -} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go deleted file mode 100644 index 8263890bdff..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ /dev/null @@ -1,427 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "net/url" - "strings" - "sync" - "time" - - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" -) - -var ( - ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") -) - -// Client provides and manages an etcd v3 client session. -type Client struct { - Cluster - KV - Lease - Watcher - Auth - Maintenance - - conn *grpc.ClientConn - cfg Config - creds *credentials.TransportCredentials - balancer *simpleBalancer - retryWrapper retryRpcFunc - retryAuthWrapper retryRpcFunc - - ctx context.Context - cancel context.CancelFunc - - // Username is a username for authentication - Username string - // Password is a password for authentication - Password string - // tokenCred is an instance of WithPerRPCCredentials()'s argument - tokenCred *authTokenCredential -} - -// New creates a new etcdv3 client from a given configuration. -func New(cfg Config) (*Client, error) { - if len(cfg.Endpoints) == 0 { - return nil, ErrNoAvailableEndpoints - } - - return newClient(&cfg) -} - -// NewFromURL creates a new etcdv3 client from a URL. -func NewFromURL(url string) (*Client, error) { - return New(Config{Endpoints: []string{url}}) -} - -// NewFromConfigFile creates a new etcdv3 client from a configuration file. -func NewFromConfigFile(path string) (*Client, error) { - cfg, err := configFromFile(path) - if err != nil { - return nil, err - } - return New(*cfg) -} - -// Close shuts down the client's etcd connections. -func (c *Client) Close() error { - c.cancel() - c.Watcher.Close() - c.Lease.Close() - return toErr(c.ctx, c.conn.Close()) -} - -// Ctx is a context for "out of band" messages (e.g., for sending -// "clean up" message when another context is canceled). It is -// canceled on client Close(). -func (c *Client) Ctx() context.Context { return c.ctx } - -// Endpoints lists the registered endpoints for the client. -func (c *Client) Endpoints() (eps []string) { - // copy the slice; protect original endpoints from being changed - eps = make([]string, len(c.cfg.Endpoints)) - copy(eps, c.cfg.Endpoints) - return -} - -// SetEndpoints updates client's endpoints. -func (c *Client) SetEndpoints(eps ...string) { - c.cfg.Endpoints = eps - c.balancer.updateAddrs(eps) -} - -// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. -func (c *Client) Sync(ctx context.Context) error { - mresp, err := c.MemberList(ctx) - if err != nil { - return err - } - var eps []string - for _, m := range mresp.Members { - eps = append(eps, m.ClientURLs...) - } - c.SetEndpoints(eps...) 
- return nil -} - -func (c *Client) autoSync() { - if c.cfg.AutoSyncInterval == time.Duration(0) { - return - } - - for { - select { - case <-c.ctx.Done(): - return - case <-time.After(c.cfg.AutoSyncInterval): - ctx, _ := context.WithTimeout(c.ctx, 5*time.Second) - if err := c.Sync(ctx); err != nil && err != c.ctx.Err() { - logger.Println("Auto sync endpoints failed:", err) - } - } - } -} - -type authTokenCredential struct { - token string - tokenMu *sync.RWMutex -} - -func (cred authTokenCredential) RequireTransportSecurity() bool { - return false -} - -func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { - cred.tokenMu.RLock() - defer cred.tokenMu.RUnlock() - return map[string]string{ - "token": cred.token, - }, nil -} - -func parseEndpoint(endpoint string) (proto string, host string, scheme string) { - proto = "tcp" - host = endpoint - url, uerr := url.Parse(endpoint) - if uerr != nil || !strings.Contains(endpoint, "://") { - return - } - scheme = url.Scheme - - // strip scheme:// prefix since grpc dials by host - host = url.Host - switch url.Scheme { - case "http", "https": - case "unix": - proto = "unix" - default: - proto, host = "", "" - } - return -} - -func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { - creds = c.creds - switch scheme { - case "unix": - case "http": - creds = nil - case "https": - if creds != nil { - break - } - tlsconfig := &tls.Config{} - emptyCreds := credentials.NewTLS(tlsconfig) - creds = &emptyCreds - default: - creds = nil - } - return -} - -// dialSetupOpts gives the dial opts prior to any authentication -func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) { - if c.cfg.DialTimeout > 0 { - opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} - } - opts = append(opts, dopts...) - - f := func(host string, t time.Duration) (net.Conn, error) { - proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host)) - if proto == "" { - return nil, fmt.Errorf("unknown scheme for %q", host) - } - select { - case <-c.ctx.Done(): - return nil, c.ctx.Err() - default: - } - dialer := &net.Dialer{Timeout: t} - return dialer.DialContext(c.ctx, proto, host) - } - opts = append(opts, grpc.WithDialer(f)) - - creds := c.creds - if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 { - creds = c.processCreds(scheme) - } - if creds != nil { - opts = append(opts, grpc.WithTransportCredentials(*creds)) - } else { - opts = append(opts, grpc.WithInsecure()) - } - - return opts -} - -// Dial connects to a single endpoint using the client's config. 
-func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) { - return c.dial(endpoint) -} - -func (c *Client) getToken(ctx context.Context) error { - var err error // return last error in a case of fail - var auth *authenticator - - for i := 0; i < len(c.cfg.Endpoints); i++ { - endpoint := c.cfg.Endpoints[i] - host := getHost(endpoint) - // use dial options without dopts to avoid reusing the client balancer - auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint)) - if err != nil { - continue - } - defer auth.close() - - var resp *AuthenticateResponse - resp, err = auth.authenticate(ctx, c.Username, c.Password) - if err != nil { - continue - } - - c.tokenCred.tokenMu.Lock() - c.tokenCred.token = resp.Token - c.tokenCred.tokenMu.Unlock() - - return nil - } - - return err -} - -func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts := c.dialSetupOpts(endpoint, dopts...) - host := getHost(endpoint) - if c.Username != "" && c.Password != "" { - c.tokenCred = &authTokenCredential{ - tokenMu: &sync.RWMutex{}, - } - - ctx := c.ctx - if c.cfg.DialTimeout > 0 { - cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout) - defer cancel() - ctx = cctx - } - if err := c.getToken(ctx); err != nil { - if err == ctx.Err() && ctx.Err() != c.ctx.Err() { - err = grpc.ErrClientConnTimeout - } - return nil, err - } - - opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) - } - - // add metrics options - opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor)) - opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor)) - - conn, err := grpc.Dial(host, opts...) - if err != nil { - return nil, err - } - return conn, nil -} - -// WithRequireLeader requires client requests to only succeed -// when the cluster has a leader. 
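client.go's exported surface (New, Close, Sync, Dial, WithRequireLeader) is what consumers of the vendored clientv3 package actually touch. A minimal connection sketch, assuming the Config fields from config.go and the Get call from the embedded KV interface in kv.go, neither of which is in this hunk:

package example // illustrative only

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func connectAndRead() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:        []string{"http://127.0.0.1:2379"},
		DialTimeout:      5 * time.Second,
		AutoSyncInterval: time.Minute, // periodically refresh endpoints via Sync
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Reject requests instead of waiting when the cluster has no leader.
	ctx := clientv3.WithRequireLeader(context.Background())

	// Get comes from the embedded KV interface (kv.go, assumed here).
	resp, err := cli.Get(ctx, "foo")
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		log.Printf("%s = %s", kv.Key, kv.Value)
	}
}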
-func WithRequireLeader(ctx context.Context) context.Context { - md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewContext(ctx, md) -} - -func newClient(cfg *Config) (*Client, error) { - if cfg == nil { - cfg = &Config{} - } - var creds *credentials.TransportCredentials - if cfg.TLS != nil { - c := credentials.NewTLS(cfg.TLS) - creds = &c - } - - // use a temporary skeleton client to bootstrap first connection - ctx, cancel := context.WithCancel(context.TODO()) - client := &Client{ - conn: nil, - cfg: *cfg, - creds: creds, - ctx: ctx, - cancel: cancel, - } - if cfg.Username != "" && cfg.Password != "" { - client.Username = cfg.Username - client.Password = cfg.Password - } - - client.balancer = newSimpleBalancer(cfg.Endpoints) - conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) - if err != nil { - client.cancel() - client.balancer.Close() - return nil, err - } - client.conn = conn - client.retryWrapper = client.newRetryWrapper() - client.retryAuthWrapper = client.newAuthRetryWrapper() - - // wait for a connection - if cfg.DialTimeout > 0 { - hasConn := false - waitc := time.After(cfg.DialTimeout) - select { - case <-client.balancer.readyc: - hasConn = true - case <-ctx.Done(): - case <-waitc: - } - if !hasConn { - client.cancel() - client.balancer.Close() - conn.Close() - return nil, grpc.ErrClientConnTimeout - } - } - - client.Cluster = NewCluster(client) - client.KV = NewKV(client) - client.Lease = NewLease(client) - client.Watcher = NewWatcher(client) - client.Auth = NewAuth(client) - client.Maintenance = NewMaintenance(client) - - go client.autoSync() - return client, nil -} - -// ActiveConnection returns the current in-use connection -func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } - -// isHaltErr returns true if the given error and context indicate no forward -// progress can be made, even after reconnecting. -func isHaltErr(ctx context.Context, err error) bool { - if ctx != nil && ctx.Err() != nil { - return true - } - if err == nil { - return false - } - code := grpc.Code(err) - // Unavailable codes mean the system will be right back. - // (e.g., can't connect, lost leader) - // Treat Internal codes as if something failed, leaving the - // system in an inconsistent state, but retrying could make progress. - // (e.g., failed in middle of send, corrupted frame) - // TODO: are permanent Internal errors possible from grpc? - return code != codes.Unavailable && code != codes.Internal -} - -func toErr(ctx context.Context, err error) error { - if err == nil { - return nil - } - err = rpctypes.Error(err) - if _, ok := err.(rpctypes.EtcdError); ok { - return err - } - code := grpc.Code(err) - switch code { - case codes.DeadlineExceeded: - fallthrough - case codes.Canceled: - if ctx.Err() != nil { - err = ctx.Err() - } - case codes.Unavailable: - err = ErrNoAvailableEndpoints - case codes.FailedPrecondition: - err = grpc.ErrClientConnClosing - } - return err -} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go deleted file mode 100644 index b9bff626bd7..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -type ( - Member pb.Member - MemberListResponse pb.MemberListResponse - MemberAddResponse pb.MemberAddResponse - MemberRemoveResponse pb.MemberRemoveResponse - MemberUpdateResponse pb.MemberUpdateResponse -) - -type Cluster interface { - // MemberList lists the current cluster membership. - MemberList(ctx context.Context) (*MemberListResponse, error) - - // MemberAdd adds a new member into the cluster. - MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) - - // MemberRemove removes an existing member from the cluster. - MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) - - // MemberUpdate updates the peer addresses of the member. - MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) -} - -type cluster struct { - remote pb.ClusterClient -} - -func NewCluster(c *Client) Cluster { - return &cluster{remote: RetryClusterClient(c)} -} - -func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { - r := &pb.MemberAddRequest{PeerURLs: peerAddrs} - resp, err := c.remote.MemberAdd(ctx, r) - if err == nil { - return (*MemberAddResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } - return nil, toErr(ctx, err) -} - -func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { - r := &pb.MemberRemoveRequest{ID: id} - resp, err := c.remote.MemberRemove(ctx, r) - if err == nil { - return (*MemberRemoveResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } - return nil, toErr(ctx, err) -} - -func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { - // it is safe to retry on update. - for { - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false)) - if err == nil { - return (*MemberUpdateResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } - } -} - -func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { - // it is safe to retry on list. - for { - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false)) - if err == nil { - return (*MemberListResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } - } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go deleted file mode 100644 index 32d97eb0cc1..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
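The Cluster interface mirrors the v2 MembersAPI for the v3 client. A short, illustrative runtime-reconfiguration helper built on it:

package example // illustrative only

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// replaceMember adds a member at newPeerURL and removes the member oldID,
// using the Cluster methods embedded in *clientv3.Client.
func replaceMember(ctx context.Context, cli *clientv3.Client, newPeerURL string, oldID uint64) error {
	list, err := cli.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, m := range list.Members {
		log.Printf("member %x: name=%q peerURLs=%v", m.ID, m.Name, m.PeerURLs)
	}

	if _, err := cli.MemberAdd(ctx, []string{newPeerURL}); err != nil {
		return err
	}
	_, err = cli.MemberRemove(ctx, oldID)
	return err
}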
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -// CompactOp represents a compact operation. -type CompactOp struct { - revision int64 - physical bool -} - -// CompactOption configures compact operation. -type CompactOption func(*CompactOp) - -func (op *CompactOp) applyCompactOpts(opts []CompactOption) { - for _, opt := range opts { - opt(op) - } -} - -// OpCompact wraps slice CompactOption to create a CompactOp. -func OpCompact(rev int64, opts ...CompactOption) CompactOp { - ret := CompactOp{revision: rev} - ret.applyCompactOpts(opts) - return ret -} - -func (op CompactOp) toRequest() *pb.CompactionRequest { - return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} -} - -// WithCompactPhysical makes compact RPC call wait until -// the compaction is physically applied to the local database -// such that compacted entries are totally removed from the -// backend database. -func WithCompactPhysical() CompactOption { - return func(op *CompactOp) { op.physical = true } -} diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go deleted file mode 100644 index f89ffb52c4a..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type CompareTarget int -type CompareResult int - -const ( - CompareVersion CompareTarget = iota - CompareCreated - CompareModified - CompareValue -) - -type Cmp pb.Compare - -func Compare(cmp Cmp, result string, v interface{}) Cmp { - var r pb.Compare_CompareResult - - switch result { - case "=": - r = pb.Compare_EQUAL - case "!=": - r = pb.Compare_NOT_EQUAL - case ">": - r = pb.Compare_GREATER - case "<": - r = pb.Compare_LESS - default: - panic("Unknown result op") - } - - cmp.Result = r - switch cmp.Target { - case pb.Compare_VALUE: - val, ok := v.(string) - if !ok { - panic("bad compare value") - } - cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)} - case pb.Compare_VERSION: - cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)} - case pb.Compare_CREATE: - cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} - case pb.Compare_MOD: - cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} - default: - panic("Unknown compare type") - } - return cmp -} - -func Value(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_VALUE} -} - -func Version(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_VERSION} -} - -func CreateRevision(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_CREATE} -} - -func ModRevision(key string) Cmp { - return Cmp{Key: []byte(key), Target: pb.Compare_MOD} -} - -func mustInt64(val interface{}) int64 { - if v, ok := val.(int64); ok { - return v - } - if v, ok := val.(int); ok { - return int64(v) - } - panic("bad value") -} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go deleted file mode 100644 index d1d5f40906a..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "crypto/tls" - "crypto/x509" - "io/ioutil" - "time" - - "github.com/coreos/etcd/pkg/tlsutil" - "github.com/ghodss/yaml" -) - -type Config struct { - // Endpoints is a list of URLs - Endpoints []string - - // AutoSyncInterval is the interval to update endpoints with its latest members. - // 0 disables auto-sync. By default auto-sync is disabled. - AutoSyncInterval time.Duration - - // DialTimeout is the timeout for failing to establish a connection. - DialTimeout time.Duration - - // TLS holds the client secure credentials, if any. 
- TLS *tls.Config - - // Username is a username for authentication - Username string - - // Password is a password for authentication - Password string -} - -type yamlConfig struct { - Endpoints []string `json:"endpoints"` - AutoSyncInterval time.Duration `json:"auto-sync-interval"` - DialTimeout time.Duration `json:"dial-timeout"` - InsecureTransport bool `json:"insecure-transport"` - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"` - Certfile string `json:"cert-file"` - Keyfile string `json:"key-file"` - CAfile string `json:"ca-file"` -} - -func configFromFile(fpath string) (*Config, error) { - b, err := ioutil.ReadFile(fpath) - if err != nil { - return nil, err - } - - yc := &yamlConfig{} - - err = yaml.Unmarshal(b, yc) - if err != nil { - return nil, err - } - - cfg := &Config{ - Endpoints: yc.Endpoints, - AutoSyncInterval: yc.AutoSyncInterval, - DialTimeout: yc.DialTimeout, - } - - if yc.InsecureTransport { - cfg.TLS = nil - return cfg, nil - } - - var ( - cert *tls.Certificate - cp *x509.CertPool - ) - - if yc.Certfile != "" && yc.Keyfile != "" { - cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil) - if err != nil { - return nil, err - } - } - - if yc.CAfile != "" { - cp, err = tlsutil.NewCertPool([]string{yc.CAfile}) - if err != nil { - return nil, err - } - } - - tlscfg := &tls.Config{ - MinVersion: tls.VersionTLS10, - InsecureSkipVerify: yc.InsecureSkipTLSVerify, - RootCAs: cp, - } - if cert != nil { - tlscfg.Certificates = []tls.Certificate{*cert} - } - cfg.TLS = tlscfg - - return cfg, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go deleted file mode 100644 index 470ca4dc476..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package clientv3 implements the official Go etcd client for v3. -// -// Create client using `clientv3.New`: -// -// cli, err := clientv3.New(clientv3.Config{ -// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, -// DialTimeout: 5 * time.Second, -// }) -// if err != nil { -// // handle error! -// } -// defer cli.Close() -// -// Make sure to close the client after using it. If the client is not closed, the -// connection will have leaky goroutines. -// -// To specify client request timeout, pass context.WithTimeout to APIs: -// -// ctx, cancel := context.WithTimeout(context.Background(), timeout) -// resp, err := kvc.Put(ctx, "sample_key", "sample_value") -// cancel() -// if err != nil { -// // handle error! -// } -// // use the response -// -// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. -// Clients are safe for concurrent use by multiple goroutines. -// -// etcd client returns 2 types of errors: -// -// 1. context error: canceled or deadline exceeded. -// 2. 
gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go -// -// Here is the example code to handle client errors: -// -// resp, err := kvc.Put(ctx, "", "") -// if err != nil { -// if err == context.Canceled { -// // ctx is canceled by another routine -// } else if err == context.DeadlineExceeded { -// // ctx is attached with a deadline and it exceeded -// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok { -// // process (verr.Errors) -// } else { -// // bad cluster endpoints, which are not etcd servers -// } -// } -// -package clientv3 diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go deleted file mode 100644 index c8350f9268b..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -type ( - CompactResponse pb.CompactionResponse - PutResponse pb.PutResponse - GetResponse pb.RangeResponse - DeleteResponse pb.DeleteRangeResponse - TxnResponse pb.TxnResponse -) - -type KV interface { - // Put puts a key-value pair into etcd. - // Note that key,value can be plain bytes array and string is - // an immutable representation of that bytes array. - // To get a string of bytes, do string([]byte(0x10, 0x20)). - Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) - - // Get retrieves keys. - // By default, Get will return the value for "key", if any. - // When passed WithRange(end), Get will return the keys in the range [key, end). - // When passed WithFromKey(), Get returns keys greater than or equal to key. - // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision; - // if the required revision is compacted, the request will fail with ErrCompacted . - // When passed WithLimit(limit), the number of returned keys is bounded by limit. - // When passed WithSort(), the keys will be sorted. - Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) - - // Delete deletes a key, or optionally using WithRange(end), [key, end). - Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) - - // Compact compacts etcd KV history before the given rev. - Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) - - // Do applies a single Op on KV without a transaction. - // Do is useful when declaring operations to be issued at a later time - // whereas Get/Put/Delete are for better suited for when the operation - // should be immediately issued at time of declaration. - - // Do applies a single Op on KV without a transaction. - // Do is useful when creating arbitrary operations to be issued at a - // later time; the user can range over the operations, calling Do to - // execute them. 
Get/Put/Delete, on the other hand, are best suited - // for when the operation should be issued at the time of declaration. - Do(ctx context.Context, op Op) (OpResponse, error) - - // Txn creates a transaction. - Txn(ctx context.Context) Txn -} - -type OpResponse struct { - put *PutResponse - get *GetResponse - del *DeleteResponse -} - -func (op OpResponse) Put() *PutResponse { return op.put } -func (op OpResponse) Get() *GetResponse { return op.get } -func (op OpResponse) Del() *DeleteResponse { return op.del } - -type kv struct { - remote pb.KVClient -} - -func NewKV(c *Client) KV { - return &kv{remote: RetryKVClient(c)} -} - -func NewKVFromKVClient(remote pb.KVClient) KV { - return &kv{remote: remote} -} - -func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { - r, err := kv.Do(ctx, OpPut(key, val, opts...)) - return r.put, toErr(ctx, err) -} - -func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { - r, err := kv.Do(ctx, OpGet(key, opts...)) - return r.get, toErr(ctx, err) -} - -func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { - r, err := kv.Do(ctx, OpDelete(key, opts...)) - return r.del, toErr(ctx, err) -} - -func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { - resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest()) - if err != nil { - return nil, toErr(ctx, err) - } - return (*CompactResponse)(resp), err -} - -func (kv *kv) Txn(ctx context.Context) Txn { - return &txn{ - kv: kv, - ctx: ctx, - } -} - -func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { - for { - resp, err := kv.do(ctx, op) - if err == nil { - return resp, nil - } - - if isHaltErr(ctx, err) { - return resp, toErr(ctx, err) - } - // do not retry on modifications - if op.isWrite() { - return resp, toErr(ctx, err) - } - } -} - -func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { - var err error - switch op.t { - // TODO: handle other ops - case tRange: - var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) - if err == nil { - return OpResponse{get: (*GetResponse)(resp)}, nil - } - case tPut: - var resp *pb.PutResponse - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} - resp, err = kv.remote.Put(ctx, r) - if err == nil { - return OpResponse{put: (*PutResponse)(resp)}, nil - } - case tDeleteRange: - var resp *pb.DeleteRangeResponse - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - resp, err = kv.remote.DeleteRange(ctx, r) - if err == nil { - return OpResponse{del: (*DeleteResponse)(resp)}, nil - } - default: - panic("Unknown op") - } - return OpResponse{}, err -} diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go deleted file mode 100644 index 10d3dd0b27f..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "sync" - "time" - - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -type ( - LeaseRevokeResponse pb.LeaseRevokeResponse - LeaseID int64 -) - -// LeaseGrantResponse is used to convert the protobuf grant response. -type LeaseGrantResponse struct { - *pb.ResponseHeader - ID LeaseID - TTL int64 - Error string -} - -// LeaseKeepAliveResponse is used to convert the protobuf keepalive response. -type LeaseKeepAliveResponse struct { - *pb.ResponseHeader - ID LeaseID - TTL int64 -} - -// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response. -type LeaseTimeToLiveResponse struct { - *pb.ResponseHeader - ID LeaseID `json:"id"` - - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. - TTL int64 `json:"ttl"` - - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - GrantedTTL int64 `json:"granted-ttl"` - - // Keys is the list of keys attached to this lease. - Keys [][]byte `json:"keys"` -} - -const ( - // defaultTTL is the assumed lease TTL used for the first keepalive - // deadline before the actual TTL is known to the client. - defaultTTL = 5 * time.Second - // a small buffer to store unsent lease responses. - leaseResponseChSize = 16 - // NoLease is a lease ID for the absence of a lease. - NoLease LeaseID = 0 -) - -// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. -// -// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. -type ErrKeepAliveHalted struct { - Reason error -} - -func (e ErrKeepAliveHalted) Error() string { - s := "etcdclient: leases keep alive halted" - if e.Reason != nil { - s += ": " + e.Reason.Error() - } - return s -} - -type Lease interface { - // Grant creates a new lease. - Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) - - // Revoke revokes the given lease. - Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) - - // TimeToLive retrieves the lease information of the given lease ID. - TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) - - // KeepAlive keeps the given lease alive forever. - KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) - - // KeepAliveOnce renews the lease once. In most of the cases, Keepalive - // should be used instead of KeepAliveOnce. - KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) - - // Close releases all resources Lease keeps for efficient communication - // with the etcd server. 
- Close() error -} - -type lessor struct { - mu sync.Mutex // guards all fields - - // donec is closed and loopErr is set when recvKeepAliveLoop stops - donec chan struct{} - loopErr error - - remote pb.LeaseClient - - stream pb.Lease_LeaseKeepAliveClient - streamCancel context.CancelFunc - - stopCtx context.Context - stopCancel context.CancelFunc - - keepAlives map[LeaseID]*keepAlive - - // firstKeepAliveTimeout is the timeout for the first keepalive request - // before the actual TTL is known to the lease client - firstKeepAliveTimeout time.Duration -} - -// keepAlive multiplexes a keepalive for a lease over multiple channels -type keepAlive struct { - chs []chan<- *LeaseKeepAliveResponse - ctxs []context.Context - // deadline is the time the keep alive channels close if no response - deadline time.Time - // nextKeepAlive is when to send the next keep alive message - nextKeepAlive time.Time - // donec is closed on lease revoke, expiration, or cancel. - donec chan struct{} -} - -func NewLease(c *Client) Lease { - l := &lessor{ - donec: make(chan struct{}), - keepAlives: make(map[LeaseID]*keepAlive), - remote: RetryLeaseClient(c), - firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second, - } - if l.firstKeepAliveTimeout == time.Second { - l.firstKeepAliveTimeout = defaultTTL - } - - l.stopCtx, l.stopCancel = context.WithCancel(context.Background()) - go l.recvKeepAliveLoop() - go l.deadlineLoop() - return l -} - -func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(cctx, r) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, - } - return gresp, nil - } - if isHaltErr(cctx, err) { - return nil, toErr(cctx, err) - } - } -} - -func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := &pb.LeaseRevokeRequest{ID: int64(id)} - resp, err := l.remote.LeaseRevoke(cctx, r) - - if err == nil { - return (*LeaseRevokeResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } - } -} - -func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := toLeaseTimeToLiveRequest(id, opts...) 
- resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false)) - if err == nil { - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, - } - return gresp, nil - } - if isHaltErr(cctx, err) { - return nil, toErr(cctx, err) - } - } -} - -func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { - ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize) - - l.mu.Lock() - // ensure that recvKeepAliveLoop is still running - select { - case <-l.donec: - err := l.loopErr - l.mu.Unlock() - close(ch) - return ch, ErrKeepAliveHalted{Reason: err} - default: - } - ka, ok := l.keepAlives[id] - if !ok { - // create fresh keep alive - ka = &keepAlive{ - chs: []chan<- *LeaseKeepAliveResponse{ch}, - ctxs: []context.Context{ctx}, - deadline: time.Now().Add(l.firstKeepAliveTimeout), - nextKeepAlive: time.Now(), - donec: make(chan struct{}), - } - l.keepAlives[id] = ka - } else { - // add channel and context to existing keep alive - ka.ctxs = append(ka.ctxs, ctx) - ka.chs = append(ka.chs, ch) - } - l.mu.Unlock() - - go l.keepAliveCtxCloser(id, ctx, ka.donec) - - return ch, nil -} - -func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - resp, err := l.keepAliveOnce(cctx, id) - if err == nil { - if resp.TTL == 0 { - err = rpctypes.ErrLeaseNotFound - } - return resp, err - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } - } -} - -func (l *lessor) Close() error { - l.stopCancel() - <-l.donec - return nil -} - -func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) { - select { - case <-donec: - return - case <-l.donec: - return - case <-ctx.Done(): - } - - l.mu.Lock() - defer l.mu.Unlock() - - ka, ok := l.keepAlives[id] - if !ok { - return - } - - // close channel and remove context if still associated with keep alive - for i, c := range ka.ctxs { - if c == ctx { - close(ka.chs[i]) - ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) - ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) 
- break - } - } - // remove if no one more listeners - if len(ka.chs) == 0 { - delete(l.keepAlives, id) - } -} - -func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - defer cancel() - - stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false)) - if err != nil { - return nil, toErr(ctx, err) - } - - err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)}) - if err != nil { - return nil, toErr(ctx, err) - } - - resp, rerr := stream.Recv() - if rerr != nil { - return nil, toErr(ctx, rerr) - } - - karesp := &LeaseKeepAliveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - } - return karesp, nil -} - -func (l *lessor) recvKeepAliveLoop() (gerr error) { - defer func() { - l.mu.Lock() - close(l.donec) - l.loopErr = gerr - for _, ka := range l.keepAlives { - ka.Close() - } - l.keepAlives = make(map[LeaseID]*keepAlive) - l.mu.Unlock() - }() - - stream, serr := l.resetRecv() - for serr == nil { - resp, err := stream.Recv() - if err != nil { - if isHaltErr(l.stopCtx, err) { - return err - } - stream, serr = l.resetRecv() - continue - } - l.recvKeepAlive(resp) - } - return serr -} - -// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests -func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { - sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false)) - if err = toErr(sctx, err); err != nil { - cancel() - return nil, err - } - - l.mu.Lock() - defer l.mu.Unlock() - if l.stream != nil && l.streamCancel != nil { - l.stream.CloseSend() - l.streamCancel() - } - - l.streamCancel = cancel - l.stream = stream - - go l.sendKeepAliveLoop(stream) - return stream, nil -} - -// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse -func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { - karesp := &LeaseKeepAliveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - } - - l.mu.Lock() - defer l.mu.Unlock() - - ka, ok := l.keepAlives[karesp.ID] - if !ok { - return - } - - if karesp.TTL <= 0 { - // lease expired; close all keep alive channels - delete(l.keepAlives, karesp.ID) - ka.Close() - return - } - - // send update to all channels - nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) - ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) - for _, ch := range ka.chs { - select { - case ch <- karesp: - ka.nextKeepAlive = nextKeepAlive - default: - } - } -} - -// deadlineLoop reaps any keep alive channels that have not received a response -// within the lease TTL -func (l *lessor) deadlineLoop() { - for { - select { - case <-time.After(time.Second): - case <-l.donec: - return - } - now := time.Now() - l.mu.Lock() - for id, ka := range l.keepAlives { - if ka.deadline.Before(now) { - // waited too long for response; lease may be expired - ka.Close() - delete(l.keepAlives, id) - } - } - l.mu.Unlock() - } -} - -// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream -func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { - for { - select { - case <-time.After(500 * time.Millisecond): - case <-stream.Context().Done(): - return - case <-l.donec: - return - case <-l.stopCtx.Done(): - return - } - - var tosend []LeaseID - - now := time.Now() - l.mu.Lock() - for id, ka := range l.keepAlives { - if ka.nextKeepAlive.Before(now) { - tosend = 
append(tosend, id) - } - } - l.mu.Unlock() - - for _, id := range tosend { - r := &pb.LeaseKeepAliveRequest{ID: int64(id)} - if err := stream.Send(r); err != nil { - // TODO do something with this error? - return - } - } - } -} - -func (ka *keepAlive) Close() { - close(ka.donec) - for _, ch := range ka.chs { - close(ch) - } -} - -// cancelWhenStop calls cancel when the given stopc fires. It returns a done chan. done -// should be closed when the work is finished. When done fires, cancelWhenStop will release -// its internal resource. -func cancelWhenStop(cancel context.CancelFunc, stopc <-chan struct{}) chan<- struct{} { - done := make(chan struct{}, 1) - - go func() { - select { - case <-stopc: - case <-done: - } - cancel() - }() - - return done -} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go deleted file mode 100644 index 519db45d8e3..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "io/ioutil" - "log" - "sync" - - "google.golang.org/grpc/grpclog" -) - -// Logger is the logger used by client library. -// It implements grpclog.Logger interface. -type Logger grpclog.Logger - -var ( - logger settableLogger -) - -type settableLogger struct { - l grpclog.Logger - mu sync.RWMutex -} - -func init() { - // disable client side logs by default - logger.mu.Lock() - logger.l = log.New(ioutil.Discard, "", 0) - - // logger has to override the grpclog at initialization so that - // any changes to the grpclog go through logger with locking - // instead of through SetLogger - // - // now updates only happen through settableLogger.set - grpclog.SetLogger(&logger) - logger.mu.Unlock() -} - -// SetLogger sets client-side Logger. By default, logs are disabled. -func SetLogger(l Logger) { - logger.set(l) -} - -// GetLogger returns the current logger. -func GetLogger() Logger { - return logger.get() -} - -func (s *settableLogger) set(l Logger) { - s.mu.Lock() - logger.l = l - s.mu.Unlock() -} - -func (s *settableLogger) get() Logger { - s.mu.RLock() - l := logger.l - s.mu.RUnlock() - return l -} - -// implement the grpclog.Logger interface - -func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } -func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } -func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) 
} diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go deleted file mode 100644 index 718356250be..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "io" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -type ( - DefragmentResponse pb.DefragmentResponse - AlarmResponse pb.AlarmResponse - AlarmMember pb.AlarmMember - StatusResponse pb.StatusResponse -) - -type Maintenance interface { - // AlarmList gets all active alarms. - AlarmList(ctx context.Context) (*AlarmResponse, error) - - // AlarmDisarm disarms a given alarm. - AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) - - // Defragment defragments storage backend of the etcd member with given endpoint. - // Defragment is only needed when deleting a large number of keys and want to reclaim - // the resources. - // Defragment is an expensive operation. User should avoid defragmenting multiple members - // at the same time. - // To defragment multiple members in the cluster, user need to call defragment multiple - // times with different endpoints. - Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) - - // Status gets the status of the endpoint. - Status(ctx context.Context, endpoint string) (*StatusResponse, error) - - // Snapshot provides a reader for a snapshot of a backend. - Snapshot(ctx context.Context) (io.ReadCloser, error) -} - -type maintenance struct { - c *Client - remote pb.MaintenanceClient -} - -func NewMaintenance(c *Client) Maintenance { - return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)} -} - -func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { - req := &pb.AlarmRequest{ - Action: pb.AlarmRequest_GET, - MemberID: 0, // all - Alarm: pb.AlarmType_NONE, // all - } - for { - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) - if err == nil { - return (*AlarmResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } - } -} - -func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { - req := &pb.AlarmRequest{ - Action: pb.AlarmRequest_DEACTIVATE, - MemberID: am.MemberID, - Alarm: am.Alarm, - } - - if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { - ar, err := m.AlarmList(ctx) - if err != nil { - return nil, toErr(ctx, err) - } - ret := AlarmResponse{} - for _, am := range ar.Alarms { - dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) - if derr != nil { - return nil, toErr(ctx, derr) - } - ret.Alarms = append(ret.Alarms, dresp.Alarms...) 
- } - return &ret, nil - } - - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) - if err == nil { - return (*AlarmResponse)(resp), nil - } - return nil, toErr(ctx, err) -} - -func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { - conn, err := m.c.Dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer conn.Close() - remote := pb.NewMaintenanceClient(conn) - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false)) - if err != nil { - return nil, toErr(ctx, err) - } - return (*DefragmentResponse)(resp), nil -} - -func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { - conn, err := m.c.Dial(endpoint) - if err != nil { - return nil, toErr(ctx, err) - } - defer conn.Close() - remote := pb.NewMaintenanceClient(conn) - resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false)) - if err != nil { - return nil, toErr(ctx, err) - } - return (*StatusResponse)(resp), nil -} - -func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false)) - if err != nil { - return nil, toErr(ctx, err) - } - - pr, pw := io.Pipe() - go func() { - for { - resp, err := ss.Recv() - if err != nil { - pw.CloseWithError(err) - return - } - if resp == nil && err == nil { - break - } - if _, werr := pw.Write(resp.Blob); werr != nil { - pw.CloseWithError(werr) - return - } - } - pw.Close() - }() - return pr, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go deleted file mode 100644 index 6e260076698..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - -type opType int - -const ( - // A default Op has opType 0, which is invalid. - tRange opType = iota + 1 - tPut - tDeleteRange -) - -var ( - noPrefixEnd = []byte{0} -) - -// Op represents an Operation that kv can execute. -type Op struct { - t opType - key []byte - end []byte - - // for range - limit int64 - sort *SortOption - serializable bool - keysOnly bool - countOnly bool - minModRev int64 - maxModRev int64 - minCreateRev int64 - maxCreateRev int64 - - // for range, watch - rev int64 - - // for watch, put, delete - prevKV bool - - // progressNotify is for progress updates. 
- progressNotify bool - // createdNotify is for created event - createdNotify bool - // filters for watchers - filterPut bool - filterDelete bool - - // for put - val []byte - leaseID LeaseID -} - -func (op Op) toRangeRequest() *pb.RangeRequest { - if op.t != tRange { - panic("op.t != tRange") - } - r := &pb.RangeRequest{ - Key: op.key, - RangeEnd: op.end, - Limit: op.limit, - Revision: op.rev, - Serializable: op.serializable, - KeysOnly: op.keysOnly, - CountOnly: op.countOnly, - MinModRevision: op.minModRev, - MaxModRevision: op.maxModRev, - MinCreateRevision: op.minCreateRev, - MaxCreateRevision: op.maxCreateRev, - } - if op.sort != nil { - r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) - r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) - } - return r -} - -func (op Op) toRequestOp() *pb.RequestOp { - switch op.t { - case tRange: - return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} - case tPut: - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} - return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} - case tDeleteRange: - r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} - default: - panic("Unknown Op") - } -} - -func (op Op) isWrite() bool { - return op.t != tRange -} - -func OpGet(key string, opts ...OpOption) Op { - ret := Op{t: tRange, key: []byte(key)} - ret.applyOpts(opts) - return ret -} - -func OpDelete(key string, opts ...OpOption) Op { - ret := Op{t: tDeleteRange, key: []byte(key)} - ret.applyOpts(opts) - switch { - case ret.leaseID != 0: - panic("unexpected lease in delete") - case ret.limit != 0: - panic("unexpected limit in delete") - case ret.rev != 0: - panic("unexpected revision in delete") - case ret.sort != nil: - panic("unexpected sort in delete") - case ret.serializable: - panic("unexpected serializable in delete") - case ret.countOnly: - panic("unexpected countOnly in delete") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in delete") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in delete") - case ret.filterDelete, ret.filterPut: - panic("unexpected filter in delete") - case ret.createdNotify: - panic("unexpected createdNotify in delete") - } - return ret -} - -func OpPut(key, val string, opts ...OpOption) Op { - ret := Op{t: tPut, key: []byte(key), val: []byte(val)} - ret.applyOpts(opts) - switch { - case ret.end != nil: - panic("unexpected range in put") - case ret.limit != 0: - panic("unexpected limit in put") - case ret.rev != 0: - panic("unexpected revision in put") - case ret.sort != nil: - panic("unexpected sort in put") - case ret.serializable: - panic("unexpected serializable in put") - case ret.countOnly: - panic("unexpected countOnly in put") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in put") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in put") - case ret.filterDelete, ret.filterPut: - panic("unexpected filter in put") - case ret.createdNotify: - panic("unexpected createdNotify in put") - } - return ret -} - -func opWatch(key string, opts ...OpOption) Op { - ret := Op{t: tRange, key: []byte(key)} - ret.applyOpts(opts) - switch { - case ret.leaseID != 0: - panic("unexpected lease in watch") - case ret.limit != 0: - panic("unexpected limit 
in watch") - case ret.sort != nil: - panic("unexpected sort in watch") - case ret.serializable: - panic("unexpected serializable in watch") - case ret.countOnly: - panic("unexpected countOnly in watch") - case ret.minModRev != 0, ret.maxModRev != 0: - panic("unexpected mod revision filter in watch") - case ret.minCreateRev != 0, ret.maxCreateRev != 0: - panic("unexpected create revision filter in watch") - } - return ret -} - -func (op *Op) applyOpts(opts []OpOption) { - for _, opt := range opts { - opt(op) - } -} - -// OpOption configures Operations like Get, Put, Delete. -type OpOption func(*Op) - -// WithLease attaches a lease ID to a key in 'Put' request. -func WithLease(leaseID LeaseID) OpOption { - return func(op *Op) { op.leaseID = leaseID } -} - -// WithLimit limits the number of results to return from 'Get' request. -func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } - -// WithRev specifies the store revision for 'Get' request. -// Or the start revision of 'Watch' request. -func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } - -// WithSort specifies the ordering in 'Get' request. It requires -// 'WithRange' and/or 'WithPrefix' to be specified too. -// 'target' specifies the target to sort by: key, version, revisions, value. -// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. -func WithSort(target SortTarget, order SortOrder) OpOption { - return func(op *Op) { - if target == SortByKey && order == SortAscend { - // If order != SortNone, server fetches the entire key-space, - // and then applies the sort and limit, if provided. - // Since current mvcc.Range implementation returns results - // sorted by keys in lexicographically ascending order, - // client should ignore SortOrder if the target is SortByKey. - order = SortNone - } - op.sort = &SortOption{target, order} - } -} - -// GetPrefixRangeEnd gets the range end of the prefix. -// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. -func GetPrefixRangeEnd(prefix string) string { - return string(getPrefix([]byte(prefix))) -} - -func getPrefix(key []byte) []byte { - end := make([]byte, len(key)) - copy(end, key) - for i := len(end) - 1; i >= 0; i-- { - if end[i] < 0xff { - end[i] = end[i] + 1 - end = end[:i+1] - return end - } - } - // next prefix does not exist (e.g., 0xffff); - // default to WithFromKey policy - return noPrefixEnd -} - -// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate -// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())' -// can return 'foo1', 'foo2', and so on. -func WithPrefix() OpOption { - return func(op *Op) { - op.end = getPrefix(op.key) - } -} - -// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests. -// For example, 'Get' requests with 'WithRange(end)' returns -// the keys in the range [key, end). -// endKey must be lexicographically greater than start key. -func WithRange(endKey string) OpOption { - return func(op *Op) { op.end = []byte(endKey) } -} - -// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests -// to be equal or greater than the key in the argument. -func WithFromKey() OpOption { return WithRange("\x00") } - -// WithSerializable makes 'Get' request serializable. By default, -// it's linearizable. Serializable requests are better for lower latency -// requirement. 
-func WithSerializable() OpOption { - return func(op *Op) { op.serializable = true } -} - -// WithKeysOnly makes the 'Get' request return only the keys and the corresponding -// values will be omitted. -func WithKeysOnly() OpOption { - return func(op *Op) { op.keysOnly = true } -} - -// WithCountOnly makes the 'Get' request return only the count of keys. -func WithCountOnly() OpOption { - return func(op *Op) { op.countOnly = true } -} - -// WithMinModRev filters out keys for Get with modification revisions less than the given revision. -func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } } - -// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision. -func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } } - -// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision. -func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } } - -// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision. -func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } } - -// WithFirstCreate gets the key with the oldest creation revision in the request range. -func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) } - -// WithLastCreate gets the key with the latest creation revision in the request range. -func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) } - -// WithFirstKey gets the lexically first key in the request range. -func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) } - -// WithLastKey gets the lexically last key in the request range. -func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) } - -// WithFirstRev gets the key with the oldest modification revision in the request range. -func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) } - -// WithLastRev gets the key with the latest modification revision in the request range. -func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) } - -// withTop gets the first key over the get's prefix given a sort order -func withTop(target SortTarget, order SortOrder) []OpOption { - return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)} -} - -// WithProgressNotify makes watch server send periodic progress updates -// every 10 minutes when there is no incoming events. -// Progress updates have zero events in WatchResponse. -func WithProgressNotify() OpOption { - return func(op *Op) { - op.progressNotify = true - } -} - -// WithCreatedNotify makes watch server sends the created event. -func WithCreatedNotify() OpOption { - return func(op *Op) { - op.createdNotify = true - } -} - -// WithFilterPut discards PUT events from the watcher. -func WithFilterPut() OpOption { - return func(op *Op) { op.filterPut = true } -} - -// WithFilterDelete discards DELETE events from the watcher. -func WithFilterDelete() OpOption { - return func(op *Op) { op.filterDelete = true } -} - -// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted, -// nothing will be returned. -func WithPrevKV() OpOption { - return func(op *Op) { - op.prevKV = true - } -} - -// LeaseOp represents an Operation that lease can execute. 
-type LeaseOp struct { - id LeaseID - - // for TimeToLive - attachedKeys bool -} - -// LeaseOption configures lease operations. -type LeaseOption func(*LeaseOp) - -func (op *LeaseOp) applyOpts(opts []LeaseOption) { - for _, opt := range opts { - opt(op) - } -} - -// WithAttachedKeys requests lease timetolive API to return -// attached keys of given lease ID. -func WithAttachedKeys() LeaseOption { - return func(op *LeaseOp) { op.attachedKeys = true } -} - -func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest { - ret := &LeaseOp{id: id} - ret.applyOpts(opts) - return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys} -} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go deleted file mode 100644 index 78f31a8c4b0..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -type rpcFunc func(ctx context.Context) error -type retryRpcFunc func(context.Context, rpcFunc) error - -func (c *Client) newRetryWrapper() retryRpcFunc { - return func(rpcCtx context.Context, f rpcFunc) error { - for { - err := f(rpcCtx) - if err == nil { - return nil - } - - eErr := rpctypes.Error(err) - // always stop retry on etcd errors - if _, ok := eErr.(rpctypes.EtcdError); ok { - return err - } - - // only retry if unavailable - if grpc.Code(err) != codes.Unavailable { - return err - } - - select { - case <-c.balancer.ConnectNotify(): - case <-rpcCtx.Done(): - return rpcCtx.Err() - case <-c.ctx.Done(): - return c.ctx.Err() - } - } - } -} - -func (c *Client) newAuthRetryWrapper() retryRpcFunc { - return func(rpcCtx context.Context, f rpcFunc) error { - for { - err := f(rpcCtx) - if err == nil { - return nil - } - - // always stop retry on etcd errors other than invalid auth token - if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { - gterr := c.getToken(rpcCtx) - if gterr != nil { - return err // return the original error for simplicity - } - continue - } - - return err - } - } -} - -// RetryKVClient implements a KVClient that uses the client's FailFast retry policy. -func RetryKVClient(c *Client) pb.KVClient { - retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper} - return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}} -} - -type retryKVClient struct { - *retryWriteKVClient -} - -func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...) 
- return err - }) - return resp, err -} - -type retryWriteKVClient struct { - pb.KVClient - retryf retryRpcFunc -} - -func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Put(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Txn(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Compact(rctx, in, opts...) - return err - }) - return resp, err -} - -type retryLeaseClient struct { - pb.LeaseClient - retryf retryRpcFunc -} - -// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy. -func RetryLeaseClient(c *Client) pb.LeaseClient { - retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper} - return &retryLeaseClient{retry, c.retryAuthWrapper} -} - -func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...) - return err - }) - return resp, err - -} - -func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...) - return err - }) - return resp, err -} - -type retryClusterClient struct { - pb.ClusterClient - retryf retryRpcFunc -} - -// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy. -func RetryClusterClient(c *Client) pb.ClusterClient { - return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper} -} - -func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...) 
- return err - }) - return resp, err -} - -func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...) - return err - }) - return resp, err -} - -type retryAuthClient struct { - pb.AuthClient - retryf retryRpcFunc -} - -// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy. -func RetryAuthClient(c *Client) pb.AuthClient { - return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper} -} - -func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserAdd(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserDelete(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...) 
- return err - }) - return resp, err -} - -func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...) - return err - }) - return resp, err -} - -func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...) - return err - }) - return resp, err -} diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/github.com/coreos/etcd/clientv3/sort.go deleted file mode 100644 index 2bb9d9a13b7..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/sort.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -type SortTarget int -type SortOrder int - -const ( - SortNone SortOrder = iota - SortAscend - SortDescend -) - -const ( - SortByKey SortTarget = iota - SortByVersion - SortByCreateRevision - SortByModRevision - SortByValue -) - -type SortOption struct { - Target SortTarget - Order SortOrder -} diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go deleted file mode 100644 index a61decd6406..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "sync" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -// Txn is the interface that wraps mini-transactions. -// -// Tx.If( -// Compare(Value(k1), ">", v1), -// Compare(Version(k1), "=", 2) -// ).Then( -// OpPut(k2,v2), OpPut(k3,v3) -// ).Else( -// OpPut(k4,v4), OpPut(k5,v5) -// ).Commit() -// -type Txn interface { - // If takes a list of comparison. If all comparisons passed in succeed, - // the operations passed into Then() will be executed. Or the operations - // passed into Else() will be executed. - If(cs ...Cmp) Txn - - // Then takes a list of operations. The Ops list will be executed, if the - // comparisons passed in If() succeed. 
- Then(ops ...Op) Txn - - // Else takes a list of operations. The Ops list will be executed, if the - // comparisons passed in If() fail. - Else(ops ...Op) Txn - - // Commit tries to commit the transaction. - Commit() (*TxnResponse, error) - - // TODO: add a Do for shortcut the txn without any condition? -} - -type txn struct { - kv *kv - ctx context.Context - - mu sync.Mutex - cif bool - cthen bool - celse bool - - isWrite bool - - cmps []*pb.Compare - - sus []*pb.RequestOp - fas []*pb.RequestOp -} - -func (txn *txn) If(cs ...Cmp) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.cif { - panic("cannot call If twice!") - } - - if txn.cthen { - panic("cannot call If after Then!") - } - - if txn.celse { - panic("cannot call If after Else!") - } - - txn.cif = true - - for i := range cs { - txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i])) - } - - return txn -} - -func (txn *txn) Then(ops ...Op) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.cthen { - panic("cannot call Then twice!") - } - if txn.celse { - panic("cannot call Then after Else!") - } - - txn.cthen = true - - for _, op := range ops { - txn.isWrite = txn.isWrite || op.isWrite() - txn.sus = append(txn.sus, op.toRequestOp()) - } - - return txn -} - -func (txn *txn) Else(ops ...Op) Txn { - txn.mu.Lock() - defer txn.mu.Unlock() - - if txn.celse { - panic("cannot call Else twice!") - } - - txn.celse = true - - for _, op := range ops { - txn.isWrite = txn.isWrite || op.isWrite() - txn.fas = append(txn.fas, op.toRequestOp()) - } - - return txn -} - -func (txn *txn) Commit() (*TxnResponse, error) { - txn.mu.Lock() - defer txn.mu.Unlock() - for { - resp, err := txn.commit() - if err == nil { - return resp, err - } - if isHaltErr(txn.ctx, err) { - return nil, toErr(txn.ctx, err) - } - if txn.isWrite { - return nil, toErr(txn.ctx, err) - } - } -} - -func (txn *txn) commit() (*TxnResponse, error) { - r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - - var opts []grpc.CallOption - if !txn.isWrite { - opts = []grpc.CallOption{grpc.FailFast(false)} - } - resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...) - if err != nil { - return nil, err - } - return (*TxnResponse)(resp), nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go deleted file mode 100644 index 9b083cc9462..00000000000 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ /dev/null @@ -1,785 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package clientv3 - -import ( - "fmt" - "sync" - "time" - - v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -const ( - EventTypeDelete = mvccpb.DELETE - EventTypePut = mvccpb.PUT - - closeSendErrTimeout = 250 * time.Millisecond -) - -type Event mvccpb.Event - -type WatchChan <-chan WatchResponse - -type Watcher interface { - // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. - // If the watch is slow or the required rev is compacted, the watch request - // might be canceled from the server-side and the chan will be closed. - // 'opts' can be: 'WithRev' and/or 'WithPrefix'. - Watch(ctx context.Context, key string, opts ...OpOption) WatchChan - - // Close closes the watcher and cancels all watch requests. - Close() error -} - -type WatchResponse struct { - Header pb.ResponseHeader - Events []*Event - - // CompactRevision is the minimum revision the watcher may receive. - CompactRevision int64 - - // Canceled is used to indicate watch failure. - // If the watch failed and the stream was about to close, before the channel is closed, - // the channel sends a final response that has Canceled set to true with a non-nil Err(). - Canceled bool - - // Created is used to indicate the creation of the watcher. - Created bool - - closeErr error -} - -// IsCreate returns true if the event tells that the key is newly created. -func (e *Event) IsCreate() bool { - return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision -} - -// IsModify returns true if the event tells that a new value is put on existing key. -func (e *Event) IsModify() bool { - return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision -} - -// Err is the error value if this WatchResponse holds an error. -func (wr *WatchResponse) Err() error { - switch { - case wr.closeErr != nil: - return v3rpc.Error(wr.closeErr) - case wr.CompactRevision != 0: - return v3rpc.ErrCompacted - case wr.Canceled: - return v3rpc.ErrFutureRev - } - return nil -} - -// IsProgressNotify returns true if the WatchResponse is progress notification. -func (wr *WatchResponse) IsProgressNotify() bool { - return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 -} - -// watcher implements the Watcher interface -type watcher struct { - remote pb.WatchClient - - // mu protects the grpc streams map - mu sync.RWMutex - - // streams holds all the active grpc streams keyed by ctx value. - streams map[string]*watchGrpcStream -} - -// watchGrpcStream tracks all watch resources attached to a single grpc stream. 
-type watchGrpcStream struct { - owner *watcher - remote pb.WatchClient - - // ctx controls internal remote.Watch requests - ctx context.Context - // ctxKey is the key used when looking up this stream's context - ctxKey string - cancel context.CancelFunc - - // substreams holds all active watchers on this grpc stream - substreams map[int64]*watcherStream - // resuming holds all resuming watchers on this grpc stream - resuming []*watcherStream - - // reqc sends a watch request from Watch() to the main goroutine - reqc chan *watchRequest - // respc receives data from the watch client - respc chan *pb.WatchResponse - // donec closes to broadcast shutdown - donec chan struct{} - // errc transmits errors from grpc Recv to the watch stream reconn logic - errc chan error - // closingc gets the watcherStream of closing watchers - closingc chan *watcherStream - // wg is Done when all substream goroutines have exited - wg sync.WaitGroup - - // resumec closes to signal that all substreams should begin resuming - resumec chan struct{} - // closeErr is the error that closed the watch stream - closeErr error -} - -// watchRequest is issued by the subscriber to start a new watcher -type watchRequest struct { - ctx context.Context - key string - end string - rev int64 - // send created notification event if this field is true - createdNotify bool - // progressNotify is for progress updates - progressNotify bool - // filters is the list of events to filter out - filters []pb.WatchCreateRequest_FilterType - // get the previous key-value pair before the event happens - prevKV bool - // retc receives a chan WatchResponse once the watcher is established - retc chan chan WatchResponse -} - -// watcherStream represents a registered watcher -type watcherStream struct { - // initReq is the request that initiated this request - initReq watchRequest - - // outc publishes watch responses to subscriber - outc chan WatchResponse - // recvc buffers watch responses before publishing - recvc chan *WatchResponse - // donec closes when the watcherStream goroutine stops. - donec chan struct{} - // closing is set to true when stream should be scheduled to shutdown. 
- closing bool - // id is the registered watch id on the grpc stream - id int64 - - // buf holds all events received from etcd but not yet consumed by the client - buf []*WatchResponse -} - -func NewWatcher(c *Client) Watcher { - return NewWatchFromWatchClient(pb.NewWatchClient(c.conn)) -} - -func NewWatchFromWatchClient(wc pb.WatchClient) Watcher { - return &watcher{ - remote: wc, - streams: make(map[string]*watchGrpcStream), - } -} - -// never closes -var valCtxCh = make(chan struct{}) -var zeroTime = time.Unix(0, 0) - -// ctx with only the values; never Done -type valCtx struct{ context.Context } - -func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } -func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } -func (vc *valCtx) Err() error { return nil } - -func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { - ctx, cancel := context.WithCancel(&valCtx{inctx}) - wgs := &watchGrpcStream{ - owner: w, - remote: w.remote, - ctx: ctx, - ctxKey: fmt.Sprintf("%v", inctx), - cancel: cancel, - substreams: make(map[int64]*watcherStream), - - respc: make(chan *pb.WatchResponse), - reqc: make(chan *watchRequest), - donec: make(chan struct{}), - errc: make(chan error, 1), - closingc: make(chan *watcherStream), - resumec: make(chan struct{}), - } - go wgs.run() - return wgs -} - -// Watch posts a watch request to run() and waits for a new watcher channel -func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { - ow := opWatch(key, opts...) - - var filters []pb.WatchCreateRequest_FilterType - if ow.filterPut { - filters = append(filters, pb.WatchCreateRequest_NOPUT) - } - if ow.filterDelete { - filters = append(filters, pb.WatchCreateRequest_NODELETE) - } - - wr := &watchRequest{ - ctx: ctx, - createdNotify: ow.createdNotify, - key: string(ow.key), - end: string(ow.end), - rev: ow.rev, - progressNotify: ow.progressNotify, - filters: filters, - prevKV: ow.prevKV, - retc: make(chan chan WatchResponse, 1), - } - - ok := false - ctxKey := fmt.Sprintf("%v", ctx) - - // find or allocate appropriate grpc watch stream - w.mu.Lock() - if w.streams == nil { - // closed - w.mu.Unlock() - ch := make(chan WatchResponse) - close(ch) - return ch - } - wgs := w.streams[ctxKey] - if wgs == nil { - wgs = w.newWatcherGrpcStream(ctx) - w.streams[ctxKey] = wgs - } - donec := wgs.donec - reqc := wgs.reqc - w.mu.Unlock() - - // couldn't create channel; return closed channel - closeCh := make(chan WatchResponse, 1) - - // submit request - select { - case reqc <- wr: - ok = true - case <-wr.ctx.Done(): - case <-donec: - if wgs.closeErr != nil { - closeCh <- WatchResponse{closeErr: wgs.closeErr} - break - } - // retry; may have dropped stream from no ctxs - return w.Watch(ctx, key, opts...) - } - - // receive channel - if ok { - select { - case ret := <-wr.retc: - return ret - case <-ctx.Done(): - case <-donec: - if wgs.closeErr != nil { - closeCh <- WatchResponse{closeErr: wgs.closeErr} - break - } - // retry; may have dropped stream from no ctxs - return w.Watch(ctx, key, opts...) 
- } - } - - close(closeCh) - return closeCh -} - -func (w *watcher) Close() (err error) { - w.mu.Lock() - streams := w.streams - w.streams = nil - w.mu.Unlock() - for _, wgs := range streams { - if werr := wgs.Close(); werr != nil { - err = werr - } - } - return err -} - -func (w *watchGrpcStream) Close() (err error) { - w.cancel() - <-w.donec - select { - case err = <-w.errc: - default: - } - return toErr(w.ctx, err) -} - -func (w *watcher) closeStream(wgs *watchGrpcStream) { - w.mu.Lock() - close(wgs.donec) - wgs.cancel() - if w.streams != nil { - delete(w.streams, wgs.ctxKey) - } - w.mu.Unlock() -} - -func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { - if resp.WatchId == -1 { - // failed; no channel - close(ws.recvc) - return - } - ws.id = resp.WatchId - w.substreams[ws.id] = ws -} - -func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { - select { - case ws.outc <- *resp: - case <-ws.initReq.ctx.Done(): - case <-time.After(closeSendErrTimeout): - } - close(ws.outc) -} - -func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { - // send channel response in case stream was never established - select { - case ws.initReq.retc <- ws.outc: - default: - } - // close subscriber's channel - if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { - go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr}) - } else if ws.outc != nil { - close(ws.outc) - } - if ws.id != -1 { - delete(w.substreams, ws.id) - return - } - for i := range w.resuming { - if w.resuming[i] == ws { - w.resuming[i] = nil - return - } - } -} - -// run is the root of the goroutines for managing a watcher client -func (w *watchGrpcStream) run() { - var wc pb.Watch_WatchClient - var closeErr error - - // substreams marked to close but goroutine still running; needed for - // avoiding double-closing recvc on grpc stream teardown - closing := make(map[*watcherStream]struct{}) - - defer func() { - w.closeErr = closeErr - // shutdown substreams and resuming substreams - for _, ws := range w.substreams { - if _, ok := closing[ws]; !ok { - close(ws.recvc) - closing[ws] = struct{}{} - } - } - for _, ws := range w.resuming { - if _, ok := closing[ws]; ws != nil && !ok { - close(ws.recvc) - closing[ws] = struct{}{} - } - } - w.joinSubstreams() - for range closing { - w.closeSubstream(<-w.closingc) - } - w.wg.Wait() - w.owner.closeStream(w) - }() - - // start a stream with the etcd grpc server - if wc, closeErr = w.newWatchClient(); closeErr != nil { - return - } - - cancelSet := make(map[int64]struct{}) - - for { - select { - // Watch() requested - case wreq := <-w.reqc: - outc := make(chan WatchResponse, 1) - ws := &watcherStream{ - initReq: *wreq, - id: -1, - outc: outc, - // unbufffered so resumes won't cause repeat events - recvc: make(chan *WatchResponse), - } - - ws.donec = make(chan struct{}) - w.wg.Add(1) - go w.serveSubstream(ws, w.resumec) - - // queue up for watcher creation/resume - w.resuming = append(w.resuming, ws) - if len(w.resuming) == 1 { - // head of resume queue, can register a new watcher - wc.Send(ws.initReq.toPB()) - } - // New events from the watch client - case pbresp := <-w.respc: - switch { - case pbresp.Created: - // response to head of queue creation - if ws := w.resuming[0]; ws != nil { - w.addSubstream(pbresp, ws) - w.dispatchEvent(pbresp) - w.resuming[0] = nil - } - if ws := w.nextResume(); ws != nil { - wc.Send(ws.initReq.toPB()) - } - case pbresp.Canceled: - delete(cancelSet, pbresp.WatchId) - if ws, ok 
:= w.substreams[pbresp.WatchId]; ok { - // signal to stream goroutine to update closingc - close(ws.recvc) - closing[ws] = struct{}{} - } - default: - // dispatch to appropriate watch stream - if ok := w.dispatchEvent(pbresp); ok { - break - } - // watch response on unexpected watch id; cancel id - if _, ok := cancelSet[pbresp.WatchId]; ok { - break - } - cancelSet[pbresp.WatchId] = struct{}{} - cr := &pb.WatchRequest_CancelRequest{ - CancelRequest: &pb.WatchCancelRequest{ - WatchId: pbresp.WatchId, - }, - } - req := &pb.WatchRequest{RequestUnion: cr} - wc.Send(req) - } - // watch client failed to recv; spawn another if possible - case err := <-w.errc: - if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { - closeErr = err - return - } - if wc, closeErr = w.newWatchClient(); closeErr != nil { - return - } - if ws := w.nextResume(); ws != nil { - wc.Send(ws.initReq.toPB()) - } - cancelSet = make(map[int64]struct{}) - case <-w.ctx.Done(): - return - case ws := <-w.closingc: - w.closeSubstream(ws) - delete(closing, ws) - if len(w.substreams)+len(w.resuming) == 0 { - // no more watchers on this stream, shutdown - return - } - } - } -} - -// nextResume chooses the next resuming to register with the grpc stream. Abandoned -// streams are marked as nil in the queue since the head must wait for its inflight registration. -func (w *watchGrpcStream) nextResume() *watcherStream { - for len(w.resuming) != 0 { - if w.resuming[0] != nil { - return w.resuming[0] - } - w.resuming = w.resuming[1:len(w.resuming)] - } - return nil -} - -// dispatchEvent sends a WatchResponse to the appropriate watcher stream -func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - ws, ok := w.substreams[pbresp.WatchId] - if !ok { - return false - } - events := make([]*Event, len(pbresp.Events)) - for i, ev := range pbresp.Events { - events[i] = (*Event)(ev) - } - wr := &WatchResponse{ - Header: *pbresp.Header, - Events: events, - CompactRevision: pbresp.CompactRevision, - Created: pbresp.Created, - Canceled: pbresp.Canceled, - } - select { - case ws.recvc <- wr: - case <-ws.donec: - return false - } - return true -} - -// serveWatchClient forwards messages from the grpc stream to run() -func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { - for { - resp, err := wc.Recv() - if err != nil { - select { - case w.errc <- err: - case <-w.donec: - } - return - } - select { - case w.respc <- resp: - case <-w.donec: - return - } - } -} - -// serveSubstream forwards watch responses from run() to the subscriber -func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { - if ws.closing { - panic("created substream goroutine but substream is closing") - } - - // nextRev is the minimum expected next revision - nextRev := ws.initReq.rev - resuming := false - defer func() { - if !resuming { - ws.closing = true - } - close(ws.donec) - if !resuming { - w.closingc <- ws - } - w.wg.Done() - }() - - emptyWr := &WatchResponse{} - for { - curWr := emptyWr - outc := ws.outc - - if len(ws.buf) > 0 { - curWr = ws.buf[0] - } else { - outc = nil - } - select { - case outc <- *curWr: - if ws.buf[0].Err() != nil { - return - } - ws.buf[0] = nil - ws.buf = ws.buf[1:] - case wr, ok := <-ws.recvc: - if !ok { - // shutdown from closeSubstream - return - } - - if wr.Created { - if ws.initReq.retc != nil { - ws.initReq.retc <- ws.outc - // to prevent next write from taking the slot in buffered channel - // and posting duplicate create events - ws.initReq.retc = nil - - // send first 
creation event only if requested - if ws.initReq.createdNotify { - ws.outc <- *wr - } - // once the watch channel is returned, a current revision - // watch must resume at the store revision. This is necessary - // for the following case to work as expected: - // wch := m1.Watch("a") - // m2.Put("a", "b") - // <-wch - // If the revision is only bound on the first observed event, - // if wch is disconnected before the Put is issued, then reconnects - // after it is committed, it'll miss the Put. - if ws.initReq.rev == 0 { - nextRev = wr.Header.Revision - } - } - } else { - // current progress of watch; <= store revision - nextRev = wr.Header.Revision - } - - if len(wr.Events) > 0 { - nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 - } - ws.initReq.rev = nextRev - - // created event is already sent above, - // watcher should not post duplicate events - if wr.Created { - continue - } - - // TODO pause channel if buffer gets too large - ws.buf = append(ws.buf, wr) - case <-w.ctx.Done(): - return - case <-ws.initReq.ctx.Done(): - return - case <-resumec: - resuming = true - return - } - } - // lazily send cancel message if events on missing id -} - -func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { - // mark all substreams as resuming - close(w.resumec) - w.resumec = make(chan struct{}) - w.joinSubstreams() - for _, ws := range w.substreams { - ws.id = -1 - w.resuming = append(w.resuming, ws) - } - // strip out nils, if any - var resuming []*watcherStream - for _, ws := range w.resuming { - if ws != nil { - resuming = append(resuming, ws) - } - } - w.resuming = resuming - w.substreams = make(map[int64]*watcherStream) - - // connect to grpc stream while accepting watcher cancelation - stopc := make(chan struct{}) - donec := w.waitCancelSubstreams(stopc) - wc, err := w.openWatchClient() - close(stopc) - <-donec - - // serve all non-closing streams, even if there's a client error - // so that the teardown path can shutdown the streams as expected. 
- for _, ws := range w.resuming { - if ws.closing { - continue - } - ws.donec = make(chan struct{}) - w.wg.Add(1) - go w.serveSubstream(ws, w.resumec) - } - - if err != nil { - return nil, v3rpc.Error(err) - } - - // receive data from new grpc stream - go w.serveWatchClient(wc) - return wc, nil -} - -func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { - var wg sync.WaitGroup - wg.Add(len(w.resuming)) - donec := make(chan struct{}) - for i := range w.resuming { - go func(ws *watcherStream) { - defer wg.Done() - if ws.closing { - if ws.initReq.ctx.Err() != nil && ws.outc != nil { - close(ws.outc) - ws.outc = nil - } - return - } - select { - case <-ws.initReq.ctx.Done(): - // closed ws will be removed from resuming - ws.closing = true - close(ws.outc) - ws.outc = nil - go func() { w.closingc <- ws }() - case <-stopc: - } - }(w.resuming[i]) - } - go func() { - defer close(donec) - wg.Wait() - }() - return donec -} - -// joinSubstream waits for all substream goroutines to complete -func (w *watchGrpcStream) joinSubstreams() { - for _, ws := range w.substreams { - <-ws.donec - } - for _, ws := range w.resuming { - if ws != nil { - <-ws.donec - } - } -} - -// openWatchClient retries opening a watchclient until retryConnection fails -func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { - for { - select { - case <-w.ctx.Done(): - if err == nil { - return nil, w.ctx.Err() - } - return nil, err - default: - } - if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil { - break - } - if isHaltErr(w.ctx, err) { - return nil, v3rpc.Error(err) - } - } - return ws, nil -} - -// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest) -func (wr *watchRequest) toPB() *pb.WatchRequest { - req := &pb.WatchCreateRequest{ - StartRevision: wr.rev, - Key: []byte(wr.key), - RangeEnd: []byte(wr.end), - ProgressNotify: wr.progressNotify, - Filters: wr.filters, - PrevKv: wr.prevKV, - } - cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} - return &pb.WatchRequest{RequestUnion: cr} -} diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/github.com/coreos/etcd/compactor/compactor.go deleted file mode 100644 index 322a0987011..00000000000 --- a/vendor/github.com/coreos/etcd/compactor/compactor.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package compactor - -import ( - "sync" - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/pkg/capnslog" - "github.com/jonboulle/clockwork" - "golang.org/x/net/context" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "compactor") -) - -const ( - checkCompactionInterval = 5 * time.Minute -) - -type Compactable interface { - Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) -} - -type RevGetter interface { - Rev() int64 -} - -type Periodic struct { - clock clockwork.Clock - periodInHour int - - rg RevGetter - c Compactable - - revs []int64 - ctx context.Context - cancel context.CancelFunc - - mu sync.Mutex - paused bool -} - -func NewPeriodic(h int, rg RevGetter, c Compactable) *Periodic { - return &Periodic{ - clock: clockwork.NewRealClock(), - periodInHour: h, - rg: rg, - c: c, - } -} - -func (t *Periodic) Run() { - t.ctx, t.cancel = context.WithCancel(context.Background()) - t.revs = make([]int64, 0) - clock := t.clock - - go func() { - last := clock.Now() - for { - t.revs = append(t.revs, t.rg.Rev()) - select { - case <-t.ctx.Done(): - return - case <-clock.After(checkCompactionInterval): - t.mu.Lock() - p := t.paused - t.mu.Unlock() - if p { - continue - } - } - if clock.Now().Sub(last) < time.Duration(t.periodInHour)*time.Hour { - continue - } - - rev := t.getRev(t.periodInHour) - if rev < 0 { - continue - } - - plog.Noticef("Starting auto-compaction at revision %d", rev) - _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) - if err == nil || err == mvcc.ErrCompacted { - t.revs = make([]int64, 0) - last = clock.Now() - plog.Noticef("Finished auto-compaction at revision %d", rev) - } else { - plog.Noticef("Failed auto-compaction at revision %d (%v)", err, rev) - plog.Noticef("Retry after %v", checkCompactionInterval) - } - } - }() -} - -func (t *Periodic) Stop() { - t.cancel() -} - -func (t *Periodic) Pause() { - t.mu.Lock() - defer t.mu.Unlock() - t.paused = true -} - -func (t *Periodic) Resume() { - t.mu.Lock() - defer t.mu.Unlock() - t.paused = false -} - -func (t *Periodic) getRev(h int) int64 { - i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval) - if i < 0 { - return -1 - } - return t.revs[i] -} diff --git a/vendor/github.com/coreos/etcd/compactor/doc.go b/vendor/github.com/coreos/etcd/compactor/doc.go deleted file mode 100644 index cb158340e49..00000000000 --- a/vendor/github.com/coreos/etcd/compactor/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package compactor implements automated policies for compacting etcd's mvcc storage. 
-package compactor diff --git a/vendor/github.com/coreos/etcd/discovery/discovery.go b/vendor/github.com/coreos/etcd/discovery/discovery.go deleted file mode 100644 index edc842ffb97..00000000000 --- a/vendor/github.com/coreos/etcd/discovery/discovery.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package discovery provides an implementation of the cluster discovery that -// is used by etcd. -package discovery - -import ( - "errors" - "fmt" - "math" - "net/http" - "net/url" - "path" - "sort" - "strconv" - "strings" - "time" - - "github.com/coreos/etcd/client" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/pkg/capnslog" - "github.com/jonboulle/clockwork" - "golang.org/x/net/context" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "discovery") - - ErrInvalidURL = errors.New("discovery: invalid URL") - ErrBadSizeKey = errors.New("discovery: size key is bad") - ErrSizeNotFound = errors.New("discovery: size key not found") - ErrTokenNotFound = errors.New("discovery: token not found") - ErrDuplicateID = errors.New("discovery: found duplicate id") - ErrDuplicateName = errors.New("discovery: found duplicate name") - ErrFullCluster = errors.New("discovery: cluster is full") - ErrTooManyRetries = errors.New("discovery: too many retries") - ErrBadDiscoveryEndpoint = errors.New("discovery: bad discovery endpoint") -) - -var ( - // Number of retries discovery will attempt before giving up and erroring out. - nRetries = uint(math.MaxUint32) - maxExpoentialRetries = uint(8) -) - -// JoinCluster will connect to the discovery service at the given url, and -// register the server represented by the given id and config to the cluster -func JoinCluster(durl, dproxyurl string, id types.ID, config string) (string, error) { - d, err := newDiscovery(durl, dproxyurl, id) - if err != nil { - return "", err - } - return d.joinCluster(config) -} - -// GetCluster will connect to the discovery service at the given url and -// retrieve a string describing the cluster -func GetCluster(durl, dproxyurl string) (string, error) { - d, err := newDiscovery(durl, dproxyurl, 0) - if err != nil { - return "", err - } - return d.getCluster() -} - -type discovery struct { - cluster string - id types.ID - c client.KeysAPI - retries uint - url *url.URL - - clock clockwork.Clock -} - -// newProxyFunc builds a proxy function from the given string, which should -// represent a URL that can be used as a proxy. It performs basic -// sanitization of the URL and returns any error encountered. -func newProxyFunc(proxy string) (func(*http.Request) (*url.URL, error), error) { - if proxy == "" { - return nil, nil - } - // Do a small amount of URL sanitization to help the user - // Derived from net/http.ProxyFromEnvironment - proxyURL, err := url.Parse(proxy) - if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") { - // proxy was bogus. 
Try prepending "http://" to it and - // see if that parses correctly. If not, we ignore the - // error and complain about the original one - var err2 error - proxyURL, err2 = url.Parse("http://" + proxy) - if err2 == nil { - err = nil - } - } - if err != nil { - return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) - } - - plog.Infof("using proxy %q", proxyURL.String()) - return http.ProxyURL(proxyURL), nil -} - -func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) { - u, err := url.Parse(durl) - if err != nil { - return nil, err - } - token := u.Path - u.Path = "" - pf, err := newProxyFunc(dproxyurl) - if err != nil { - return nil, err - } - - // TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early - tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second) - if err != nil { - return nil, err - } - tr.Proxy = pf - cfg := client.Config{ - Transport: tr, - Endpoints: []string{u.String()}, - } - c, err := client.New(cfg) - if err != nil { - return nil, err - } - dc := client.NewKeysAPIWithPrefix(c, "") - return &discovery{ - cluster: token, - c: dc, - id: id, - url: u, - clock: clockwork.NewRealClock(), - }, nil -} - -func (d *discovery) joinCluster(config string) (string, error) { - // fast path: if the cluster is full, return the error - // do not need to register to the cluster in this case. - if _, _, _, err := d.checkCluster(); err != nil { - return "", err - } - - if err := d.createSelf(config); err != nil { - // Fails, even on a timeout, if createSelf times out. - // TODO(barakmich): Retrying the same node might want to succeed here - // (ie, createSelf should be idempotent for discovery). - return "", err - } - - nodes, size, index, err := d.checkCluster() - if err != nil { - return "", err - } - - all, err := d.waitNodes(nodes, size, index) - if err != nil { - return "", err - } - - return nodesToCluster(all, size) -} - -func (d *discovery) getCluster() (string, error) { - nodes, size, index, err := d.checkCluster() - if err != nil { - if err == ErrFullCluster { - return nodesToCluster(nodes, size) - } - return "", err - } - - all, err := d.waitNodes(nodes, size, index) - if err != nil { - return "", err - } - return nodesToCluster(all, size) -} - -func (d *discovery) createSelf(contents string) error { - ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) - resp, err := d.c.Create(ctx, d.selfKey(), contents) - cancel() - if err != nil { - if eerr, ok := err.(client.Error); ok && eerr.Code == client.ErrorCodeNodeExist { - return ErrDuplicateID - } - return err - } - - // ensure self appears on the server we connected to - w := d.c.Watcher(d.selfKey(), &client.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1}) - _, err = w.Next(context.Background()) - return err -} - -func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) { - configKey := path.Join("/", d.cluster, "_config") - ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout) - // find cluster size - resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil) - cancel() - if err != nil { - if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound { - return nil, 0, 0, ErrSizeNotFound - } - if err == client.ErrInvalidJSON { - return nil, 0, 0, ErrBadDiscoveryEndpoint - } - if ce, ok := err.(*client.ClusterError); ok { - plog.Error(ce.Detail()) - return d.checkClusterRetry() - } - return nil, 0, 0, err - } - size, err := 
strconv.Atoi(resp.Node.Value)
- if err != nil {
- return nil, 0, 0, ErrBadSizeKey
- }
-
- ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
- resp, err = d.c.Get(ctx, d.cluster, nil)
- cancel()
- if err != nil {
- if ce, ok := err.(*client.ClusterError); ok {
- plog.Error(ce.Detail())
- return d.checkClusterRetry()
- }
- return nil, 0, 0, err
- }
- var nodes []*client.Node
- // append non-config keys to nodes
- for _, n := range resp.Node.Nodes {
- if !(path.Base(n.Key) == path.Base(configKey)) {
- nodes = append(nodes, n)
- }
- }
-
- snodes := sortableNodes{nodes}
- sort.Sort(snodes)
-
- // find self position
- for i := range nodes {
- if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
- break
- }
- if i >= size-1 {
- return nodes[:size], size, resp.Index, ErrFullCluster
- }
- }
- return nodes, size, resp.Index, nil
-}
-
-func (d *discovery) logAndBackoffForRetry(step string) {
- d.retries++
- // logAndBackoffForRetry stops exponential backoff when the retries are more than maxExpoentialRetries and is set to a constant backoff afterward.
- retries := d.retries
- if retries > maxExpoentialRetries {
- retries = maxExpoentialRetries
- }
- retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
- plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTimeInSecond)
- d.clock.Sleep(retryTimeInSecond)
-}
-
-func (d *discovery) checkClusterRetry() ([]*client.Node, int, uint64, error) {
- if d.retries < nRetries {
- d.logAndBackoffForRetry("cluster status check")
- return d.checkCluster()
- }
- return nil, 0, 0, ErrTooManyRetries
-}
-
-func (d *discovery) waitNodesRetry() ([]*client.Node, error) {
- if d.retries < nRetries {
- d.logAndBackoffForRetry("waiting for other nodes")
- nodes, n, index, err := d.checkCluster()
- if err != nil {
- return nil, err
- }
- return d.waitNodes(nodes, n, index)
- }
- return nil, ErrTooManyRetries
-}
-
-func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*client.Node, error) {
- if len(nodes) > size {
- nodes = nodes[:size]
- }
- // watch from the next index
- w := d.c.Watcher(d.cluster, &client.WatcherOptions{AfterIndex: index, Recursive: true})
- all := make([]*client.Node, len(nodes))
- copy(all, nodes)
- for _, n := range all {
- if path.Base(n.Key) == path.Base(d.selfKey()) {
- plog.Noticef("found self %s in the cluster", path.Base(d.selfKey()))
- } else {
- plog.Noticef("found peer %s in the cluster", path.Base(n.Key))
- }
- }
-
- // wait for others
- for len(all) < size {
- plog.Noticef("found %d peer(s), waiting for %d more", len(all), size-len(all))
- resp, err := w.Next(context.Background())
- if err != nil {
- if ce, ok := err.(*client.ClusterError); ok {
- plog.Error(ce.Detail())
- return d.waitNodesRetry()
- }
- return nil, err
- }
- plog.Noticef("found peer %s in the cluster", path.Base(resp.Node.Key))
- all = append(all, resp.Node)
- }
- plog.Noticef("found %d needed peer(s)", len(all))
- return all, nil
-}
-
-func (d *discovery) selfKey() string {
- return path.Join("/", d.cluster, d.id.String())
-}
-
-func nodesToCluster(ns []*client.Node, size int) (string, error) {
- s := make([]string, len(ns))
- for i, n := range ns {
- s[i] = n.Value
- }
- us := strings.Join(s, ",")
- m, err := types.NewURLsMap(us)
- if err != nil {
- return us, ErrInvalidURL
- }
- if m.Len() != size {
- return us, ErrDuplicateName
- }
- return us, nil
-}
-
-type sortableNodes struct{ Nodes []*client.Node }
-
-func (ns sortableNodes) Len() int { return len(ns.Nodes) }
-func (ns sortableNodes) Less(i, j int) bool {
- return ns.Nodes[i].CreatedIndex < ns.Nodes[j].CreatedIndex
-}
-func (ns sortableNodes) Swap(i, j int) { ns.Nodes[i], ns.Nodes[j] = ns.Nodes[j], ns.Nodes[i] }
diff --git a/vendor/github.com/coreos/etcd/discovery/srv.go b/vendor/github.com/coreos/etcd/discovery/srv.go
deleted file mode 100644
index c3d20ca9243..00000000000
--- a/vendor/github.com/coreos/etcd/discovery/srv.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package discovery - -import ( - "fmt" - "net" - "net/url" - "strings" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV - resolveTCPAddr = net.ResolveTCPAddr -) - -// SRVGetCluster gets the cluster information via DNS discovery. -// TODO(barakmich): Currently ignores priority and weight (as they don't make as much sense for a bootstrap) -// Also doesn't do any lookups for the token (though it could) -// Also sees each entry as a separate instance. -func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (string, string, error) { - tempName := int(0) - tcp2ap := make(map[string]url.URL) - - // First, resolve the apurls - for _, url := range apurls { - tcpAddr, err := resolveTCPAddr("tcp", url.Host) - if err != nil { - plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host) - return "", "", err - } - tcp2ap[tcpAddr.String()] = url - } - - stringParts := []string{} - updateNodeMap := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", dns) - if err != nil { - return err - } - for _, srv := range addrs { - port := fmt.Sprintf("%d", srv.Port) - host := net.JoinHostPort(srv.Target, port) - tcpAddr, err := resolveTCPAddr("tcp", host) - if err != nil { - plog.Warningf("couldn't resolve host %s during SRV discovery", host) - continue - } - n := "" - url, ok := tcp2ap[tcpAddr.String()] - if ok { - n = name - } - if n == "" { - n = fmt.Sprintf("%d", tempName) - tempName++ - } - // SRV records have a trailing dot but URL shouldn't. - shortHost := strings.TrimSuffix(srv.Target, ".") - urlHost := net.JoinHostPort(shortHost, port) - stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) - plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost) - if ok && url.Scheme != scheme { - plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) - } - } - return nil - } - - failCount := 0 - err := updateNodeMap("etcd-server-ssl", "https") - srvErr := make([]string, 2) - if err != nil { - srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err) - failCount++ - } - err = updateNodeMap("etcd-server", "http") - if err != nil { - srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err) - failCount++ - } - if failCount == 2 { - plog.Warningf(srvErr[0]) - plog.Warningf(srvErr[1]) - plog.Errorf("SRV discovery failed: too many errors querying DNS SRV records") - return "", "", err - } - return strings.Join(stringParts, ","), defaultToken, nil -} diff --git a/vendor/github.com/coreos/etcd/error/error.go b/vendor/github.com/coreos/etcd/error/error.go deleted file mode 100644 index 8cf83cc716a..00000000000 --- a/vendor/github.com/coreos/etcd/error/error.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package error describes errors in etcd project. When any change happens, -// Documentation/v2/errorcode.md needs to be updated correspondingly. -package error - -import ( - "encoding/json" - "fmt" - "net/http" -) - -var errors = map[int]string{ - // command related errors - EcodeKeyNotFound: "Key not found", - EcodeTestFailed: "Compare failed", //test and set - EcodeNotFile: "Not a file", - ecodeNoMorePeer: "Reached the max number of peers in the cluster", - EcodeNotDir: "Not a directory", - EcodeNodeExist: "Key already exists", // create - ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd", - EcodeRootROnly: "Root is read only", - EcodeDirNotEmpty: "Directory not empty", - ecodeExistingPeerAddr: "Peer address has existed", - EcodeUnauthorized: "The request requires user authentication", - - // Post form related errors - ecodeValueRequired: "Value is Required in POST form", - EcodePrevValueRequired: "PrevValue is Required in POST form", - EcodeTTLNaN: "The given TTL in POST form is not a number", - EcodeIndexNaN: "The given index in POST form is not a number", - ecodeValueOrTTLRequired: "Value or TTL is required in POST form", - ecodeTimeoutNaN: "The given timeout in POST form is not a number", - ecodeNameRequired: "Name is required in POST form", - ecodeIndexOrValueRequired: "Index or value is required", - ecodeIndexValueMutex: "Index and value cannot both be specified", - EcodeInvalidField: "Invalid field", - EcodeInvalidForm: "Invalid POST form", - EcodeRefreshValue: "Value provided on refresh", - EcodeRefreshTTLRequired: "A TTL must be provided on refresh", - - // raft related errors - EcodeRaftInternal: "Raft Internal Error", - EcodeLeaderElect: "During Leader Election", - - // etcd related errors - EcodeWatcherCleared: "watcher is cleared due to etcd recovery", - EcodeEventIndexCleared: "The event in requested index is outdated and cleared", - ecodeStandbyInternal: "Standby Internal Error", - ecodeInvalidActiveSize: "Invalid active size", - ecodeInvalidRemoveDelay: "Standby remove delay", - - // client related errors - ecodeClientInternal: "Client Internal Error", -} - -var errorStatus = map[int]int{ - EcodeKeyNotFound: http.StatusNotFound, - EcodeNotFile: http.StatusForbidden, - EcodeDirNotEmpty: http.StatusForbidden, - EcodeUnauthorized: http.StatusUnauthorized, - EcodeTestFailed: http.StatusPreconditionFailed, - EcodeNodeExist: http.StatusPreconditionFailed, - EcodeRaftInternal: http.StatusInternalServerError, - EcodeLeaderElect: http.StatusInternalServerError, -} - -const ( - EcodeKeyNotFound = 100 - EcodeTestFailed = 101 - EcodeNotFile = 102 - ecodeNoMorePeer = 103 - EcodeNotDir = 104 - EcodeNodeExist = 105 - ecodeKeyIsPreserved = 106 - EcodeRootROnly = 107 - EcodeDirNotEmpty = 108 - ecodeExistingPeerAddr = 109 - EcodeUnauthorized = 110 - - ecodeValueRequired = 200 - EcodePrevValueRequired = 201 - EcodeTTLNaN = 202 - EcodeIndexNaN = 203 - ecodeValueOrTTLRequired = 204 - ecodeTimeoutNaN = 205 - ecodeNameRequired = 206 - ecodeIndexOrValueRequired = 207 - ecodeIndexValueMutex = 208 - EcodeInvalidField = 209 - EcodeInvalidForm = 210 - 
EcodeRefreshValue = 211 - EcodeRefreshTTLRequired = 212 - - EcodeRaftInternal = 300 - EcodeLeaderElect = 301 - - EcodeWatcherCleared = 400 - EcodeEventIndexCleared = 401 - ecodeStandbyInternal = 402 - ecodeInvalidActiveSize = 403 - ecodeInvalidRemoveDelay = 404 - - ecodeClientInternal = 500 -) - -type Error struct { - ErrorCode int `json:"errorCode"` - Message string `json:"message"` - Cause string `json:"cause,omitempty"` - Index uint64 `json:"index"` -} - -func NewRequestError(errorCode int, cause string) *Error { - return NewError(errorCode, cause, 0) -} - -func NewError(errorCode int, cause string, index uint64) *Error { - return &Error{ - ErrorCode: errorCode, - Message: errors[errorCode], - Cause: cause, - Index: index, - } -} - -// Error is for the error interface -func (e Error) Error() string { - return e.Message + " (" + e.Cause + ")" -} - -func (e Error) toJsonString() string { - b, _ := json.Marshal(e) - return string(b) -} - -func (e Error) StatusCode() int { - status, ok := errorStatus[e.ErrorCode] - if !ok { - status = http.StatusBadRequest - } - return status -} - -func (e Error) WriteTo(w http.ResponseWriter) { - w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(e.StatusCode()) - fmt.Fprintln(w, e.toJsonString()) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go deleted file mode 100644 index ab8cee7cf89..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "sync" - - "github.com/coreos/etcd/version" - "github.com/coreos/go-semver/semver" - "github.com/coreos/pkg/capnslog" -) - -type Capability string - -const ( - AuthCapability Capability = "auth" - V3rpcCapability Capability = "v3rpc" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api") - - // capabilityMaps is a static map of version to capability map. - // the base capabilities is the set of capability 2.0 supports. - capabilityMaps = map[string]map[Capability]bool{ - "2.3.0": {AuthCapability: true}, - "3.0.0": {AuthCapability: true, V3rpcCapability: true}, - "3.1.0": {AuthCapability: true, V3rpcCapability: true}, - } - - enableMapMu sync.RWMutex - // enabledMap points to a map in capabilityMaps - enabledMap map[Capability]bool - - curVersion *semver.Version -) - -func init() { - enabledMap = make(map[Capability]bool) -} - -// UpdateCapability updates the enabledMap when the cluster version increases. 
-func UpdateCapability(v *semver.Version) { - if v == nil { - // if recovered but version was never set by cluster - return - } - enableMapMu.Lock() - if curVersion != nil && !curVersion.LessThan(*v) { - enableMapMu.Unlock() - return - } - curVersion = v - enabledMap = capabilityMaps[curVersion.String()] - enableMapMu.Unlock() - plog.Infof("enabled capabilities for version %s", version.Cluster(v.String())) -} - -func IsCapabilityEnabled(c Capability) bool { - enableMapMu.RLock() - defer enableMapMu.RUnlock() - if enabledMap == nil { - return false - } - return enabledMap[c] -} - -func EnableCapability(c Capability) { - enableMapMu.Lock() - defer enableMapMu.Unlock() - enabledMap[c] = true -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go deleted file mode 100644 index 87face4a139..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package api - -import ( - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - - "github.com/coreos/go-semver/semver" -) - -// Cluster is an interface representing a collection of members in one etcd cluster. -type Cluster interface { - // ID returns the cluster ID - ID() types.ID - // ClientURLs returns an aggregate set of all URLs on which this - // cluster is listening for client requests - ClientURLs() []string - // Members returns a slice of members sorted by their ID - Members() []*membership.Member - // Member retrieves a particular member based on ID, or nil if the - // member does not exist in the cluster - Member(id types.ID) *membership.Member - // IsIDRemoved checks whether the given ID has been removed from this - // cluster at some point in the past - IsIDRemoved(id types.ID) bool - // Version is the cluster-wide minimum major.minor version. - Version() *semver.Version -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/doc.go deleted file mode 100644 index f44881be663..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package api manages the capabilities and features that are exposed to clients by the etcd cluster. 
-package api diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go deleted file mode 100644 index fa0bcca5e84..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2http - -import ( - "fmt" - "net/http" - - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" -) - -func capabilityHandler(c api.Capability, fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if !api.IsCapabilityEnabled(c) { - notCapable(w, r, c) - return - } - fn(w, r) - } -} - -func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) { - herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c)) - if err := herr.WriteTo(w); err != nil { - plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr) - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go deleted file mode 100644 index 038f5417e67..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go +++ /dev/null @@ -1,822 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2http - -import ( - "encoding/json" - "errors" - "expvar" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "path" - "strconv" - "strings" - "time" - - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/auth" - "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - "github.com/coreos/pkg/capnslog" - "github.com/jonboulle/clockwork" - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" -) - -const ( - authPrefix = "/v2/auth" - keysPrefix = "/v2/keys" - deprecatedMachinesPrefix = "/v2/machines" - membersPrefix = "/v2/members" - statsPrefix = "/v2/stats" - varsPath = "/debug/vars" - metricsPath = "/metrics" - healthPath = "/health" - versionPath = "/version" - configPath = "/config" -) - -// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests. -func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler { - sec := auth.NewStore(server, timeout) - - kh := &keysHandler{ - sec: sec, - server: server, - cluster: server.Cluster(), - timer: server, - timeout: timeout, - clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, - } - - sh := &statsHandler{ - stats: server, - } - - mh := &membersHandler{ - sec: sec, - server: server, - cluster: server.Cluster(), - timeout: timeout, - clock: clockwork.NewRealClock(), - clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, - } - - dmh := &deprecatedMachinesHandler{ - cluster: server.Cluster(), - } - - sech := &authHandler{ - sec: sec, - cluster: server.Cluster(), - clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, - } - - mux := http.NewServeMux() - mux.HandleFunc("/", http.NotFound) - mux.Handle(healthPath, healthHandler(server)) - mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) - mux.Handle(keysPrefix, kh) - mux.Handle(keysPrefix+"/", kh) - mux.HandleFunc(statsPrefix+"/store", sh.serveStore) - mux.HandleFunc(statsPrefix+"/self", sh.serveSelf) - mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader) - mux.HandleFunc(varsPath, serveVars) - mux.HandleFunc(configPath+"/local/log", logHandleFunc) - mux.Handle(metricsPath, prometheus.Handler()) - mux.Handle(membersPrefix, mh) - mux.Handle(membersPrefix+"/", mh) - mux.Handle(deprecatedMachinesPrefix, dmh) - handleAuth(mux, sech) - - return requestLogger(mux) -} - -type keysHandler struct { - sec auth.Store - server etcdserver.Server - cluster api.Cluster - timer etcdserver.RaftTimer - timeout time.Duration - clientCertAuthEnabled bool -} - -func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") { - return - } - - w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) - - ctx, cancel := context.WithTimeout(context.Background(), h.timeout) - defer cancel() - clock := clockwork.NewRealClock() - startTime := clock.Now() - rr, noValueOnSuccess, err := parseKeyRequest(r, clock) - if err != nil { - writeKeyError(w, err) - return - } - // The path must be valid at this point (we've parsed the request successfully). 
- if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) { - writeKeyNoAuth(w) - return - } - if !rr.Wait { - reportRequestReceived(rr) - } - resp, err := h.server.Do(ctx, rr) - if err != nil { - err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix) - writeKeyError(w, err) - reportRequestFailed(rr, err) - return - } - switch { - case resp.Event != nil: - if err := writeKeyEvent(w, resp.Event, noValueOnSuccess, h.timer); err != nil { - // Should never be reached - plog.Errorf("error writing event (%v)", err) - } - reportRequestCompleted(rr, resp, startTime) - case resp.Watcher != nil: - ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout) - defer cancel() - handleKeyWatch(ctx, w, resp.Watcher, rr.Stream, h.timer) - default: - writeKeyError(w, errors.New("received response with no Event/Watcher!")) - } -} - -type deprecatedMachinesHandler struct { - cluster api.Cluster -} - -func (h *deprecatedMachinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET", "HEAD") { - return - } - endpoints := h.cluster.ClientURLs() - w.Write([]byte(strings.Join(endpoints, ", "))) -} - -type membersHandler struct { - sec auth.Store - server etcdserver.Server - cluster api.Cluster - timeout time.Duration - clock clockwork.Clock - clientCertAuthEnabled bool -} - -func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") { - return - } - if !hasWriteRootAccess(h.sec, r, h.clientCertAuthEnabled) { - writeNoAuth(w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) - - ctx, cancel := context.WithTimeout(context.Background(), h.timeout) - defer cancel() - - switch r.Method { - case "GET": - switch trimPrefix(r.URL.Path, membersPrefix) { - case "": - mc := newMemberCollection(h.cluster.Members()) - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(mc); err != nil { - plog.Warningf("failed to encode members response (%v)", err) - } - case "leader": - id := h.server.Leader() - if id == 0 { - writeError(w, r, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election")) - return - } - m := newMember(h.cluster.Member(id)) - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(m); err != nil { - plog.Warningf("failed to encode members response (%v)", err) - } - default: - writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, "Not found")) - } - case "POST": - req := httptypes.MemberCreateRequest{} - if ok := unmarshalRequest(r, &req, w); !ok { - return - } - now := h.clock.Now() - m := membership.NewMember("", req.PeerURLs, "", &now) - err := h.server.AddMember(ctx, *m) - switch { - case err == membership.ErrIDExists || err == membership.ErrPeerURLexists: - writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) - return - case err != nil: - plog.Errorf("error adding member %s (%v)", m.ID, err) - writeError(w, r, err) - return - } - res := newMember(m) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusCreated) - if err := json.NewEncoder(w).Encode(res); err != nil { - plog.Warningf("failed to encode members response (%v)", err) - } - case "DELETE": - id, ok := getID(r.URL.Path, w) - if !ok { - return - } - err := h.server.RemoveMember(ctx, uint64(id)) - switch { - case err == membership.ErrIDRemoved: - writeError(w, r, httptypes.NewHTTPError(http.StatusGone, 
fmt.Sprintf("Member permanently removed: %s", id))) - case err == membership.ErrIDNotFound: - writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id))) - case err != nil: - plog.Errorf("error removing member %s (%v)", id, err) - writeError(w, r, err) - default: - w.WriteHeader(http.StatusNoContent) - } - case "PUT": - id, ok := getID(r.URL.Path, w) - if !ok { - return - } - req := httptypes.MemberUpdateRequest{} - if ok := unmarshalRequest(r, &req, w); !ok { - return - } - m := membership.Member{ - ID: id, - RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()}, - } - err := h.server.UpdateMember(ctx, m) - switch { - case err == membership.ErrPeerURLexists: - writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) - case err == membership.ErrIDNotFound: - writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id))) - case err != nil: - plog.Errorf("error updating member %s (%v)", m.ID, err) - writeError(w, r, err) - default: - w.WriteHeader(http.StatusNoContent) - } - } -} - -type statsHandler struct { - stats stats.Stats -} - -func (h *statsHandler) serveStore(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(h.stats.StoreStats()) -} - -func (h *statsHandler) serveSelf(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(h.stats.SelfStats()) -} - -func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - stats := h.stats.LeaderStats() - if stats == nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(stats) -} - -func serveVars(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} - -func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if uint64(server.Leader()) == raft.None { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"health": "true"}`)) - } -} - -func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - v := c.Version() - if v != nil { - fn(w, r, v.String()) - } else { - fn(w, r, "not_decided") - } - } -} - -func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { - if !allowMethod(w, r.Method, "GET") { - return - } - vs := version.Versions{ - Server: version.Version, - Cluster: clusterV, - } - - w.Header().Set("Content-Type", "application/json") - b, err := 
json.Marshal(&vs) - if err != nil { - plog.Panicf("cannot marshal versions to json (%v)", err) - } - w.Write(b) -} - -func logHandleFunc(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "PUT") { - return - } - - in := struct{ Level string }{} - - d := json.NewDecoder(r.Body) - if err := d.Decode(&in); err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) - return - } - - logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) - if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) - return - } - - plog.Noticef("globalLogLevel set to %q", logl.String()) - capnslog.SetGlobalLogLevel(logl) - w.WriteHeader(http.StatusNoContent) -} - -// parseKeyRequest converts a received http.Request on keysPrefix to -// a server Request, performing validation of supplied fields as appropriate. -// If any validation fails, an empty Request and non-nil error is returned. -func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) { - noValueOnSuccess := false - emptyReq := etcdserverpb.Request{} - - err := r.ParseForm() - if err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidForm, - err.Error(), - ) - } - - if !strings.HasPrefix(r.URL.Path, keysPrefix) { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidForm, - "incorrect key prefix", - ) - } - p := path.Join(etcdserver.StoreKeysPrefix, r.URL.Path[len(keysPrefix):]) - - var pIdx, wIdx uint64 - if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeIndexNaN, - `invalid value for "prevIndex"`, - ) - } - if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeIndexNaN, - `invalid value for "waitIndex"`, - ) - } - - var rec, sort, wait, dir, quorum, stream bool - if rec, err = getBool(r.Form, "recursive"); err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, - `invalid value for "recursive"`, - ) - } - if sort, err = getBool(r.Form, "sorted"); err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, - `invalid value for "sorted"`, - ) - } - if wait, err = getBool(r.Form, "wait"); err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, - `invalid value for "wait"`, - ) - } - // TODO(jonboulle): define what parameters dir is/isn't compatible with? 
- if dir, err = getBool(r.Form, "dir"); err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, - `invalid value for "dir"`, - ) - } - if quorum, err = getBool(r.Form, "quorum"); err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, - `invalid value for "quorum"`, - ) - } - if stream, err = getBool(r.Form, "stream"); err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, - `invalid value for "stream"`, - ) - } - - if wait && r.Method != "GET" { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, - `"wait" can only be used with GET requests`, - ) - } - - pV := r.FormValue("prevValue") - if _, ok := r.Form["prevValue"]; ok && pV == "" { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodePrevValueRequired, - `"prevValue" cannot be empty`, - ) - } - - if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, - `invalid value for "noValueOnSuccess"`, - ) - } - - // TTL is nullable, so leave it null if not specified - // or an empty string - var ttl *uint64 - if len(r.FormValue("ttl")) > 0 { - i, err := getUint64(r.Form, "ttl") - if err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeTTLNaN, - `invalid value for "ttl"`, - ) - } - ttl = &i - } - - // prevExist is nullable, so leave it null if not specified - var pe *bool - if _, ok := r.Form["prevExist"]; ok { - bv, err := getBool(r.Form, "prevExist") - if err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, - "invalid value for prevExist", - ) - } - pe = &bv - } - - // refresh is nullable, so leave it null if not specified - var refresh *bool - if _, ok := r.Form["refresh"]; ok { - bv, err := getBool(r.Form, "refresh") - if err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, - "invalid value for refresh", - ) - } - refresh = &bv - if refresh != nil && *refresh { - val := r.FormValue("value") - if _, ok := r.Form["value"]; ok && val != "" { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeRefreshValue, - `A value was provided on a refresh`, - ) - } - if ttl == nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeRefreshTTLRequired, - `No TTL value set`, - ) - } - } - } - - rr := etcdserverpb.Request{ - Method: r.Method, - Path: p, - Val: r.FormValue("value"), - Dir: dir, - PrevValue: pV, - PrevIndex: pIdx, - PrevExist: pe, - Wait: wait, - Since: wIdx, - Recursive: rec, - Sorted: sort, - Quorum: quorum, - Stream: stream, - } - - if pe != nil { - rr.PrevExist = pe - } - - if refresh != nil { - rr.Refresh = refresh - } - - // Null TTL is equivalent to unset Expiration - if ttl != nil { - expr := time.Duration(*ttl) * time.Second - rr.Expiration = clock.Now().Add(expr).UnixNano() - } - - return rr, noValueOnSuccess, nil -} - -// writeKeyEvent trims the prefix of key path in a single Event under -// StoreKeysPrefix, serializes it and writes the resulting JSON to the given -// ResponseWriter, along with the appropriate headers. 
-func writeKeyEvent(w http.ResponseWriter, ev *store.Event, noValueOnSuccess bool, rt etcdserver.RaftTimer) error { - if ev == nil { - return errors.New("cannot write empty Event!") - } - w.Header().Set("Content-Type", "application/json") - w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex)) - w.Header().Set("X-Raft-Index", fmt.Sprint(rt.Index())) - w.Header().Set("X-Raft-Term", fmt.Sprint(rt.Term())) - - if ev.IsCreated() { - w.WriteHeader(http.StatusCreated) - } - - ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix) - if noValueOnSuccess && - (ev.Action == store.Set || ev.Action == store.CompareAndSwap || - ev.Action == store.Create || ev.Action == store.Update) { - ev.Node = nil - ev.PrevNode = nil - } - return json.NewEncoder(w).Encode(ev) -} - -func writeKeyNoAuth(w http.ResponseWriter) { - e := etcdErr.NewError(etcdErr.EcodeUnauthorized, "Insufficient credentials", 0) - e.WriteTo(w) -} - -// writeKeyError logs and writes the given Error to the ResponseWriter. -// If Error is not an etcdErr, the error will be converted to an etcd error. -func writeKeyError(w http.ResponseWriter, err error) { - if err == nil { - return - } - switch e := err.(type) { - case *etcdErr.Error: - e.WriteTo(w) - default: - switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost: - mlog.MergeError(err) - default: - mlog.MergeErrorf("got unexpected response error (%v)", err) - } - ee := etcdErr.NewError(etcdErr.EcodeRaftInternal, err.Error(), 0) - ee.WriteTo(w) - } -} - -func handleKeyWatch(ctx context.Context, w http.ResponseWriter, wa store.Watcher, stream bool, rt etcdserver.RaftTimer) { - defer wa.Remove() - ech := wa.EventChan() - var nch <-chan bool - if x, ok := w.(http.CloseNotifier); ok { - nch = x.CloseNotify() - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex())) - w.Header().Set("X-Raft-Index", fmt.Sprint(rt.Index())) - w.Header().Set("X-Raft-Term", fmt.Sprint(rt.Term())) - w.WriteHeader(http.StatusOK) - - // Ensure headers are flushed early, in case of long polling - w.(http.Flusher).Flush() - - for { - select { - case <-nch: - // Client closed connection. Nothing to do. - return - case <-ctx.Done(): - // Timed out. net/http will close the connection for us, so nothing to do. - return - case ev, ok := <-ech: - if !ok { - // If the channel is closed this may be an indication of - // that notifications are much more than we are able to - // send to the client in time. Then we simply end streaming. 
- return - } - ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix) - if err := json.NewEncoder(w).Encode(ev); err != nil { - // Should never be reached - plog.Warningf("error writing event (%v)", err) - return - } - if !stream { - return - } - w.(http.Flusher).Flush() - } - } -} - -func trimEventPrefix(ev *store.Event, prefix string) *store.Event { - if ev == nil { - return nil - } - // Since the *Event may reference one in the store history - // history, we must copy it before modifying - e := ev.Clone() - trimNodeExternPrefix(e.Node, prefix) - trimNodeExternPrefix(e.PrevNode, prefix) - return e -} - -func trimNodeExternPrefix(n *store.NodeExtern, prefix string) { - if n == nil { - return - } - n.Key = strings.TrimPrefix(n.Key, prefix) - for _, nn := range n.Nodes { - trimNodeExternPrefix(nn, prefix) - } -} - -func trimErrorPrefix(err error, prefix string) error { - if e, ok := err.(*etcdErr.Error); ok { - e.Cause = strings.TrimPrefix(e.Cause, prefix) - } - return err -} - -func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool { - ctype := r.Header.Get("Content-Type") - semicolonPosition := strings.Index(ctype, ";") - if semicolonPosition != -1 { - ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition])) - } - if ctype != "application/json" { - writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype))) - return false - } - b, err := ioutil.ReadAll(r.Body) - if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) - return false - } - if err := req.UnmarshalJSON(b); err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) - return false - } - return true -} - -func getID(p string, w http.ResponseWriter) (types.ID, bool) { - idStr := trimPrefix(p, membersPrefix) - if idStr == "" { - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return 0, false - } - id, err := types.IDFromString(idStr) - if err != nil { - writeError(w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr))) - return 0, false - } - return id, true -} - -// getUint64 extracts a uint64 by the given key from a Form. If the key does -// not exist in the form, 0 is returned. If the key exists but the value is -// badly formed, an error is returned. If multiple values are present only the -// first is considered. -func getUint64(form url.Values, key string) (i uint64, err error) { - if vals, ok := form[key]; ok { - i, err = strconv.ParseUint(vals[0], 10, 64) - } - return -} - -// getBool extracts a bool by the given key from a Form. If the key does not -// exist in the form, false is returned. If the key exists but the value is -// badly formed, an error is returned. If multiple values are present only the -// first is considered. 
-func getBool(form url.Values, key string) (b bool, err error) { - if vals, ok := form[key]; ok { - b, err = strconv.ParseBool(vals[0]) - } - return -} - -// trimPrefix removes a given prefix and any slash following the prefix -// e.g.: trimPrefix("foo", "foo") == trimPrefix("foo/", "foo") == "" -func trimPrefix(p, prefix string) (s string) { - s = strings.TrimPrefix(p, prefix) - s = strings.TrimPrefix(s, "/") - return -} - -func newMemberCollection(ms []*membership.Member) *httptypes.MemberCollection { - c := httptypes.MemberCollection(make([]httptypes.Member, len(ms))) - - for i, m := range ms { - c[i] = newMember(m) - } - - return &c -} - -func newMember(m *membership.Member) httptypes.Member { - tm := httptypes.Member{ - ID: m.ID.String(), - Name: m.Name, - PeerURLs: make([]string, len(m.PeerURLs)), - ClientURLs: make([]string, len(m.ClientURLs)), - } - - copy(tm.PeerURLs, m.PeerURLs) - copy(tm.ClientURLs, m.ClientURLs) - - return tm -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go deleted file mode 100644 index 606e2e00b3e..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2http - -import ( - "encoding/json" - "net/http" - "path" - "strings" - - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/auth" -) - -type authHandler struct { - sec auth.Store - cluster api.Cluster - clientCertAuthEnabled bool -} - -func hasWriteRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { - if r.Method == "GET" || r.Method == "HEAD" { - return true - } - return hasRootAccess(sec, r, clientCertAuthEnabled) -} - -func userFromBasicAuth(sec auth.Store, r *http.Request) *auth.User { - username, password, ok := r.BasicAuth() - if !ok { - plog.Warningf("auth: malformed basic auth encoding") - return nil - } - user, err := sec.GetUser(username) - if err != nil { - return nil - } - - ok = sec.CheckPassword(user, password) - if !ok { - plog.Warningf("auth: incorrect password for user: %s", username) - return nil - } - return &user -} - -func userFromClientCertificate(sec auth.Store, r *http.Request) *auth.User { - if r.TLS == nil { - return nil - } - - for _, chains := range r.TLS.VerifiedChains { - for _, chain := range chains { - plog.Debugf("auth: found common name %s.\n", chain.Subject.CommonName) - user, err := sec.GetUser(chain.Subject.CommonName) - if err == nil { - plog.Debugf("auth: authenticated user %s by cert common name.", user.User) - return &user - } - } - } - return nil -} - -func hasRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { - if sec == nil { - // No store means no auth available, eg, tests. 
- return true - } - if !sec.AuthEnabled() { - return true - } - - var rootUser *auth.User - if r.Header.Get("Authorization") == "" && clientCertAuthEnabled { - rootUser = userFromClientCertificate(sec, r) - if rootUser == nil { - return false - } - } else { - rootUser = userFromBasicAuth(sec, r) - if rootUser == nil { - return false - } - } - - for _, role := range rootUser.Roles { - if role == auth.RootRoleName { - return true - } - } - plog.Warningf("auth: user %s does not have the %s role for resource %s.", rootUser.User, auth.RootRoleName, r.URL.Path) - return false -} - -func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool { - if sec == nil { - // No store means no auth available, eg, tests. - return true - } - if !sec.AuthEnabled() { - return true - } - - var user *auth.User - if r.Header.Get("Authorization") == "" { - if clientCertAuthEnabled { - user = userFromClientCertificate(sec, r) - } - if user == nil { - return hasGuestAccess(sec, r, key) - } - } else { - user = userFromBasicAuth(sec, r) - if user == nil { - return false - } - } - - writeAccess := r.Method != "GET" && r.Method != "HEAD" - for _, roleName := range user.Roles { - role, err := sec.GetRole(roleName) - if err != nil { - continue - } - if recursive { - if role.HasRecursiveAccess(key, writeAccess) { - return true - } - } else if role.HasKeyAccess(key, writeAccess) { - return true - } - } - plog.Warningf("auth: invalid access for user %s on key %s.", user.User, key) - return false -} - -func hasGuestAccess(sec auth.Store, r *http.Request, key string) bool { - writeAccess := r.Method != "GET" && r.Method != "HEAD" - role, err := sec.GetRole(auth.GuestRoleName) - if err != nil { - return false - } - if role.HasKeyAccess(key, writeAccess) { - return true - } - plog.Warningf("auth: invalid access for unauthenticated user on resource %s.", key) - return false -} - -func writeNoAuth(w http.ResponseWriter, r *http.Request) { - herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials") - if err := herr.WriteTo(w); err != nil { - plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr) - } -} - -func handleAuth(mux *http.ServeMux, sh *authHandler) { - mux.HandleFunc(authPrefix+"/roles", capabilityHandler(api.AuthCapability, sh.baseRoles)) - mux.HandleFunc(authPrefix+"/roles/", capabilityHandler(api.AuthCapability, sh.handleRoles)) - mux.HandleFunc(authPrefix+"/users", capabilityHandler(api.AuthCapability, sh.baseUsers)) - mux.HandleFunc(authPrefix+"/users/", capabilityHandler(api.AuthCapability, sh.handleUsers)) - mux.HandleFunc(authPrefix+"/enable", capabilityHandler(api.AuthCapability, sh.enableDisable)) -} - -func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(w, r) - return - } - - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - roles, err := sh.sec.AllRoles() - if err != nil { - writeError(w, r, err) - return - } - if roles == nil { - roles = make([]string, 0) - } - - err = r.ParseForm() - if err != nil { - writeError(w, r, err) - return - } - - var rolesCollections struct { - Roles []auth.Role `json:"roles"` - } - for _, roleName := range roles { - var role auth.Role - role, err = sh.sec.GetRole(roleName) - if err != nil { - writeError(w, r, err) - return - } - rolesCollections.Roles = 
append(rolesCollections.Roles, role) - } - err = json.NewEncoder(w).Encode(rolesCollections) - - if err != nil { - plog.Warningf("baseRoles error encoding on %s", r.URL) - writeError(w, r, err) - return - } -} - -func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) { - subpath := path.Clean(r.URL.Path[len(authPrefix):]) - // Split "/roles/rolename/command". - // First item is an empty string, second is "roles" - pieces := strings.Split(subpath, "/") - if len(pieces) == 2 { - sh.baseRoles(w, r) - return - } - if len(pieces) != 3 { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) - return - } - sh.forRole(w, r, pieces[2]) -} - -func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role string) { - if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { - return - } - if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case "GET": - data, err := sh.sec.GetRole(role) - if err != nil { - writeError(w, r, err) - return - } - err = json.NewEncoder(w).Encode(data) - if err != nil { - plog.Warningf("forRole error encoding on %s", r.URL) - return - } - return - case "PUT": - var in auth.Role - err := json.NewDecoder(r.Body).Decode(&in) - if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) - return - } - if in.Role != role { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL")) - return - } - - var out auth.Role - - // create - if in.Grant.IsEmpty() && in.Revoke.IsEmpty() { - err = sh.sec.CreateRole(in) - if err != nil { - writeError(w, r, err) - return - } - w.WriteHeader(http.StatusCreated) - out = in - } else { - if !in.Permissions.IsEmpty() { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke")) - return - } - out, err = sh.sec.UpdateRole(in) - if err != nil { - writeError(w, r, err) - return - } - w.WriteHeader(http.StatusOK) - } - - err = json.NewEncoder(w).Encode(out) - if err != nil { - plog.Warningf("forRole error encoding on %s", r.URL) - return - } - return - case "DELETE": - err := sh.sec.DeleteRole(role) - if err != nil { - writeError(w, r, err) - return - } - } -} - -type userWithRoles struct { - User string `json:"user"` - Roles []auth.Role `json:"roles,omitempty"` -} - -type usersCollections struct { - Users []userWithRoles `json:"users"` -} - -func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - users, err := sh.sec.AllUsers() - if err != nil { - writeError(w, r, err) - return - } - if users == nil { - users = make([]string, 0) - } - - err = r.ParseForm() - if err != nil { - writeError(w, r, err) - return - } - - ucs := usersCollections{} - for _, userName := range users { - var user auth.User - user, err = sh.sec.GetUser(userName) - if err != nil { - writeError(w, r, err) - return - } - - uwr := userWithRoles{User: user.User} - for _, roleName := range user.Roles { - var role auth.Role - role, err = sh.sec.GetRole(roleName) - if err != nil { - continue - } - 
uwr.Roles = append(uwr.Roles, role) - } - - ucs.Users = append(ucs.Users, uwr) - } - err = json.NewEncoder(w).Encode(ucs) - - if err != nil { - plog.Warningf("baseUsers error encoding on %s", r.URL) - writeError(w, r, err) - return - } -} - -func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) { - subpath := path.Clean(r.URL.Path[len(authPrefix):]) - // Split "/users/username". - // First item is an empty string, second is "users" - pieces := strings.Split(subpath, "/") - if len(pieces) == 2 { - sh.baseUsers(w, r) - return - } - if len(pieces) != 3 { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) - return - } - sh.forUser(w, r, pieces[2]) -} - -func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user string) { - if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { - return - } - if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case "GET": - u, err := sh.sec.GetUser(user) - if err != nil { - writeError(w, r, err) - return - } - - err = r.ParseForm() - if err != nil { - writeError(w, r, err) - return - } - - uwr := userWithRoles{User: u.User} - for _, roleName := range u.Roles { - var role auth.Role - role, err = sh.sec.GetRole(roleName) - if err != nil { - writeError(w, r, err) - return - } - uwr.Roles = append(uwr.Roles, role) - } - err = json.NewEncoder(w).Encode(uwr) - - if err != nil { - plog.Warningf("forUser error encoding on %s", r.URL) - return - } - return - case "PUT": - var u auth.User - err := json.NewDecoder(r.Body).Decode(&u) - if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) - return - } - if u.User != user { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL")) - return - } - - var ( - out auth.User - created bool - ) - - if len(u.Grant) == 0 && len(u.Revoke) == 0 { - // create or update - if len(u.Roles) != 0 { - out, err = sh.sec.CreateUser(u) - } else { - // if user passes in both password and roles, we are unsure about his/her - // intention. 
- out, created, err = sh.sec.CreateOrUpdateUser(u) - } - - if err != nil { - writeError(w, r, err) - return - } - } else { - // update case - if len(u.Roles) != 0 { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke")) - return - } - out, err = sh.sec.UpdateUser(u) - if err != nil { - writeError(w, r, err) - return - } - } - - if created { - w.WriteHeader(http.StatusCreated) - } else { - w.WriteHeader(http.StatusOK) - } - - out.Password = "" - - err = json.NewEncoder(w).Encode(out) - if err != nil { - plog.Warningf("forUser error encoding on %s", r.URL) - return - } - return - case "DELETE": - err := sh.sec.DeleteUser(user) - if err != nil { - writeError(w, r, err) - return - } - } -} - -type enabled struct { - Enabled bool `json:"enabled"` -} - -func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { - return - } - if !hasWriteRootAccess(sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - isEnabled := sh.sec.AuthEnabled() - switch r.Method { - case "GET": - jsonDict := enabled{isEnabled} - err := json.NewEncoder(w).Encode(jsonDict) - if err != nil { - plog.Warningf("error encoding auth state on %s", r.URL) - } - case "PUT": - err := sh.sec.EnableAuth() - if err != nil { - writeError(w, r, err) - return - } - case "DELETE": - err := sh.sec.DisableAuth() - if err != nil { - writeError(w, r, err) - return - } - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go deleted file mode 100644 index 475c4b1f95a..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v2http provides etcd client and server implementations. -package v2http diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go deleted file mode 100644 index 62c99e19d4a..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2http - -import ( - "math" - "net/http" - "strings" - "time" - - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - - "github.com/coreos/etcd/etcdserver/auth" - "github.com/coreos/etcd/pkg/logutil" - "github.com/coreos/pkg/capnslog" -) - -const ( - // time to wait for a Watch request - defaultWatchTimeout = time.Duration(math.MaxInt64) -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http") - mlog = logutil.NewMergeLogger(plog) -) - -// writeError logs and writes the given Error to the ResponseWriter -// If Error is an etcdErr, it is rendered to the ResponseWriter -// Otherwise, it is assumed to be a StatusInternalServerError -func writeError(w http.ResponseWriter, r *http.Request, err error) { - if err == nil { - return - } - switch e := err.(type) { - case *etcdErr.Error: - e.WriteTo(w) - case *httptypes.HTTPError: - if et := e.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } - case auth.Error: - herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error()) - if et := herr.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } - default: - switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: - mlog.MergeError(err) - default: - mlog.MergeErrorf("got unexpected response error (%v)", err) - } - herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") - if et := herr.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } - } -} - -// allowMethod verifies that the given method is one of the allowed methods, -// and if not, it writes an error to w. A boolean is returned indicating -// whether or not the method is allowed. -func allowMethod(w http.ResponseWriter, m string, ms ...string) bool { - for _, meth := range ms { - if m == meth { - return true - } - } - w.Header().Set("Allow", strings.Join(ms, ",")) - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return false -} - -func requestLogger(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - plog.Debugf("[%s] %s remote:%s", r.Method, r.RequestURI, r.RemoteAddr) - handler.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go deleted file mode 100644 index 0657604ca97..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package httptypes - -import ( - "encoding/json" - "net/http" - - "github.com/coreos/pkg/capnslog" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http/httptypes") -) - -type HTTPError struct { - Message string `json:"message"` - // Code is the HTTP status code - Code int `json:"-"` -} - -func (e HTTPError) Error() string { - return e.Message -} - -func (e HTTPError) WriteTo(w http.ResponseWriter) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(e.Code) - b, err := json.Marshal(e) - if err != nil { - plog.Panicf("marshal HTTPError should never fail (%v)", err) - } - if _, err := w.Write(b); err != nil { - return err - } - return nil -} - -func NewHTTPError(code int, m string) *HTTPError { - return &HTTPError{ - Message: m, - Code: code, - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go deleted file mode 100644 index 738d74432f8..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package httptypes defines how etcd's HTTP API entities are serialized to and -// deserialized from JSON. -package httptypes - -import ( - "encoding/json" - - "github.com/coreos/etcd/pkg/types" -) - -type Member struct { - ID string `json:"id"` - Name string `json:"name"` - PeerURLs []string `json:"peerURLs"` - ClientURLs []string `json:"clientURLs"` -} - -type MemberCreateRequest struct { - PeerURLs types.URLs -} - -type MemberUpdateRequest struct { - MemberCreateRequest -} - -func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error { - s := struct { - PeerURLs []string `json:"peerURLs"` - }{} - - err := json.Unmarshal(data, &s) - if err != nil { - return err - } - - urls, err := types.NewURLs(s.PeerURLs) - if err != nil { - return err - } - - m.PeerURLs = urls - return nil -} - -type MemberCollection []Member - -func (c *MemberCollection) MarshalJSON() ([]byte, error) { - d := struct { - Members []Member `json:"members"` - }{ - Members: []Member(*c), - } - - return json.Marshal(d) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go deleted file mode 100644 index fdfb0c6070f..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2http - -import ( - "strconv" - "time" - - "net/http" - - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/prometheus/client_golang/prometheus" -) - -var ( - incomingEvents = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "http", - Name: "received_total", - Help: "Counter of requests received into the system (successfully parsed and authd).", - }, []string{"method"}) - - failedEvents = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "http", - Name: "failed_total", - Help: "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) and code (400, 500 etc.).", - }, []string{"method", "code"}) - - successfulEventsHandlingTime = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "http", - Name: "successful_duration_seconds", - Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).", - Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13), - }, []string{"method"}) -) - -func init() { - prometheus.MustRegister(incomingEvents) - prometheus.MustRegister(failedEvents) - prometheus.MustRegister(successfulEventsHandlingTime) -} - -func reportRequestReceived(request etcdserverpb.Request) { - incomingEvents.WithLabelValues(methodFromRequest(request)).Inc() -} - -func reportRequestCompleted(request etcdserverpb.Request, response etcdserver.Response, startTime time.Time) { - method := methodFromRequest(request) - successfulEventsHandlingTime.WithLabelValues(method).Observe(time.Since(startTime).Seconds()) -} - -func reportRequestFailed(request etcdserverpb.Request, err error) { - method := methodFromRequest(request) - failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc() -} - -func methodFromRequest(request etcdserverpb.Request) string { - if request.Method == "GET" && request.Quorum { - return "QGET" - } - return request.Method -} - -func codeFromError(err error) int { - if err == nil { - return http.StatusInternalServerError - } - switch e := err.(type) { - case *etcdErr.Error: - return (*etcdErr.Error)(e).StatusCode() - case *httptypes.HTTPError: - return (*httptypes.HTTPError)(e).Code - default: - return http.StatusInternalServerError - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go deleted file mode 100644 index a1abadba8e7..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2http - -import ( - "encoding/json" - "net/http" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/lease/leasehttp" - "github.com/coreos/etcd/rafthttp" -) - -const ( - peerMembersPrefix = "/members" -) - -// NewPeerHandler generates an http.Handler to handle etcd peer requests. -func NewPeerHandler(s *etcdserver.EtcdServer) http.Handler { - var lh http.Handler - l := s.Lessor() - if l != nil { - lh = leasehttp.NewHandler(l, func() <-chan struct{} { return s.ApplyWait() }) - } - return newPeerHandler(s.Cluster(), s.RaftHandler(), lh) -} - -func newPeerHandler(cluster api.Cluster, raftHandler http.Handler, leaseHandler http.Handler) http.Handler { - mh := &peerMembersHandler{ - cluster: cluster, - } - - mux := http.NewServeMux() - mux.HandleFunc("/", http.NotFound) - mux.Handle(rafthttp.RaftPrefix, raftHandler) - mux.Handle(rafthttp.RaftPrefix+"/", raftHandler) - mux.Handle(peerMembersPrefix, mh) - if leaseHandler != nil { - mux.Handle(leasehttp.LeasePrefix, leaseHandler) - mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler) - } - mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion)) - return mux -} - -type peerMembersHandler struct { - cluster api.Cluster -} - -func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) - - if r.URL.Path != peerMembersPrefix { - http.Error(w, "bad path", http.StatusBadRequest) - return - } - ms := h.cluster.Members() - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(ms); err != nil { - plog.Warningf("failed to encode members response (%v)", err) - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go deleted file mode 100644 index e66c5261d0a..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3rpc - -import ( - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" -) - -type AuthServer struct { - authenticator etcdserver.Authenticator -} - -func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer { - return &AuthServer{authenticator: s} -} - -func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - resp, err := as.authenticator.AuthEnable(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - resp, err := as.authenticator.AuthDisable(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - resp, err := as.authenticator.Authenticate(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := as.authenticator.RoleAdd(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := as.authenticator.RoleDelete(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := as.authenticator.RoleGet(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := as.authenticator.RoleList(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := as.authenticator.RoleRevokePermission(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := as.authenticator.RoleGrantPermission(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := as.authenticator.UserAdd(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := as.authenticator.UserDelete(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := as.authenticator.UserGet(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := as.authenticator.UserList(ctx, r) - if err != nil { - return nil, togRPCError(err) - 
} - return resp, nil -} - -func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := as.authenticator.UserGrantRole(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := as.authenticator.UserRevokeRole(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} - -func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := as.authenticator.UserChangePassword(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - return resp, nil -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go deleted file mode 100644 index 17a2c87ae61..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import "github.com/gogo/protobuf/proto" - -type codec struct{} - -func (c *codec) Marshal(v interface{}) ([]byte, error) { - b, err := proto.Marshal(v.(proto.Message)) - sentBytes.Add(float64(len(b))) - return b, err -} - -func (c *codec) Unmarshal(data []byte, v interface{}) error { - receivedBytes.Add(float64(len(data))) - return proto.Unmarshal(data, v.(proto.Message)) -} - -func (c *codec) String() string { - return "proto" -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go deleted file mode 100644 index 88174e3bac2..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3rpc - -import ( - "crypto/tls" - - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" -) - -func init() { - grpclog.SetLogger(plog) -} - -func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { - var opts []grpc.ServerOption - opts = append(opts, grpc.CustomCodec(&codec{})) - if tls != nil { - opts = append(opts, grpc.Creds(credentials.NewTLS(tls))) - } - opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) - opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) - - grpcServer := grpc.NewServer(opts...) - pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) - pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) - pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) - pb.RegisterClusterServer(grpcServer, NewClusterServer(s)) - pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) - pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) - - return grpcServer -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go deleted file mode 100644 index d6d7f35d50f..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type header struct { - clusterID int64 - memberID int64 - raftTimer etcdserver.RaftTimer - rev func() int64 -} - -func newHeader(s *etcdserver.EtcdServer) header { - return header{ - clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), - raftTimer: s, - rev: func() int64 { return s.KV().Rev() }, - } -} - -// fill populates pb.ResponseHeader using etcdserver information -func (h *header) fill(rh *pb.ResponseHeader) { - rh.ClusterId = uint64(h.clusterID) - rh.MemberId = uint64(h.memberID) - rh.RaftTerm = h.raftTimer.Term() - if rh.Revision == 0 { - rh.Revision = h.rev() - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go deleted file mode 100644 index 29aef2914a5..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "sync" - "time" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - - prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -const ( - maxNoLeaderCnt = 3 -) - -type streamsMap struct { - mu sync.Mutex - streams map[grpc.ServerStream]struct{} -} - -func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - if !api.IsCapabilityEnabled(api.V3rpcCapability) { - return nil, rpctypes.ErrGRPCNotCapable - } - - md, ok := metadata.FromContext(ctx) - if ok { - if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { - if s.Leader() == types.ID(raft.None) { - return nil, rpctypes.ErrGRPCNoLeader - } - } - } - - return prometheus.UnaryServerInterceptor(ctx, req, info, handler) - } -} - -func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor { - smap := monitorLeader(s) - - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if !api.IsCapabilityEnabled(api.V3rpcCapability) { - return rpctypes.ErrGRPCNotCapable - } - - md, ok := metadata.FromContext(ss.Context()) - if ok { - if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { - if s.Leader() == types.ID(raft.None) { - return rpctypes.ErrGRPCNoLeader - } - - cctx, cancel := context.WithCancel(ss.Context()) - ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss} - - smap.mu.Lock() - smap.streams[ss] = struct{}{} - smap.mu.Unlock() - - defer func() { - smap.mu.Lock() - delete(smap.streams, ss) - smap.mu.Unlock() - cancel() - }() - - } - } - - return prometheus.StreamServerInterceptor(srv, ss, info, handler) - } -} - -type serverStreamWithCtx struct { - grpc.ServerStream - ctx context.Context - cancel *context.CancelFunc -} - -func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx } - -func monitorLeader(s *etcdserver.EtcdServer) *streamsMap { - smap := &streamsMap{ - streams: make(map[grpc.ServerStream]struct{}), - } - - go func() { - election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond - noLeaderCnt := 0 - - for { - select { - case <-s.StopNotify(): - return - case <-time.After(election): - if s.Leader() == types.ID(raft.None) { - noLeaderCnt++ - } else { - noLeaderCnt = 0 - } - - // We are more conservative on canceling existing streams. Reconnecting streams - // cost much more than just rejecting new requests. So we wait until the member - // cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams. 
- if noLeaderCnt >= maxNoLeaderCnt { - smap.mu.Lock() - for ss := range smap.streams { - if ssWithCtx, ok := ss.(serverStreamWithCtx); ok { - (*ssWithCtx.cancel)() - <-ss.Context().Done() - } - } - smap.streams = make(map[grpc.ServerStream]struct{}) - smap.mu.Unlock() - } - } - } - }() - - return smap -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go deleted file mode 100644 index 6ea7bbacde0..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v3rpc implements etcd v3 RPC system based on gRPC. -package v3rpc - -import ( - "sort" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/pkg/capnslog" - "golang.org/x/net/context" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc") - - // Max operations per txn list. For example, Txn.Success can have at most 128 operations, - // and Txn.Failure can have at most 128 operations. - MaxOpsPerTxn = 128 -) - -type kvServer struct { - hdr header - kv etcdserver.RaftKV -} - -func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer { - return &kvServer{hdr: newHeader(s), kv: s} -} - -func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if err := checkRangeRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Range(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := checkPutRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Put(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - if err := checkDeleteRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.DeleteRange(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if err := checkTxnRequest(r); err != nil { - return nil, err - } - - resp, err := s.kv.Txn(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) 
(*pb.CompactionResponse, error) { - resp, err := s.kv.Compact(ctx, r) - if err != nil { - return nil, togRPCError(err) - } - - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } - s.hdr.fill(resp.Header) - return resp, nil -} - -func checkRangeRequest(r *pb.RangeRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - return nil -} - -func checkPutRequest(r *pb.PutRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - return nil -} - -func checkDeleteRequest(r *pb.DeleteRangeRequest) error { - if len(r.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - return nil -} - -func checkTxnRequest(r *pb.TxnRequest) error { - if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn { - return rpctypes.ErrGRPCTooManyOps - } - - for _, c := range r.Compare { - if len(c.Key) == 0 { - return rpctypes.ErrGRPCEmptyKey - } - } - - for _, u := range r.Success { - if err := checkRequestOp(u); err != nil { - return err - } - } - if err := checkRequestDupKeys(r.Success); err != nil { - return err - } - - for _, u := range r.Failure { - if err := checkRequestOp(u); err != nil { - return err - } - } - return checkRequestDupKeys(r.Failure) -} - -// checkRequestDupKeys gives rpctypes.ErrGRPCDuplicateKey if the same key is modified twice -func checkRequestDupKeys(reqs []*pb.RequestOp) error { - // check put overlap - keys := make(map[string]struct{}) - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestPut) - if !ok { - continue - } - preq := tv.RequestPut - if preq == nil { - continue - } - if _, ok := keys[string(preq.Key)]; ok { - return rpctypes.ErrGRPCDuplicateKey - } - keys[string(preq.Key)] = struct{}{} - } - - // no need to check deletes if no puts; delete overlaps are permitted - if len(keys) == 0 { - return nil - } - - // sort keys for range checking - sortedKeys := []string{} - for k := range keys { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - - // check put overlap with deletes - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestDeleteRange) - if !ok { - continue - } - dreq := tv.RequestDeleteRange - if dreq == nil { - continue - } - if dreq.RangeEnd == nil { - if _, found := keys[string(dreq.Key)]; found { - return rpctypes.ErrGRPCDuplicateKey - } - } else { - lo := sort.SearchStrings(sortedKeys, string(dreq.Key)) - hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd)) - if lo != hi { - // element between lo and hi => overlap - return rpctypes.ErrGRPCDuplicateKey - } - } - } - - return nil -} - -func checkRequestOp(u *pb.RequestOp) error { - // TODO: ensure only one of the field is set. 
- switch uv := u.Request.(type) { - case *pb.RequestOp_RequestRange: - if uv.RequestRange != nil { - return checkRangeRequest(uv.RequestRange) - } - case *pb.RequestOp_RequestPut: - if uv.RequestPut != nil { - return checkPutRequest(uv.RequestPut) - } - case *pb.RequestOp_RequestDeleteRange: - if uv.RequestDeleteRange != nil { - return checkDeleteRequest(uv.RequestDeleteRange) - } - default: - // empty op - return nil - } - return nil -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go deleted file mode 100644 index be6e20b97fb..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "io" - - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" - "golang.org/x/net/context" -) - -type LeaseServer struct { - hdr header - le etcdserver.Lessor -} - -func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { - return &LeaseServer{le: s, hdr: newHeader(s)} -} - -func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - resp, err := ls.le.LeaseGrant(ctx, cr) - - if err != nil { - return nil, togRPCError(err) - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - resp, err := ls.le.LeaseRevoke(ctx, rr) - if err != nil { - return nil, togRPCError(err) - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - resp, err := ls.le.LeaseTimeToLive(ctx, rr) - if err != nil { - return nil, togRPCError(err) - } - ls.hdr.fill(resp.Header) - return resp, nil -} - -func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { - for { - req, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - // Create header before we sent out the renew request. - // This can make sure that the revision is strictly smaller or equal to - // when the keepalive happened at the local server (when the local server is the leader) - // or remote leader. - // Without this, a lease might be revoked at rev 3 but client can see the keepalive succeeded - // at rev 4. 
- resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}} - ls.hdr.fill(resp.Header) - - ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID)) - if err == lease.ErrLeaseNotFound { - err = nil - ttl = 0 - } - - if err != nil { - return togRPCError(err) - } - - resp.TTL = ttl - err = stream.Send(resp) - if err != nil { - return err - } - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go deleted file mode 100644 index af29ab3b71e..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "crypto/sha256" - "io" - - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/version" - "golang.org/x/net/context" -) - -type KVGetter interface { - KV() mvcc.ConsistentWatchableKV -} - -type BackendGetter interface { - Backend() backend.Backend -} - -type Alarmer interface { - Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) -} - -type RaftStatusGetter interface { - Index() uint64 - Term() uint64 - Leader() types.ID -} - -type AuthGetter interface { - AuthStore() auth.AuthStore -} - -type maintenanceServer struct { - rg RaftStatusGetter - kg KVGetter - bg BackendGetter - a Alarmer - hdr header -} - -func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer { - srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)} - return &authMaintenanceServer{srv, s} -} - -func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - plog.Noticef("starting to defragment the storage backend...") - err := ms.bg.Backend().Defrag() - if err != nil { - plog.Errorf("failed to defragment the storage backend (%v)", err) - return nil, err - } - plog.Noticef("finished defragmenting the storage backend") - return &pb.DefragmentResponse{}, nil -} - -func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { - snap := ms.bg.Backend().Snapshot() - pr, pw := io.Pipe() - - defer pr.Close() - - go func() { - snap.WriteTo(pw) - if err := snap.Close(); err != nil { - plog.Errorf("error closing snapshot (%v)", err) - } - pw.Close() - }() - - // send file data - h := sha256.New() - br := int64(0) - buf := make([]byte, 32*1024) - sz := snap.Size() - for br < sz { - n, err := io.ReadFull(pr, buf) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return togRPCError(err) - } - br += int64(n) - resp := &pb.SnapshotResponse{ - RemainingBytes: uint64(sz - br), - Blob: buf[:n], - } - if err = srv.Send(resp); err != nil { - return togRPCError(err) - } - 
h.Write(buf[:n]) - } - - // send sha - sha := h.Sum(nil) - hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha} - if err := srv.Send(hresp); err != nil { - return togRPCError(err) - } - - return nil -} - -func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - h, rev, err := ms.kg.KV().Hash() - if err != nil { - return nil, togRPCError(err) - } - resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h} - ms.hdr.fill(resp.Header) - return resp, nil -} - -func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { - return ms.a.Alarm(ctx, ar) -} - -func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { - resp := &pb.StatusResponse{ - Header: &pb.ResponseHeader{Revision: ms.hdr.rev()}, - Version: version.Version, - DbSize: ms.bg.Backend().Size(), - Leader: uint64(ms.rg.Leader()), - RaftIndex: ms.rg.Index(), - RaftTerm: ms.rg.Term(), - } - ms.hdr.fill(resp.Header) - return resp, nil -} - -type authMaintenanceServer struct { - *maintenanceServer - ag AuthGetter -} - -func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error { - authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx) - if err != nil { - return err - } - - return ams.ag.AuthStore().IsAdminPermitted(authInfo) -} - -func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - if err := ams.isAuthenticated(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.Defragment(ctx, sr) -} - -func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { - if err := ams.isAuthenticated(srv.Context()); err != nil { - return err - } - - return ams.maintenanceServer.Snapshot(sr, srv) -} - -func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - if err := ams.isAuthenticated(ctx); err != nil { - return nil, err - } - - return ams.maintenanceServer.Hash(ctx, r) -} - -func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { - return ams.maintenanceServer.Status(ctx, ar) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go deleted file mode 100644 index bcd5dac5183..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
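The Snapshot handler in maintenance.go above streams the backend snapshot in 32 KiB chunks and finishes with a SHA-256 trailer so the receiver can verify integrity. A minimal, transport-agnostic sketch of that chunk-plus-trailer pattern; streamWithChecksum and the send callback are made-up names for illustration:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

// streamWithChecksum copies r to send() in fixed-size chunks while hashing the
// data, then emits the SHA-256 digest as a final trailer chunk, mirroring the
// layout used by the removed Snapshot RPC.
func streamWithChecksum(r io.Reader, send func(chunk []byte, last bool) error) error {
	h := sha256.New()
	buf := make([]byte, 32*1024)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			h.Write(buf[:n]) // hash.Hash writes never return an error
			if serr := send(buf[:n], false); serr != nil {
				return serr
			}
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
	}
	return send(h.Sum(nil), true) // trailer: the checksum itself
}

func main() {
	data := bytes.Repeat([]byte("snapshot-bytes"), 4096)
	err := streamWithChecksum(bytes.NewReader(data), func(chunk []byte, last bool) error {
		if last {
			fmt.Println("sha256:", hex.EncodeToString(chunk))
		}
		return nil
	})
	if err != nil {
		fmt.Println("stream failed:", err)
	}
}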
- -package v3rpc - -import ( - "time" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - "golang.org/x/net/context" -) - -type ClusterServer struct { - cluster api.Cluster - server etcdserver.Server - raftTimer etcdserver.RaftTimer -} - -func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer { - return &ClusterServer{ - cluster: s.Cluster(), - server: s, - raftTimer: s, - } -} - -func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { - urls, err := types.NewURLs(r.PeerURLs) - if err != nil { - return nil, rpctypes.ErrGRPCMemberBadURLs - } - - now := time.Now() - m := membership.NewMember("", urls, "", &now) - if err = cs.server.AddMember(ctx, *m); err != nil { - return nil, togRPCError(err) - } - - return &pb.MemberAddResponse{ - Header: cs.header(), - Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, - }, nil -} - -func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - if err := cs.server.RemoveMember(ctx, r.ID); err != nil { - return nil, togRPCError(err) - } - return &pb.MemberRemoveResponse{Header: cs.header()}, nil -} - -func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { - m := membership.Member{ - ID: types.ID(r.ID), - RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs}, - } - if err := cs.server.UpdateMember(ctx, m); err != nil { - return nil, togRPCError(err) - } - return &pb.MemberUpdateResponse{Header: cs.header()}, nil -} - -func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - membs := cs.cluster.Members() - - protoMembs := make([]*pb.Member, len(membs)) - for i := range membs { - protoMembs[i] = &pb.Member{ - Name: membs[i].Name, - ID: uint64(membs[i].ID), - PeerURLs: membs[i].PeerURLs, - ClientURLs: membs[i].ClientURLs, - } - } - - return &pb.MemberListResponse{Header: cs.header(), Members: protoMembs}, nil -} - -func (cs *ClusterServer) header() *pb.ResponseHeader { - return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go deleted file mode 100644 index 6cb41a61e56..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v3rpc - -import "github.com/prometheus/client_golang/prometheus" - -var ( - sentBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "client_grpc_sent_bytes_total", - Help: "The total number of bytes sent to grpc clients.", - }) - - receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "client_grpc_received_bytes_total", - Help: "The total number of bytes received from grpc clients.", - }) -) - -func init() { - prometheus.MustRegister(sentBytes) - prometheus.MustRegister(receivedBytes) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go deleted file mode 100644 index 836f2fd3f43..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/types" - "golang.org/x/net/context" -) - -type quotaKVServer struct { - pb.KVServer - qa quotaAlarmer -} - -type quotaAlarmer struct { - q etcdserver.Quota - a Alarmer - id types.ID -} - -// check whether request satisfies the quota. If there is not enough space, -// ignore request and raise the free space alarm. 
-func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { - if qa.q.Available(r) { - return nil - } - req := &pb.AlarmRequest{ - MemberID: uint64(qa.id), - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_NOSPACE, - } - qa.a.Alarm(ctx, req) - return rpctypes.ErrGRPCNoSpace -} - -func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { - return "aKVServer{ - NewKVServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, - } -} - -func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := s.qa.check(ctx, r); err != nil { - return nil, err - } - return s.KVServer.Put(ctx, r) -} - -func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if err := s.qa.check(ctx, r); err != nil { - return nil, err - } - return s.KVServer.Txn(ctx, r) -} - -type quotaLeaseServer struct { - pb.LeaseServer - qa quotaAlarmer -} - -func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - if err := s.qa.check(ctx, cr); err != nil { - return nil, err - } - return s.LeaseServer.LeaseGrant(ctx, cr) -} - -func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { - return "aLeaseServer{ - NewLeaseServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go deleted file mode 100644 index f72c6a644f3..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction. -package rpctypes diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go deleted file mode 100644 index 5a3cfc0a0db..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
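The quota.go file above wraps the KV and Lease servers in thin decorators that run a quota check before delegating to the embedded server. The same wrap-check-delegate shape, reduced to a toy interface (Putter, quotaKV, and the injected available func are illustrative names, not etcd APIs):

package main

import (
	"context"
	"errors"
	"fmt"
)

var errNoSpace = errors.New("database space exceeded")

// Putter stands in for the generated pb.KVServer interface.
type Putter interface {
	Put(ctx context.Context, key, value string) error
}

type backendKV struct{}

func (backendKV) Put(ctx context.Context, key, value string) error {
	fmt.Printf("put %s=%s\n", key, value)
	return nil
}

// quotaKV embeds the real server and rejects writes once the quota is spent,
// the same structure as the removed quotaKVServer (which also raises a
// NOSPACE alarm before returning the error).
type quotaKV struct {
	Putter
	available func(int) bool // quota policy, injected
}

func (q quotaKV) Put(ctx context.Context, key, value string) error {
	if !q.available(len(key) + len(value)) {
		return errNoSpace
	}
	return q.Putter.Put(ctx, key, value)
}

func main() {
	remaining := 16
	kv := quotaKV{Putter: backendKV{}, available: func(n int) bool {
		if n > remaining {
			return false
		}
		remaining -= n
		return true
	}}
	fmt.Println(kv.Put(context.Background(), "a", "small"))
	fmt.Println(kv.Put(context.Background(), "b", "a-much-larger-value-exceeding-quota"))
}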
- -package rpctypes - -import ( - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -var ( - // server-side error - ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") - ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") - ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") - ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") - ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") - ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") - - ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found") - ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists") - - ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist") - ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists") - ErrGRPCMemberNotEnoughStarted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members") - ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid") - ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found") - - ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large") - ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests") - - ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist") - ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role") - ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists") - ErrGRPCUserEmpty = grpc.Errorf(codes.InvalidArgument, "etcdserver: user name is empty") - ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found") - ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists") - ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found") - ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password") - ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied") - ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user") - ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role") - ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled") - ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token") - - ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") - ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") - ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped") - ErrGRPCTimeout = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out") - ErrGRPCTimeoutDueToLeaderFail = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure") - 
ErrGRPCTimeoutDueToConnectionLost = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost") - ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster") - - errStringToError = map[string]error{ - grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, - grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, - grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, - grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, - grpc.ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, - grpc.ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, - - grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, - grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, - - grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist, - grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist, - grpc.ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, - grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, - grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, - - grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, - grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, - - grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist, - grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist, - grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist, - grpc.ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty, - grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, - grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, - grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, - grpc.ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, - grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, - grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, - grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, - grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, - grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, - - grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, - grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, - grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, - grpc.ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, - grpc.ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, - grpc.ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, - grpc.ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, - } - - // client-side error - ErrEmptyKey = Error(ErrGRPCEmptyKey) - ErrTooManyOps = Error(ErrGRPCTooManyOps) - ErrDuplicateKey = Error(ErrGRPCDuplicateKey) - ErrCompacted = Error(ErrGRPCCompacted) - ErrFutureRev = Error(ErrGRPCFutureRev) - ErrNoSpace = Error(ErrGRPCNoSpace) - - ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) - ErrLeaseExist = Error(ErrGRPCLeaseExist) - - ErrMemberExist = Error(ErrGRPCMemberExist) - ErrPeerURLExist = Error(ErrGRPCPeerURLExist) - ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) - ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) - ErrMemberNotFound = Error(ErrGRPCMemberNotFound) - - ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) - ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) - - ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist) - ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist) - ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist) - ErrUserEmpty = Error(ErrGRPCUserEmpty) - ErrUserNotFound = Error(ErrGRPCUserNotFound) - ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) - ErrRoleNotFound = 
Error(ErrGRPCRoleNotFound) - ErrAuthFailed = Error(ErrGRPCAuthFailed) - ErrPermissionDenied = Error(ErrGRPCPermissionDenied) - ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) - ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) - ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) - ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) - - ErrNoLeader = Error(ErrGRPCNoLeader) - ErrNotCapable = Error(ErrGRPCNotCapable) - ErrStopped = Error(ErrGRPCStopped) - ErrTimeout = Error(ErrGRPCTimeout) - ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) - ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) - ErrUnhealthy = Error(ErrGRPCUnhealthy) -) - -// EtcdError defines gRPC server errors. -// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323) -type EtcdError struct { - code codes.Code - desc string -} - -// Code returns grpc/codes.Code. -// TODO: define clientv3/codes.Code. -func (e EtcdError) Code() codes.Code { - return e.code -} - -func (e EtcdError) Error() string { - return e.desc -} - -func Error(err error) error { - if err == nil { - return nil - } - verr, ok := errStringToError[grpc.ErrorDesc(err)] - if !ok { // not gRPC error - return err - } - return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)} -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go deleted file mode 100644 index 5c590e1aec9..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rpctypes - -var ( - MetadataRequireLeaderKey = "hasleader" - MetadataHasLeader = "true" -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go deleted file mode 100644 index 5a057ed040d..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
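The error.go file above keeps a map from gRPC error descriptions back to sentinel errors so clients can compare with ==. It relies on the since-deprecated grpc.Errorf/grpc.ErrorDesc helpers; a rough equivalent of the same idea with the current status package could look like the following (errTooManyOps, byDescription, and toClientError are illustrative names):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Server-side sentinel, created once so the description string is stable.
var errTooManyOps = status.Error(codes.InvalidArgument, "etcdserver: too many operations in txn request")

// Description -> sentinel, built the same way the removed errStringToError map is.
var byDescription = map[string]error{
	status.Convert(errTooManyOps).Message(): errTooManyOps,
}

// toClientError maps a wire error back to the matching sentinel so callers can
// test with == (or errors.Is), analogous to what rpctypes.Error does for etcd clients.
func toClientError(err error) error {
	if err == nil {
		return nil
	}
	s, ok := status.FromError(err)
	if !ok {
		return err // not a gRPC status error
	}
	if sentinel, found := byDescription[s.Message()]; found {
		return sentinel
	}
	return err
}

func main() {
	wire := status.Error(codes.InvalidArgument, "etcdserver: too many operations in txn request")
	fmt.Println(toClientError(wire) == errTooManyOps) // true
}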
- -package v3rpc - -import ( - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -func togRPCError(err error) error { - switch err { - case membership.ErrIDRemoved: - return rpctypes.ErrGRPCMemberNotFound - case membership.ErrIDNotFound: - return rpctypes.ErrGRPCMemberNotFound - case membership.ErrIDExists: - return rpctypes.ErrGRPCMemberExist - case membership.ErrPeerURLexists: - return rpctypes.ErrGRPCPeerURLExist - case etcdserver.ErrNotEnoughStartedMembers: - return rpctypes.ErrMemberNotEnoughStarted - - case mvcc.ErrCompacted: - return rpctypes.ErrGRPCCompacted - case mvcc.ErrFutureRev: - return rpctypes.ErrGRPCFutureRev - case lease.ErrLeaseNotFound: - return rpctypes.ErrGRPCLeaseNotFound - case etcdserver.ErrRequestTooLarge: - return rpctypes.ErrGRPCRequestTooLarge - case etcdserver.ErrNoSpace: - return rpctypes.ErrGRPCNoSpace - case etcdserver.ErrTooManyRequests: - return rpctypes.ErrTooManyRequests - - case etcdserver.ErrNoLeader: - return rpctypes.ErrGRPCNoLeader - case etcdserver.ErrStopped: - return rpctypes.ErrGRPCStopped - case etcdserver.ErrTimeout: - return rpctypes.ErrGRPCTimeout - case etcdserver.ErrTimeoutDueToLeaderFail: - return rpctypes.ErrGRPCTimeoutDueToLeaderFail - case etcdserver.ErrTimeoutDueToConnectionLost: - return rpctypes.ErrGRPCTimeoutDueToConnectionLost - case etcdserver.ErrUnhealthy: - return rpctypes.ErrGRPCUnhealthy - - case lease.ErrLeaseNotFound: - return rpctypes.ErrGRPCLeaseNotFound - case lease.ErrLeaseExists: - return rpctypes.ErrGRPCLeaseExist - - case auth.ErrRootUserNotExist: - return rpctypes.ErrGRPCRootUserNotExist - case auth.ErrRootRoleNotExist: - return rpctypes.ErrGRPCRootRoleNotExist - case auth.ErrUserAlreadyExist: - return rpctypes.ErrGRPCUserAlreadyExist - case auth.ErrUserEmpty: - return rpctypes.ErrGRPCUserEmpty - case auth.ErrUserNotFound: - return rpctypes.ErrGRPCUserNotFound - case auth.ErrRoleAlreadyExist: - return rpctypes.ErrGRPCRoleAlreadyExist - case auth.ErrRoleNotFound: - return rpctypes.ErrGRPCRoleNotFound - case auth.ErrAuthFailed: - return rpctypes.ErrGRPCAuthFailed - case auth.ErrPermissionDenied: - return rpctypes.ErrGRPCPermissionDenied - case auth.ErrRoleNotGranted: - return rpctypes.ErrGRPCRoleNotGranted - case auth.ErrPermissionNotGranted: - return rpctypes.ErrGRPCPermissionNotGranted - case auth.ErrAuthNotEnabled: - return rpctypes.ErrGRPCAuthNotEnabled - case auth.ErrInvalidAuthToken: - return rpctypes.ErrGRPCInvalidAuthToken - default: - return grpc.Errorf(codes.Unknown, err.Error()) - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go deleted file mode 100644 index f0215531dee..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package v3rpc - -import ( - "io" - "sync" - "time" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -type watchServer struct { - clusterID int64 - memberID int64 - raftTimer etcdserver.RaftTimer - watchable mvcc.WatchableKV -} - -func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { - return &watchServer{ - clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), - raftTimer: s, - watchable: s.Watchable(), - } -} - -var ( - // External test can read this with GetProgressReportInterval() - // and change this to a small value to finish fast with - // SetProgressReportInterval(). - progressReportInterval = 10 * time.Minute - progressReportIntervalMu sync.RWMutex -) - -func GetProgressReportInterval() time.Duration { - progressReportIntervalMu.RLock() - defer progressReportIntervalMu.RUnlock() - return progressReportInterval -} - -func SetProgressReportInterval(newTimeout time.Duration) { - progressReportIntervalMu.Lock() - defer progressReportIntervalMu.Unlock() - progressReportInterval = newTimeout -} - -const ( - // We send ctrl response inside the read loop. We do not want - // send to block read, but we still want ctrl response we sent to - // be serialized. Thus we use a buffered chan to solve the problem. - // A small buffer should be OK for most cases, since we expect the - // ctrl requests are infrequent. - ctrlStreamBufLen = 16 -) - -// serverWatchStream is an etcd server side stream. It receives requests -// from client side gRPC stream. It receives watch events from mvcc.WatchStream, -// and creates responses that forwarded to gRPC stream. -// It also forwards control message like watch created and canceled. -type serverWatchStream struct { - clusterID int64 - memberID int64 - raftTimer etcdserver.RaftTimer - - watchable mvcc.WatchableKV - - gRPCStream pb.Watch_WatchServer - watchStream mvcc.WatchStream - ctrlStream chan *pb.WatchResponse - - // mu protects progress, prevKV - mu sync.Mutex - // progress tracks the watchID that stream might need to send - // progress to. - // TODO: combine progress and prevKV into a single struct? - progress map[mvcc.WatchID]bool - prevKV map[mvcc.WatchID]bool - - // closec indicates the stream is closed. - closec chan struct{} - - // wg waits for the send loop to complete - wg sync.WaitGroup -} - -func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { - sws := serverWatchStream{ - clusterID: ws.clusterID, - memberID: ws.memberID, - raftTimer: ws.raftTimer, - - watchable: ws.watchable, - - gRPCStream: stream, - watchStream: ws.watchable.NewWatchStream(), - // chan for sending control response like watcher created and canceled. - ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen), - progress: make(map[mvcc.WatchID]bool), - prevKV: make(map[mvcc.WatchID]bool), - closec: make(chan struct{}), - } - - sws.wg.Add(1) - go func() { - sws.sendLoop() - sws.wg.Done() - }() - - errc := make(chan error, 1) - // Ideally recvLoop would also use sws.wg to signal its completion - // but when stream.Context().Done() is closed, the stream's recv - // may continue to block since it uses a different context, leading to - // deadlock when calling sws.close(). 
- go func() { - if rerr := sws.recvLoop(); rerr != nil { - errc <- rerr - } - }() - select { - case err = <-errc: - close(sws.ctrlStream) - case <-stream.Context().Done(): - err = stream.Context().Err() - // the only server-side cancellation is noleader for now. - if err == context.Canceled { - err = rpctypes.ErrGRPCNoLeader - } - } - sws.close() - return err -} - -func (sws *serverWatchStream) recvLoop() error { - for { - req, err := sws.gRPCStream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - switch uv := req.RequestUnion.(type) { - case *pb.WatchRequest_CreateRequest: - if uv.CreateRequest == nil { - break - } - - creq := uv.CreateRequest - if len(creq.Key) == 0 { - // \x00 is the smallest key - creq.Key = []byte{0} - } - if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 { - // support >= key queries - creq.RangeEnd = []byte{} - } - filters := FiltersFromRequest(creq) - - wsrev := sws.watchStream.Rev() - rev := creq.StartRevision - if rev == 0 { - rev = wsrev + 1 - } - id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...) - if id != -1 { - sws.mu.Lock() - if creq.ProgressNotify { - sws.progress[id] = true - } - if creq.PrevKv { - sws.prevKV[id] = true - } - sws.mu.Unlock() - } - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(wsrev), - WatchId: int64(id), - Created: true, - Canceled: id == -1, - } - select { - case sws.ctrlStream <- wr: - case <-sws.closec: - return nil - } - case *pb.WatchRequest_CancelRequest: - if uv.CancelRequest != nil { - id := uv.CancelRequest.WatchId - err := sws.watchStream.Cancel(mvcc.WatchID(id)) - if err == nil { - sws.ctrlStream <- &pb.WatchResponse{ - Header: sws.newResponseHeader(sws.watchStream.Rev()), - WatchId: id, - Canceled: true, - } - sws.mu.Lock() - delete(sws.progress, mvcc.WatchID(id)) - delete(sws.prevKV, mvcc.WatchID(id)) - sws.mu.Unlock() - } - } - default: - // we probably should not shutdown the entire stream when - // receive an valid command. - // so just do nothing instead. - continue - } - } -} - -func (sws *serverWatchStream) sendLoop() { - // watch ids that are currently active - ids := make(map[mvcc.WatchID]struct{}) - // watch responses pending on a watch id creation message - pending := make(map[mvcc.WatchID][]*pb.WatchResponse) - - interval := GetProgressReportInterval() - progressTicker := time.NewTicker(interval) - - defer func() { - progressTicker.Stop() - // drain the chan to clean up pending events - for ws := range sws.watchStream.Chan() { - mvcc.ReportEventReceived(len(ws.Events)) - } - for _, wrs := range pending { - for _, ws := range wrs { - mvcc.ReportEventReceived(len(ws.Events)) - } - } - }() - - for { - select { - case wresp, ok := <-sws.watchStream.Chan(): - if !ok { - return - } - - // TODO: evs is []mvccpb.Event type - // either return []*mvccpb.Event from the mvcc package - // or define protocol buffer with []mvccpb.Event. 
- evs := wresp.Events - events := make([]*mvccpb.Event, len(evs)) - sws.mu.Lock() - needPrevKV := sws.prevKV[wresp.WatchID] - sws.mu.Unlock() - for i := range evs { - events[i] = &evs[i] - - if needPrevKV { - opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1} - r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt) - if err == nil && len(r.KVs) != 0 { - events[i].PrevKv = &(r.KVs[0]) - } - } - } - - wr := &pb.WatchResponse{ - Header: sws.newResponseHeader(wresp.Revision), - WatchId: int64(wresp.WatchID), - Events: events, - CompactRevision: wresp.CompactRevision, - } - - if _, hasId := ids[wresp.WatchID]; !hasId { - // buffer if id not yet announced - wrs := append(pending[wresp.WatchID], wr) - pending[wresp.WatchID] = wrs - continue - } - - mvcc.ReportEventReceived(len(evs)) - if err := sws.gRPCStream.Send(wr); err != nil { - return - } - - sws.mu.Lock() - if len(evs) > 0 && sws.progress[wresp.WatchID] { - // elide next progress update if sent a key update - sws.progress[wresp.WatchID] = false - } - sws.mu.Unlock() - - case c, ok := <-sws.ctrlStream: - if !ok { - return - } - - if err := sws.gRPCStream.Send(c); err != nil { - return - } - - // track id creation - wid := mvcc.WatchID(c.WatchId) - if c.Canceled { - delete(ids, wid) - continue - } - if c.Created { - // flush buffered events - ids[wid] = struct{}{} - for _, v := range pending[wid] { - mvcc.ReportEventReceived(len(v.Events)) - if err := sws.gRPCStream.Send(v); err != nil { - return - } - } - delete(pending, wid) - } - case <-progressTicker.C: - sws.mu.Lock() - for id, ok := range sws.progress { - if ok { - sws.watchStream.RequestProgress(id) - } - sws.progress[id] = true - } - sws.mu.Unlock() - case <-sws.closec: - return - } - } -} - -func (sws *serverWatchStream) close() { - sws.watchStream.Close() - close(sws.closec) - sws.wg.Wait() -} - -func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader { - return &pb.ResponseHeader{ - ClusterId: uint64(sws.clusterID), - MemberId: uint64(sws.memberID), - Revision: rev, - RaftTerm: sws.raftTimer.Term(), - } -} - -func filterNoDelete(e mvccpb.Event) bool { - return e.Type == mvccpb.DELETE -} - -func filterNoPut(e mvccpb.Event) bool { - return e.Type == mvccpb.PUT -} - -func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc { - filters := make([]mvcc.FilterFunc, 0, len(creq.Filters)) - for _, ft := range creq.Filters { - switch ft { - case pb.WatchCreateRequest_NOPUT: - filters = append(filters, filterNoPut) - case pb.WatchCreateRequest_NODELETE: - filters = append(filters, filterNoDelete) - default: - } - } - return filters -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go deleted file mode 100644 index e4bf35bc47e..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/apply.go +++ /dev/null @@ -1,902 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
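FiltersFromRequest in watch.go above turns the WatchCreateRequest filter enum into a slice of predicate functions that drop PUT or DELETE events before they are sent to the client. The predicate-slice shape, detached from the mvcc types (eventKind, filterFunc, and apply are illustrative names; a filter returning true means "drop this event", matching the removed mvcc.FilterFunc convention):

package main

import "fmt"

type eventKind int

const (
	putEvent eventKind = iota
	deleteEvent
)

type event struct {
	kind eventKind
	key  string
}

// filterFunc reports whether an event should be dropped.
type filterFunc func(event) bool

func noPut(e event) bool    { return e.kind == putEvent }
func noDelete(e event) bool { return e.kind == deleteEvent }

// apply keeps only the events that no filter rejects.
func apply(evs []event, filters ...filterFunc) []event {
	var out []event
	for _, e := range evs {
		dropped := false
		for _, f := range filters {
			if f(e) {
				dropped = true
				break
			}
		}
		if !dropped {
			out = append(out, e)
		}
	}
	return out
}

func main() {
	evs := []event{{putEvent, "a"}, {deleteEvent, "a"}, {putEvent, "b"}}
	fmt.Println(apply(evs, noDelete)) // keeps only the PUT events
}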
- -package etcdserver - -import ( - "bytes" - "fmt" - "sort" - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/types" - "github.com/gogo/protobuf/proto" - "golang.org/x/net/context" -) - -const ( - // noTxn is an invalid txn ID. - // To apply with independent Range, Put, Delete, you can pass noTxn - // to apply functions instead of a valid txn ID. - noTxn = -1 - - warnApplyDuration = 100 * time.Millisecond -) - -type applyResult struct { - resp proto.Message - err error - // physc signals the physical effect of the request has completed in addition - // to being logically reflected by the node. Currently only used for - // Compaction requests. - physc <-chan struct{} -} - -// applierV3 is the interface for processing V3 raft messages -type applierV3 interface { - Apply(r *pb.InternalRaftRequest) *applyResult - - Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) - Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) - DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) - Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) - Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) - - LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) - - Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) - - Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) - - AuthEnable() (*pb.AuthEnableResponse, error) - AuthDisable() (*pb.AuthDisableResponse, error) - - UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) -} - -type applierV3backend struct { - s *EtcdServer -} - -func (s *EtcdServer) newApplierV3() applierV3 { - return newAuthApplierV3( - s.AuthStore(), - newQuotaApplierV3(s, &applierV3backend{s}), - ) -} - -func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { - ar := &applyResult{} - - // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls - switch { - case r.Range != nil: - ar.resp, ar.err = a.s.applyV3.Range(noTxn, r.Range) - case r.Put != nil: - ar.resp, ar.err = a.s.applyV3.Put(noTxn, r.Put) - case r.DeleteRange != nil: - ar.resp, ar.err = a.s.applyV3.DeleteRange(noTxn, r.DeleteRange) - case r.Txn != nil: - ar.resp, ar.err = a.s.applyV3.Txn(r.Txn) - case r.Compaction != nil: - 
ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction) - case r.LeaseGrant != nil: - ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant) - case r.LeaseRevoke != nil: - ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke) - case r.Alarm != nil: - ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm) - case r.Authenticate != nil: - ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate) - case r.AuthEnable != nil: - ar.resp, ar.err = a.s.applyV3.AuthEnable() - case r.AuthDisable != nil: - ar.resp, ar.err = a.s.applyV3.AuthDisable() - case r.AuthUserAdd != nil: - ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd) - case r.AuthUserDelete != nil: - ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete) - case r.AuthUserChangePassword != nil: - ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword) - case r.AuthUserGrantRole != nil: - ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole) - case r.AuthUserGet != nil: - ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet) - case r.AuthUserRevokeRole != nil: - ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole) - case r.AuthRoleAdd != nil: - ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd) - case r.AuthRoleGrantPermission != nil: - ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) - case r.AuthRoleGet != nil: - ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet) - case r.AuthRoleRevokePermission != nil: - ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) - case r.AuthRoleDelete != nil: - ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete) - case r.AuthUserList != nil: - ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList) - case r.AuthRoleList != nil: - ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList) - default: - panic("not implemented") - } - return ar -} - -func (a *applierV3backend) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { - resp := &pb.PutResponse{} - resp.Header = &pb.ResponseHeader{} - var ( - rev int64 - err error - ) - - var rr *mvcc.RangeResult - if p.PrevKv { - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, p.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(p.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } - } - - if txnID != noTxn { - rev, err = a.s.KV().TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease)) - if err != nil { - return nil, err - } - } else { - leaseID := lease.LeaseID(p.Lease) - if leaseID != lease.NoLease { - if l := a.s.lessor.Lookup(leaseID); l == nil { - return nil, lease.ErrLeaseNotFound - } - } - rev = a.s.KV().Put(p.Key, p.Value, leaseID) - } - resp.Header.Revision = rev - if rr != nil && len(rr.KVs) != 0 { - resp.PrevKv = &rr.KVs[0] - } - return resp, nil -} - -func (a *applierV3backend) DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - resp := &pb.DeleteRangeResponse{} - resp.Header = &pb.ResponseHeader{} - - var ( - n int64 - rev int64 - err error - ) - - if isGteRange(dr.RangeEnd) { - dr.RangeEnd = []byte{} - } - - var rr *mvcc.RangeResult - if dr.PrevKv { - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } - } - - if txnID != noTxn { - n, rev, err = a.s.KV().TxnDeleteRange(txnID, dr.Key, dr.RangeEnd) - if err 
!= nil { - return nil, err - } - } else { - n, rev = a.s.KV().DeleteRange(dr.Key, dr.RangeEnd) - } - - resp.Deleted = n - if rr != nil { - for i := range rr.KVs { - resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) - } - } - resp.Header.Revision = rev - return resp, nil -} - -func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) { - resp := &pb.RangeResponse{} - resp.Header = &pb.ResponseHeader{} - - var ( - rr *mvcc.RangeResult - err error - ) - - if isGteRange(r.RangeEnd) { - r.RangeEnd = []byte{} - } - - limit := r.Limit - if r.SortOrder != pb.RangeRequest_NONE || - r.MinModRevision != 0 || r.MaxModRevision != 0 || - r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { - // fetch everything; sort and truncate afterwards - limit = 0 - } - if limit > 0 { - // fetch one extra for 'more' flag - limit = limit + 1 - } - - ro := mvcc.RangeOptions{ - Limit: limit, - Rev: r.Revision, - Count: r.CountOnly, - } - - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err - } - } - - if r.MaxModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } - pruneKVs(rr, f) - } - if r.MinModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision } - pruneKVs(rr, f) - } - if r.MaxCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } - pruneKVs(rr, f) - } - if r.MinCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } - pruneKVs(rr, f) - } - - sortOrder := r.SortOrder - if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE { - // Since current mvcc.Range implementation returns results - // sorted by keys in lexiographically ascending order, - // sort ASCEND by default only when target is not 'KEY' - sortOrder = pb.RangeRequest_ASCEND - } - if sortOrder != pb.RangeRequest_NONE { - var sorter sort.Interface - switch { - case r.SortTarget == pb.RangeRequest_KEY: - sorter = &kvSortByKey{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VERSION: - sorter = &kvSortByVersion{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_CREATE: - sorter = &kvSortByCreate{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_MOD: - sorter = &kvSortByMod{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VALUE: - sorter = &kvSortByValue{&kvSort{rr.KVs}} - } - switch { - case sortOrder == pb.RangeRequest_ASCEND: - sort.Sort(sorter) - case sortOrder == pb.RangeRequest_DESCEND: - sort.Sort(sort.Reverse(sorter)) - } - } - - if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { - rr.KVs = rr.KVs[:r.Limit] - resp.More = true - } - - resp.Header.Revision = rr.Rev - resp.Count = int64(rr.Count) - for i := range rr.KVs { - if r.KeysOnly { - rr.KVs[i].Value = nil - } - resp.Kvs = append(resp.Kvs, &rr.KVs[i]) - } - return resp, nil -} - -func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - ok := true - for _, c := range rt.Compare { - if _, ok = a.applyCompare(c); !ok { - break - } - } - - var reqs []*pb.RequestOp - if ok { - reqs = rt.Success - } else { - reqs = rt.Failure - } - - if err := a.checkRequestLeases(reqs); err != nil { - return nil, err - } - if err := a.checkRequestRange(reqs); err != nil { - return nil, err - } - - // When executing the operations of txn, we need to hold the 
txn lock. - // So the reader will not see any intermediate results. - txnID := a.s.KV().TxnBegin() - - resps := make([]*pb.ResponseOp, len(reqs)) - for i := range reqs { - resps[i] = a.applyUnion(txnID, reqs[i]) - } - - err := a.s.KV().TxnEnd(txnID) - if err != nil { - panic(fmt.Sprint("unexpected error when closing txn", txnID)) - } - - txnResp := &pb.TxnResponse{} - txnResp.Header = &pb.ResponseHeader{} - txnResp.Header.Revision = a.s.KV().Rev() - txnResp.Responses = resps - txnResp.Succeeded = ok - return txnResp, nil -} - -// applyCompare applies the compare request. -// It returns the revision at which the comparison happens. If the comparison -// succeeds, the it returns true. Otherwise it returns false. -func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { - rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{}) - rev := rr.Rev - - if err != nil { - if err == mvcc.ErrTxnIDMismatch { - panic("unexpected txn ID mismatch error") - } - return rev, false - } - var ckv mvccpb.KeyValue - if len(rr.KVs) != 0 { - ckv = rr.KVs[0] - } else { - // Use the zero value of ckv normally. However... - if c.Target == pb.Compare_VALUE { - // Always fail if we're comparing a value on a key that doesn't exist. - // We can treat non-existence as the empty set explicitly, such that - // even a key with a value of length 0 bytes is still a real key - // that was written that way - return rev, false - } - } - - // -1 is less, 0 is equal, 1 is greater - var result int - switch c.Target { - case pb.Compare_VALUE: - tv, _ := c.TargetUnion.(*pb.Compare_Value) - if tv != nil { - result = bytes.Compare(ckv.Value, tv.Value) - } - case pb.Compare_CREATE: - tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision) - if tv != nil { - result = compareInt64(ckv.CreateRevision, tv.CreateRevision) - } - - case pb.Compare_MOD: - tv, _ := c.TargetUnion.(*pb.Compare_ModRevision) - if tv != nil { - result = compareInt64(ckv.ModRevision, tv.ModRevision) - } - case pb.Compare_VERSION: - tv, _ := c.TargetUnion.(*pb.Compare_Version) - if tv != nil { - result = compareInt64(ckv.Version, tv.Version) - } - } - - switch c.Result { - case pb.Compare_EQUAL: - if result != 0 { - return rev, false - } - case pb.Compare_NOT_EQUAL: - if result == 0 { - return rev, false - } - case pb.Compare_GREATER: - if result != 1 { - return rev, false - } - case pb.Compare_LESS: - if result != -1 { - return rev, false - } - } - return rev, true -} - -func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.ResponseOp { - switch tv := union.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange != nil { - resp, err := a.Range(txnID, tv.RequestRange) - if err != nil { - plog.Panicf("unexpected error during txn: %v", err) - } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}} - } - case *pb.RequestOp_RequestPut: - if tv.RequestPut != nil { - resp, err := a.Put(txnID, tv.RequestPut) - if err != nil { - plog.Panicf("unexpected error during txn: %v", err) - } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}} - } - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange != nil { - resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange) - if err != nil { - plog.Panicf("unexpected error during txn: %v", err) - } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}} - } - default: - // empty union - return nil - } - return nil - -} - -func (a *applierV3backend) Compaction(compaction 
*pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) { - resp := &pb.CompactionResponse{} - resp.Header = &pb.ResponseHeader{} - ch, err := a.s.KV().Compact(compaction.Revision) - if err != nil { - return nil, ch, err - } - // get the current revision. which key to get is not important. - rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{}) - resp.Header.Revision = rr.Rev - return resp, ch, err -} - -func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) - resp := &pb.LeaseGrantResponse{} - if err == nil { - resp.ID = int64(l.ID) - resp.TTL = l.TTL() - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - err := a.s.lessor.Revoke(lease.LeaseID(lc.ID)) - return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err -} - -func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { - resp := &pb.AlarmResponse{} - oldCount := len(a.s.alarmStore.Get(ar.Alarm)) - - switch ar.Action { - case pb.AlarmRequest_GET: - resp.Alarms = a.s.alarmStore.Get(ar.Alarm) - case pb.AlarmRequest_ACTIVATE: - m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) - if m == nil { - break - } - resp.Alarms = append(resp.Alarms, m) - activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1 - if !activated { - break - } - - switch m.Alarm { - case pb.AlarmType_NOSPACE: - plog.Warningf("alarm raised %+v", m) - a.s.applyV3 = newApplierV3Capped(a) - default: - plog.Errorf("unimplemented alarm activation (%+v)", m) - } - case pb.AlarmRequest_DEACTIVATE: - m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) - if m == nil { - break - } - resp.Alarms = append(resp.Alarms, m) - deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0 - if !deactivated { - break - } - - switch m.Alarm { - case pb.AlarmType_NOSPACE: - plog.Infof("alarm disarmed %+v", ar) - a.s.applyV3 = a.s.newApplierV3() - default: - plog.Errorf("unimplemented alarm deactivation (%+v)", m) - } - default: - return nil, nil - } - return resp, nil -} - -type applierV3Capped struct { - applierV3 - q backendQuota -} - -// newApplierV3Capped creates an applyV3 that will reject Puts and transactions -// with Puts so that the number of keys in the store is capped. 
-func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } - -func (a *applierV3Capped) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { - return nil, ErrNoSpace -} - -func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) { - if a.q.Cost(r) > 0 { - return nil, ErrNoSpace - } - return a.applierV3.Txn(r) -} - -func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, ErrNoSpace -} - -func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { - err := a.s.AuthStore().AuthEnable() - if err != nil { - return nil, err - } - return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil -} - -func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { - a.s.AuthStore().AuthDisable() - return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil -} - -func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { - ctx := context.WithValue(context.WithValue(context.Background(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) - resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := a.s.AuthStore().UserAdd(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := a.s.AuthStore().UserDelete(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := a.s.AuthStore().UserChangePassword(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := a.s.AuthStore().UserGrantRole(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := a.s.AuthStore().UserGet(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := a.s.AuthStore().UserRevokeRole(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := a.s.AuthStore().RoleAdd(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := a.s.AuthStore().RoleGrantPermission(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := a.s.AuthStore().RoleGet(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - 
resp, err := a.s.AuthStore().RoleRevokePermission(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := a.s.AuthStore().RoleDelete(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := a.s.AuthStore().UserList(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := a.s.AuthStore().RoleList(r) - if resp != nil { - resp.Header = newHeader(a.s) - } - return resp, err -} - -type quotaApplierV3 struct { - applierV3 - q Quota -} - -func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { - return &quotaApplierV3{app, NewBackendQuota(s)} -} - -func (a *quotaApplierV3) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { - ok := a.q.Available(p) - resp, err := a.applierV3.Put(txnID, p) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, err -} - -func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - ok := a.q.Available(rt) - resp, err := a.applierV3.Txn(rt) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, err -} - -func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - ok := a.q.Available(lc) - resp, err := a.applierV3.LeaseGrant(lc) - if err == nil && !ok { - err = ErrNoSpace - } - return resp, err -} - -type kvSort struct{ kvs []mvccpb.KeyValue } - -func (s *kvSort) Swap(i, j int) { - t := s.kvs[i] - s.kvs[i] = s.kvs[j] - s.kvs[j] = t -} -func (s *kvSort) Len() int { return len(s.kvs) } - -type kvSortByKey struct{ *kvSort } - -func (s *kvSortByKey) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0 -} - -type kvSortByVersion struct{ *kvSort } - -func (s *kvSortByVersion) Less(i, j int) bool { - return (s.kvs[i].Version - s.kvs[j].Version) < 0 -} - -type kvSortByCreate struct{ *kvSort } - -func (s *kvSortByCreate) Less(i, j int) bool { - return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0 -} - -type kvSortByMod struct{ *kvSort } - -func (s *kvSortByMod) Less(i, j int) bool { - return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0 -} - -type kvSortByValue struct{ *kvSort } - -func (s *kvSortByValue) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 -} - -func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error { - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestPut) - if !ok { - continue - } - preq := tv.RequestPut - if preq == nil || lease.LeaseID(preq.Lease) == lease.NoLease { - continue - } - if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil { - return lease.ErrLeaseNotFound - } - } - return nil -} - -func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error { - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestRange) - if !ok { - continue - } - greq := tv.RequestRange - if greq == nil || greq.Revision == 0 { - continue - } - - if greq.Revision > a.s.KV().Rev() { - return mvcc.ErrFutureRev - } - if greq.Revision < a.s.KV().FirstRev() { - return mvcc.ErrCompacted - } - } - return nil -} - -func compareInt64(a, b int64) int { - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 
- } -} - -// isGteRange determines if the range end is a >= range. This works around grpc -// sending empty byte strings as nil; >= is encoded in the range end as '\0'. -func isGteRange(rangeEnd []byte) bool { - return len(rangeEnd) == 1 && rangeEnd[0] == 0 -} - -func noSideEffect(r *pb.InternalRaftRequest) bool { - return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil -} - -func removeNeedlessRangeReqs(txn *pb.TxnRequest) { - f := func(ops []*pb.RequestOp) []*pb.RequestOp { - j := 0 - for i := 0; i < len(ops); i++ { - if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok { - continue - } - ops[j] = ops[i] - j++ - } - - return ops[:j] - } - - txn.Success = f(txn.Success) - txn.Failure = f(txn.Failure) -} - -func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { - j := 0 - for i := range rr.KVs { - rr.KVs[j] = rr.KVs[i] - if !isPrunable(&rr.KVs[i]) { - j++ - } - } - rr.KVs = rr.KVs[:j] -} - -func newHeader(s *EtcdServer) *pb.ResponseHeader { - return &pb.ResponseHeader{ - ClusterId: uint64(s.Cluster().ID()), - MemberId: uint64(s.ID()), - Revision: s.KV().Rev(), - RaftTerm: s.Term(), - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go deleted file mode 100644 index 4868e855ca1..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "sync" - - "github.com/coreos/etcd/auth" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type authApplierV3 struct { - applierV3 - as auth.AuthStore - - // mu serializes Apply so that user isn't corrupted and so that - // serialized requests don't leak data from TOCTOU errors - mu sync.Mutex - - authInfo auth.AuthInfo -} - -func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 { - return &authApplierV3{applierV3: base, as: as} -} - -func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { - aa.mu.Lock() - defer aa.mu.Unlock() - if r.Header != nil { - // backward-compatible with pre-3.0 releases when internalRaftRequest - // does not have header field - aa.authInfo.Username = r.Header.Username - aa.authInfo.Revision = r.Header.AuthRevision - } - if needAdminPermission(r) { - if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil { - aa.authInfo.Username = "" - aa.authInfo.Revision = 0 - return &applyResult{err: err} - } - } - ret := aa.applierV3.Apply(r) - aa.authInfo.Username = "" - aa.authInfo.Revision = 0 - return ret -} - -func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, error) { - if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { - return nil, err - } - if r.PrevKv { - err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil) - if err != nil { - return nil, err - } - } - return aa.applierV3.Put(txnID, r) -} - -func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { - return nil, err - } - return aa.applierV3.Range(txnID, r) -} - -func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { - return nil, err - } - if r.PrevKv { - err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd) - if err != nil { - return nil, err - } - } - - return aa.applierV3.DeleteRange(txnID, r) -} - -func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { - for _, requ := range reqs { - switch tv := requ.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange == nil { - continue - } - - if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil { - return err - } - - case *pb.RequestOp_RequestPut: - if tv.RequestPut == nil { - continue - } - - if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil { - return err - } - - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange == nil { - continue - } - - if tv.RequestDeleteRange.PrevKv { - err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) - if err != nil { - return err - } - } - - err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) - if err != nil { - return err - } - } - } - - return nil -} - -func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { - for _, c := range rt.Compare { - if err := as.IsRangePermitted(ai, c.Key, nil); err != nil { - return err - } - } - if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { - return err - } - if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil { - return err - } - return nil -} - -func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - if err := checkTxnAuth(aa.as, 
&aa.authInfo, rt); err != nil { - return nil, err - } - return aa.applierV3.Txn(rt) -} - -func needAdminPermission(r *pb.InternalRaftRequest) bool { - switch { - case r.AuthEnable != nil: - return true - case r.AuthDisable != nil: - return true - case r.AuthUserAdd != nil: - return true - case r.AuthUserDelete != nil: - return true - case r.AuthUserChangePassword != nil: - return true - case r.AuthUserGrantRole != nil: - return true - case r.AuthUserGet != nil: - return true - case r.AuthUserRevokeRole != nil: - return true - case r.AuthRoleAdd != nil: - return true - case r.AuthRoleGrantPermission != nil: - return true - case r.AuthRoleGet != nil: - return true - case r.AuthRoleRevokePermission != nil: - return true - case r.AuthRoleDelete != nil: - return true - case r.AuthUserList != nil: - return true - case r.AuthRoleList != nil: - return true - default: - return false - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go deleted file mode 100644 index f278efca88e..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "encoding/json" - "path" - "time" - - "github.com/coreos/etcd/etcdserver/api" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/store" - "github.com/coreos/go-semver/semver" -) - -// ApplierV2 is the interface for processing V2 raft messages -type ApplierV2 interface { - Delete(r *pb.Request) Response - Post(r *pb.Request) Response - Put(r *pb.Request) Response - QGet(r *pb.Request) Response - Sync(r *pb.Request) Response -} - -func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 { - return &applierV2store{store: s, cluster: c} -} - -type applierV2store struct { - store store.Store - cluster *membership.RaftCluster -} - -func (a *applierV2store) Delete(r *pb.Request) Response { - switch { - case r.PrevIndex > 0 || r.PrevValue != "": - return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex)) - default: - return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive)) - } -} - -func (a *applierV2store) Post(r *pb.Request) Response { - return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r))) -} - -func (a *applierV2store) Put(r *pb.Request) Response { - ttlOptions := toTTLOptions(r) - exists, existsSet := pbutil.GetBool(r.PrevExist) - switch { - case existsSet: - if exists { - if r.PrevIndex == 0 && r.PrevValue == "" { - return toResponse(a.store.Update(r.Path, r.Val, ttlOptions)) - } - return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) - } - return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions)) - case r.PrevIndex > 0 || r.PrevValue != "": - return toResponse(a.store.CompareAndSwap(r.Path, 
r.PrevValue, r.PrevIndex, r.Val, ttlOptions)) - default: - if storeMemberAttributeRegexp.MatchString(r.Path) { - id := membership.MustParseMemberIDFromKey(path.Dir(r.Path)) - var attr membership.Attributes - if err := json.Unmarshal([]byte(r.Val), &attr); err != nil { - plog.Panicf("unmarshal %s should never fail: %v", r.Val, err) - } - if a.cluster != nil { - a.cluster.UpdateAttributes(id, attr) - } - // return an empty response since there is no consumer. - return Response{} - } - if r.Path == membership.StoreClusterVersionKey() { - if a.cluster != nil { - a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability) - } - // return an empty response since there is no consumer. - return Response{} - } - return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions)) - } -} - -func (a *applierV2store) QGet(r *pb.Request) Response { - return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted)) -} - -func (a *applierV2store) Sync(r *pb.Request) Response { - a.store.DeleteExpiredKeys(time.Unix(0, r.Time)) - return Response{} -} - -// applyV2Request interprets r as a call to store.X and returns a Response interpreted -// from store.Event -func (s *EtcdServer) applyV2Request(r *pb.Request) Response { - toTTLOptions(r) - switch r.Method { - case "POST": - return s.applyV2.Post(r) - case "PUT": - return s.applyV2.Put(r) - case "DELETE": - return s.applyV2.Delete(r) - case "QGET": - return s.applyV2.QGet(r) - case "SYNC": - return s.applyV2.Sync(r) - default: - // This should never be reached, but just in case: - return Response{err: ErrUnknownMethod} - } -} - -func toTTLOptions(r *pb.Request) store.TTLOptionSet { - refresh, _ := pbutil.GetBool(r.Refresh) - ttlOptions := store.TTLOptionSet{Refresh: refresh} - if r.Expiration != 0 { - ttlOptions.ExpireTime = time.Unix(0, r.Expiration) - } - return ttlOptions -} - -func toResponse(ev *store.Event, err error) Response { - return Response{Event: ev, err: err} -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go b/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go deleted file mode 100644 index 19e96d57c6d..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package auth implements etcd authentication. -package auth - -import ( - "encoding/json" - "fmt" - "net/http" - "path" - "reflect" - "sort" - "strings" - "time" - - etcderr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/pkg/capnslog" - "golang.org/x/crypto/bcrypt" - "golang.org/x/net/context" -) - -const ( - // StorePermsPrefix is the internal prefix of the storage layer dedicated to storing user data. - StorePermsPrefix = "/2" - - // RootRoleName is the name of the ROOT role, with privileges to manage the cluster. 
- RootRoleName = "root" - - // GuestRoleName is the name of the role that defines the privileges of an unauthenticated user. - GuestRoleName = "guest" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/auth") -) - -var rootRole = Role{ - Role: RootRoleName, - Permissions: Permissions{ - KV: RWPermission{ - Read: []string{"/*"}, - Write: []string{"/*"}, - }, - }, -} - -var guestRole = Role{ - Role: GuestRoleName, - Permissions: Permissions{ - KV: RWPermission{ - Read: []string{"/*"}, - Write: []string{"/*"}, - }, - }, -} - -type doer interface { - Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error) -} - -type Store interface { - AllUsers() ([]string, error) - GetUser(name string) (User, error) - CreateOrUpdateUser(user User) (out User, created bool, err error) - CreateUser(user User) (User, error) - DeleteUser(name string) error - UpdateUser(user User) (User, error) - AllRoles() ([]string, error) - GetRole(name string) (Role, error) - CreateRole(role Role) error - DeleteRole(name string) error - UpdateRole(role Role) (Role, error) - AuthEnabled() bool - EnableAuth() error - DisableAuth() error - PasswordStore -} - -type PasswordStore interface { - CheckPassword(user User, password string) bool - HashPassword(password string) (string, error) -} - -type store struct { - server doer - timeout time.Duration - ensuredOnce bool - - PasswordStore -} - -type User struct { - User string `json:"user"` - Password string `json:"password,omitempty"` - Roles []string `json:"roles"` - Grant []string `json:"grant,omitempty"` - Revoke []string `json:"revoke,omitempty"` -} - -type Role struct { - Role string `json:"role"` - Permissions Permissions `json:"permissions"` - Grant *Permissions `json:"grant,omitempty"` - Revoke *Permissions `json:"revoke,omitempty"` -} - -type Permissions struct { - KV RWPermission `json:"kv"` -} - -func (p *Permissions) IsEmpty() bool { - return p == nil || (len(p.KV.Read) == 0 && len(p.KV.Write) == 0) -} - -type RWPermission struct { - Read []string `json:"read"` - Write []string `json:"write"` -} - -type Error struct { - Status int - Errmsg string -} - -func (ae Error) Error() string { return ae.Errmsg } -func (ae Error) HTTPStatus() int { return ae.Status } - -func authErr(hs int, s string, v ...interface{}) Error { - return Error{Status: hs, Errmsg: fmt.Sprintf("auth: "+s, v...)} -} - -func NewStore(server doer, timeout time.Duration) Store { - s := &store{ - server: server, - timeout: timeout, - PasswordStore: passwordStore{}, - } - return s -} - -// passwordStore implements PasswordStore using bcrypt to hash user passwords -type passwordStore struct{} - -func (_ passwordStore) CheckPassword(user User, password string) bool { - err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)) - return err == nil -} - -func (_ passwordStore) HashPassword(password string) (string, error) { - hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) - return string(hash), err -} - -func (s *store) AllUsers() ([]string, error) { - resp, err := s.requestResource("/users/", false, false) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { - return []string{}, nil - } - } - return nil, err - } - var nodes []string - for _, n := range resp.Event.Node.Nodes { - _, user := path.Split(n.Key) - nodes = append(nodes, user) - } - sort.Strings(nodes) - return nodes, nil -} - -func (s *store) GetUser(name string) (User, error) { return s.getUser(name, 
false) } - -// CreateOrUpdateUser should be only used for creating the new user or when you are not -// sure if it is a create or update. (When only password is passed in, we are not sure -// if it is a update or create) -func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) { - _, err = s.getUser(user.User, true) - if err == nil { - out, err = s.UpdateUser(user) - return out, false, err - } - u, err := s.CreateUser(user) - return u, true, err -} - -func (s *store) CreateUser(user User) (User, error) { - // Attach root role to root user. - if user.User == "root" { - user = attachRootRole(user) - } - u, err := s.createUserInternal(user) - if err == nil { - plog.Noticef("created user %s", user.User) - } - return u, err -} - -func (s *store) createUserInternal(user User) (User, error) { - if user.Password == "" { - return user, authErr(http.StatusBadRequest, "Cannot create user %s with an empty password", user.User) - } - hash, err := s.HashPassword(user.Password) - if err != nil { - return user, err - } - user.Password = hash - - _, err = s.createResource("/users/"+user.User, user) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeNodeExist { - return user, authErr(http.StatusConflict, "User %s already exists.", user.User) - } - } - } - return user, err -} - -func (s *store) DeleteUser(name string) error { - if s.AuthEnabled() && name == "root" { - return authErr(http.StatusForbidden, "Cannot delete root user while auth is enabled.") - } - _, err := s.deleteResource("/users/" + name) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { - return authErr(http.StatusNotFound, "User %s does not exist", name) - } - } - return err - } - plog.Noticef("deleted user %s", name) - return nil -} - -func (s *store) UpdateUser(user User) (User, error) { - old, err := s.getUser(user.User, true) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { - return user, authErr(http.StatusNotFound, "User %s doesn't exist.", user.User) - } - } - return old, err - } - - newUser, err := old.merge(user, s.PasswordStore) - if err != nil { - return old, err - } - if reflect.DeepEqual(old, newUser) { - return old, authErr(http.StatusBadRequest, "User not updated. 
Use grant/revoke/password to update the user.") - } - _, err = s.updateResource("/users/"+user.User, newUser) - if err == nil { - plog.Noticef("updated user %s", user.User) - } - return newUser, err -} - -func (s *store) AllRoles() ([]string, error) { - nodes := []string{RootRoleName} - resp, err := s.requestResource("/roles/", false, false) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { - return nodes, nil - } - } - return nil, err - } - for _, n := range resp.Event.Node.Nodes { - _, role := path.Split(n.Key) - nodes = append(nodes, role) - } - sort.Strings(nodes) - return nodes, nil -} - -func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) } - -func (s *store) CreateRole(role Role) error { - if role.Role == RootRoleName { - return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role) - } - _, err := s.createResource("/roles/"+role.Role, role) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeNodeExist { - return authErr(http.StatusConflict, "Role %s already exists.", role.Role) - } - } - } - if err == nil { - plog.Noticef("created new role %s", role.Role) - } - return err -} - -func (s *store) DeleteRole(name string) error { - if name == RootRoleName { - return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", name) - } - _, err := s.deleteResource("/roles/" + name) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { - return authErr(http.StatusNotFound, "Role %s doesn't exist.", name) - } - } - } - if err == nil { - plog.Noticef("deleted role %s", name) - } - return err -} - -func (s *store) UpdateRole(role Role) (Role, error) { - if role.Role == RootRoleName { - return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role) - } - old, err := s.getRole(role.Role, true) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { - return role, authErr(http.StatusNotFound, "Role %s doesn't exist.", role.Role) - } - } - return old, err - } - newRole, err := old.merge(role) - if err != nil { - return old, err - } - if reflect.DeepEqual(old, newRole) { - return old, authErr(http.StatusBadRequest, "Role not updated. Use grant/revoke to update the role.") - } - _, err = s.updateResource("/roles/"+role.Role, newRole) - if err == nil { - plog.Noticef("updated role %s", role.Role) - } - return newRole, err -} - -func (s *store) AuthEnabled() bool { - return s.detectAuth() -} - -func (s *store) EnableAuth() error { - if s.AuthEnabled() { - return authErr(http.StatusConflict, "already enabled") - } - - if _, err := s.getUser("root", true); err != nil { - return authErr(http.StatusConflict, "No root user available, please create one") - } - if _, err := s.getRole(GuestRoleName, true); err != nil { - plog.Printf("no guest role access found, creating default") - if err := s.CreateRole(guestRole); err != nil { - plog.Errorf("error creating guest role. 
aborting auth enable.") - return err - } - } - - if err := s.enableAuth(); err != nil { - plog.Errorf("error enabling auth (%v)", err) - return err - } - - plog.Noticef("auth: enabled auth") - return nil -} - -func (s *store) DisableAuth() error { - if !s.AuthEnabled() { - return authErr(http.StatusConflict, "already disabled") - } - - err := s.disableAuth() - if err == nil { - plog.Noticef("auth: disabled auth") - } else { - plog.Errorf("error disabling auth (%v)", err) - } - return err -} - -// merge applies the properties of the passed-in User to the User on which it -// is called and returns a new User with these modifications applied. Think of -// all Users as immutable sets of data. Merge allows you to perform the set -// operations (desired grants and revokes) atomically -func (ou User) merge(nu User, s PasswordStore) (User, error) { - var out User - if ou.User != nu.User { - return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User) - } - out.User = ou.User - if nu.Password != "" { - hash, err := s.HashPassword(nu.Password) - if err != nil { - return ou, err - } - out.Password = hash - } else { - out.Password = ou.Password - } - currentRoles := types.NewUnsafeSet(ou.Roles...) - for _, g := range nu.Grant { - if currentRoles.Contains(g) { - plog.Noticef("granting duplicate role %s for user %s", g, nu.User) - return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User)) - } - currentRoles.Add(g) - } - for _, r := range nu.Revoke { - if !currentRoles.Contains(r) { - plog.Noticef("revoking ungranted role %s for user %s", r, nu.User) - return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User)) - } - currentRoles.Remove(r) - } - out.Roles = currentRoles.Values() - sort.Strings(out.Roles) - return out, nil -} - -// merge for a role works the same as User above -- atomic Role application to -// each of the substructures. -func (r Role) merge(n Role) (Role, error) { - var out Role - var err error - if r.Role != n.Role { - return out, authErr(http.StatusConflict, "Merging role with conflicting names: %s %s", r.Role, n.Role) - } - out.Role = r.Role - out.Permissions, err = r.Permissions.Grant(n.Grant) - if err != nil { - return out, err - } - out.Permissions, err = out.Permissions.Revoke(n.Revoke) - return out, err -} - -func (r Role) HasKeyAccess(key string, write bool) bool { - if r.Role == RootRoleName { - return true - } - return r.Permissions.KV.HasAccess(key, write) -} - -func (r Role) HasRecursiveAccess(key string, write bool) bool { - if r.Role == RootRoleName { - return true - } - return r.Permissions.KV.HasRecursiveAccess(key, write) -} - -// Grant adds a set of permissions to the permission object on which it is called, -// returning a new permission object. -func (p Permissions) Grant(n *Permissions) (Permissions, error) { - var out Permissions - var err error - if n == nil { - return p, nil - } - out.KV, err = p.KV.Grant(n.KV) - return out, err -} - -// Revoke removes a set of permissions to the permission object on which it is called, -// returning a new permission object. -func (p Permissions) Revoke(n *Permissions) (Permissions, error) { - var out Permissions - var err error - if n == nil { - return p, nil - } - out.KV, err = p.KV.Revoke(n.KV) - return out, err -} - -// Grant adds a set of permissions to the permission object on which it is called, -// returning a new permission object. 
-func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) { - var out RWPermission - currentRead := types.NewUnsafeSet(rw.Read...) - for _, r := range n.Read { - if currentRead.Contains(r) { - return out, authErr(http.StatusConflict, "Granting duplicate read permission %s", r) - } - currentRead.Add(r) - } - currentWrite := types.NewUnsafeSet(rw.Write...) - for _, w := range n.Write { - if currentWrite.Contains(w) { - return out, authErr(http.StatusConflict, "Granting duplicate write permission %s", w) - } - currentWrite.Add(w) - } - out.Read = currentRead.Values() - out.Write = currentWrite.Values() - sort.Strings(out.Read) - sort.Strings(out.Write) - return out, nil -} - -// Revoke removes a set of permissions to the permission object on which it is called, -// returning a new permission object. -func (rw RWPermission) Revoke(n RWPermission) (RWPermission, error) { - var out RWPermission - currentRead := types.NewUnsafeSet(rw.Read...) - for _, r := range n.Read { - if !currentRead.Contains(r) { - plog.Noticef("revoking ungranted read permission %s", r) - continue - } - currentRead.Remove(r) - } - currentWrite := types.NewUnsafeSet(rw.Write...) - for _, w := range n.Write { - if !currentWrite.Contains(w) { - plog.Noticef("revoking ungranted write permission %s", w) - continue - } - currentWrite.Remove(w) - } - out.Read = currentRead.Values() - out.Write = currentWrite.Values() - sort.Strings(out.Read) - sort.Strings(out.Write) - return out, nil -} - -func (rw RWPermission) HasAccess(key string, write bool) bool { - var list []string - if write { - list = rw.Write - } else { - list = rw.Read - } - for _, pat := range list { - match, err := simpleMatch(pat, key) - if err == nil && match { - return true - } - } - return false -} - -func (rw RWPermission) HasRecursiveAccess(key string, write bool) bool { - list := rw.Read - if write { - list = rw.Write - } - for _, pat := range list { - match, err := prefixMatch(pat, key) - if err == nil && match { - return true - } - } - return false -} - -func simpleMatch(pattern string, key string) (match bool, err error) { - if pattern[len(pattern)-1] == '*' { - return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil - } - return key == pattern, nil -} - -func prefixMatch(pattern string, key string) (match bool, err error) { - if pattern[len(pattern)-1] != '*' { - return false, nil - } - return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil -} - -func attachRootRole(u User) User { - inRoles := false - for _, r := range u.Roles { - if r == RootRoleName { - inRoles = true - break - } - } - if !inRoles { - u.Roles = append(u.Roles, RootRoleName) - } - return u -} - -func (s *store) getUser(name string, quorum bool) (User, error) { - resp, err := s.requestResource("/users/"+name, false, quorum) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { - return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name) - } - } - return User{}, err - } - var u User - err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u) - if err != nil { - return u, err - } - // Attach root role to root user. 
- if u.User == "root" { - u = attachRootRole(u) - } - return u, nil -} - -func (s *store) getRole(name string, quorum bool) (Role, error) { - if name == RootRoleName { - return rootRole, nil - } - resp, err := s.requestResource("/roles/"+name, false, quorum) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { - return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name) - } - } - return Role{}, err - } - var r Role - err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r) - return r, err -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go b/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go deleted file mode 100644 index eec700accd7..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "encoding/json" - "path" - - etcderr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" -) - -func (s *store) ensureAuthDirectories() error { - if s.ensuredOnce { - return nil - } - for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} { - ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - defer cancel() - pe := false - rr := etcdserverpb.Request{ - Method: "PUT", - Path: res, - Dir: true, - PrevExist: &pe, - } - _, err := s.server.Do(ctx, rr) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeNodeExist { - continue - } - } - plog.Errorf("failed to create auth directories in the store (%v)", err) - return err - } - } - ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - defer cancel() - pe := false - rr := etcdserverpb.Request{ - Method: "PUT", - Path: StorePermsPrefix + "/enabled", - Val: "false", - PrevExist: &pe, - } - _, err := s.server.Do(ctx, rr) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeNodeExist { - s.ensuredOnce = true - return nil - } - } - return err - } - s.ensuredOnce = true - return nil -} - -func (s *store) enableAuth() error { - _, err := s.updateResource("/enabled", true) - return err -} -func (s *store) disableAuth() error { - _, err := s.updateResource("/enabled", false) - return err -} - -func (s *store) detectAuth() bool { - if s.server == nil { - return false - } - value, err := s.requestResource("/enabled", false, false) - if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { - return false - } - } - plog.Errorf("failed to detect auth settings (%s)", err) - return false - } - - var u bool - err = json.Unmarshal([]byte(*value.Event.Node.Value), &u) - if err != nil { - plog.Errorf("internal bookkeeping value for enabled isn't valid JSON (%v)", err) - return false - } - return u 
-} - -func (s *store) requestResource(res string, dir, quorum bool) (etcdserver.Response, error) { - ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - defer cancel() - p := path.Join(StorePermsPrefix, res) - method := "GET" - if quorum { - method = "QGET" - } - rr := etcdserverpb.Request{ - Method: method, - Path: p, - Dir: dir, - } - return s.server.Do(ctx, rr) -} - -func (s *store) updateResource(res string, value interface{}) (etcdserver.Response, error) { - return s.setResource(res, value, true) -} -func (s *store) createResource(res string, value interface{}) (etcdserver.Response, error) { - return s.setResource(res, value, false) -} -func (s *store) setResource(res string, value interface{}, prevexist bool) (etcdserver.Response, error) { - err := s.ensureAuthDirectories() - if err != nil { - return etcdserver.Response{}, err - } - ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - defer cancel() - data, err := json.Marshal(value) - if err != nil { - return etcdserver.Response{}, err - } - p := path.Join(StorePermsPrefix, res) - rr := etcdserverpb.Request{ - Method: "PUT", - Path: p, - Val: string(data), - PrevExist: &prevexist, - } - return s.server.Do(ctx, rr) -} - -func (s *store) deleteResource(res string) (etcdserver.Response, error) { - err := s.ensureAuthDirectories() - if err != nil { - return etcdserver.Response{}, err - } - ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - defer cancel() - pex := true - p := path.Join(StorePermsPrefix, res) - rr := etcdserverpb.Request{ - Method: "DELETE", - Path: p, - PrevExist: &pex, - } - return s.server.Do(ctx, rr) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go deleted file mode 100644 index fa84ffae630..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "sort" - "time" - - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/httputil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/version" - "github.com/coreos/go-semver/semver" -) - -// isMemberBootstrapped tries to check if the given member has been bootstrapped -// in the given cluster. -func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { - rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt) - if err != nil { - return false - } - id := cl.MemberByName(member).ID - m := rcl.Member(id) - if m == nil { - return false - } - if len(m.ClientURLs) > 0 { - return true - } - return false -} - -// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and -// attempts to construct a Cluster by accessing the members endpoint on one of -// these URLs. 
The first URL to provide a response is used. If no URLs provide -// a response, or a Cluster cannot be successfully created from a received -// response, an error is returned. -// Each request has a 10-second timeout. Because the upper limit of TTL is 5s, -// 10 second is enough for building connection and finishing request. -func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { - return getClusterFromRemotePeers(urls, 10*time.Second, true, rt) -} - -// If logerr is true, it prints out more error messages. -func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { - cc := &http.Client{ - Transport: rt, - Timeout: timeout, - } - for _, u := range urls { - resp, err := cc.Get(u + "/members") - if err != nil { - if logerr { - plog.Warningf("could not get cluster response from %s: %v", u, err) - } - continue - } - b, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - if logerr { - plog.Warningf("could not read the body of cluster response: %v", err) - } - continue - } - var membs []*membership.Member - if err = json.Unmarshal(b, &membs); err != nil { - if logerr { - plog.Warningf("could not unmarshal cluster response: %v", err) - } - continue - } - id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID")) - if err != nil { - if logerr { - plog.Warningf("could not parse the cluster ID from cluster res: %v", err) - } - continue - } - - // check the length of membership members - // if the membership members are present then prepare and return raft cluster - // if membership members are not present then the raft cluster formed will be - // an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error - if len(membs) > 0 { - return membership.NewClusterFromMembers("", id, membs), nil - } - - return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.") - } - return nil, fmt.Errorf("could not retrieve cluster information from the given urls") -} - -// getRemotePeerURLs returns peer urls of remote members in the cluster. The -// returned list is sorted in ascending lexicographical order. -func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { - us := make([]string, 0) - for _, m := range cl.Members() { - if m.Name == local { - continue - } - us = append(us, m.PeerURLs...) - } - sort.Strings(us) - return us -} - -// getVersions returns the versions of the members in the given cluster. -// The key of the returned map is the member's ID. The value of the returned map -// is the semver versions string, including server and cluster. -// If it fails to get the version of a member, the key will be nil. -func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { - members := cl.Members() - vers := make(map[string]*version.Versions) - for _, m := range members { - if m.ID == local { - cv := "not_decided" - if cl.Version() != nil { - cv = cl.Version().String() - } - vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} - continue - } - ver, err := getVersion(m, rt) - if err != nil { - plog.Warningf("cannot get the version of member %s (%v)", m.ID, err) - vers[m.ID.String()] = nil - } else { - vers[m.ID.String()] = ver - } - } - return vers -} - -// decideClusterVersion decides the cluster version based on the versions map. 
-// The returned version is the min server version in the map, or nil if the min -// version in unknown. -func decideClusterVersion(vers map[string]*version.Versions) *semver.Version { - var cv *semver.Version - lv := semver.Must(semver.NewVersion(version.Version)) - - for mid, ver := range vers { - if ver == nil { - return nil - } - v, err := semver.NewVersion(ver.Server) - if err != nil { - plog.Errorf("cannot understand the version of member %s (%v)", mid, err) - return nil - } - if lv.LessThan(*v) { - plog.Warningf("the local etcd version %s is not up-to-date", lv.String()) - plog.Warningf("member %s has a higher version %s", mid, ver.Server) - } - if cv == nil { - cv = v - } else if v.LessThan(*cv) { - cv = v - } - } - return cv -} - -// isCompatibleWithCluster return true if the local member has a compatible version with -// the current running cluster. -// The version is considered as compatible when at least one of the other members in the cluster has a -// cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version -// out of the range. -// We set this rule since when the local member joins, another member might be offline. -func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { - vers := getVersions(cl, local, rt) - minV := semver.Must(semver.NewVersion(version.MinClusterVersion)) - maxV := semver.Must(semver.NewVersion(version.Version)) - maxV = &semver.Version{ - Major: maxV.Major, - Minor: maxV.Minor, - } - - return isCompatibleWithVers(vers, local, minV, maxV) -} - -func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { - var ok bool - for id, v := range vers { - // ignore comparison with local version - if id == local.String() { - continue - } - if v == nil { - continue - } - clusterv, err := semver.NewVersion(v.Cluster) - if err != nil { - plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err) - continue - } - if clusterv.LessThan(*minV) { - plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String()) - return false - } - if maxV.LessThan(*clusterv) { - plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String()) - return false - } - ok = true - } - return ok -} - -// getVersion returns the Versions of the given member via its -// peerURLs. Returns the last error if it fails to get the version. -func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { - cc := &http.Client{ - Transport: rt, - } - var ( - err error - resp *http.Response - ) - - for _, u := range m.PeerURLs { - resp, err = cc.Get(u + "/version") - if err != nil { - plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - // etcd 2.0 does not have version endpoint on peer url. 
- if resp.StatusCode == http.StatusNotFound { - httputil.GracefulClose(resp) - return &version.Versions{ - Server: "2.0.0", - Cluster: "2.0.0", - }, nil - } - - var b []byte - b, err = ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - var vers version.Versions - if err = json.Unmarshal(b, &vers); err != nil { - plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - return &vers, nil - } - return nil, err -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go deleted file mode 100644 index 9bcac0f076b..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/config.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "fmt" - "path/filepath" - "sort" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/netutil" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" -) - -// ServerConfig holds the configuration of etcd as taken from the command line or discovery. -type ServerConfig struct { - Name string - DiscoveryURL string - DiscoveryProxy string - ClientURLs types.URLs - PeerURLs types.URLs - DataDir string - // DedicatedWALDir config will make the etcd to write the WAL to the WALDir - // rather than the dataDir/member/wal. - DedicatedWALDir string - SnapCount uint64 - MaxSnapFiles uint - MaxWALFiles uint - InitialPeerURLsMap types.URLsMap - InitialClusterToken string - NewCluster bool - ForceNewCluster bool - PeerTLSInfo transport.TLSInfo - - TickMs uint - ElectionTicks int - BootstrapTimeout time.Duration - - AutoCompactionRetention int - QuotaBackendBytes int64 - - StrictReconfigCheck bool - - // ClientCertAuthEnabled is true when cert has been signed by the client CA. - ClientCertAuthEnabled bool -} - -// VerifyBootstrap sanity-checks the initial config for bootstrap case -// and returns an error for things that should never happen. -func (c *ServerConfig) VerifyBootstrap() error { - if err := c.hasLocalMember(); err != nil { - return err - } - if err := c.advertiseMatchesCluster(); err != nil { - return err - } - if checkDuplicateURL(c.InitialPeerURLsMap) { - return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) - } - if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" { - return fmt.Errorf("initial cluster unset and no discovery URL found") - } - return nil -} - -// VerifyJoinExisting sanity-checks the initial config for join existing cluster -// case and returns an error for things that should never happen. -func (c *ServerConfig) VerifyJoinExisting() error { - // The member has announced its peer urls to the cluster before starting; no need to - // set the configuration again. 
- if err := c.hasLocalMember(); err != nil { - return err - } - if checkDuplicateURL(c.InitialPeerURLsMap) { - return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap) - } - if c.DiscoveryURL != "" { - return fmt.Errorf("discovery URL should not be set when joining existing initial cluster") - } - return nil -} - -// hasLocalMember checks that the cluster at least contains the local server. -func (c *ServerConfig) hasLocalMember() error { - if urls := c.InitialPeerURLsMap[c.Name]; urls == nil { - return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name) - } - return nil -} - -// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list. -func (c *ServerConfig) advertiseMatchesCluster() error { - urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice() - urls.Sort() - sort.Strings(apurls) - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) { - umap := map[string]types.URLs{c.Name: c.PeerURLs} - return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ",")) - } - return nil -} - -func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") } - -func (c *ServerConfig) WALDir() string { - if c.DedicatedWALDir != "" { - return c.DedicatedWALDir - } - return filepath.Join(c.MemberDir(), "wal") -} - -func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") } - -func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" } - -// ReqTimeout returns timeout for request to finish. -func (c *ServerConfig) ReqTimeout() time.Duration { - // 5s for queue waiting, computation and disk IO delay - // + 2 * election timeout for possible leader election - return 5*time.Second + 2*time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond -} - -func (c *ServerConfig) electionTimeout() time.Duration { - return time.Duration(c.ElectionTicks) * time.Duration(c.TickMs) * time.Millisecond -} - -func (c *ServerConfig) peerDialTimeout() time.Duration { - // 1s for queue wait and system delay - // + one RTT, which is smaller than 1/5 election timeout - return time.Second + time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond/5 -} - -func (c *ServerConfig) PrintWithInitial() { c.print(true) } - -func (c *ServerConfig) Print() { c.print(false) } - -func (c *ServerConfig) print(initial bool) { - plog.Infof("name = %s", c.Name) - if c.ForceNewCluster { - plog.Infof("force new cluster") - } - plog.Infof("data dir = %s", c.DataDir) - plog.Infof("member dir = %s", c.MemberDir()) - if c.DedicatedWALDir != "" { - plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir) - } - plog.Infof("heartbeat = %dms", c.TickMs) - plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs)) - plog.Infof("snapshot count = %d", c.SnapCount) - if len(c.DiscoveryURL) != 0 { - plog.Infof("discovery URL= %s", c.DiscoveryURL) - if len(c.DiscoveryProxy) != 0 { - plog.Infof("discovery proxy = %s", c.DiscoveryProxy) - } - } - plog.Infof("advertise client URLs = %s", c.ClientURLs) - if initial { - plog.Infof("initial advertise peer URLs = %s", c.PeerURLs) - plog.Infof("initial cluster = %s", c.InitialPeerURLsMap) - } -} - -func checkDuplicateURL(urlsmap types.URLsMap) bool { - um := make(map[string]bool) - for _, urls := range urlsmap { - for _, url := range urls { - u := 
url.String() - if um[u] { - return true - } - um[u] = true - } - } - return false -} - -func (c *ServerConfig) bootstrapTimeout() time.Duration { - if c.BootstrapTimeout != 0 { - return c.BootstrapTimeout - } - return time.Second -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go deleted file mode 100644 index d513f6708d3..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "sync/atomic" -) - -// consistentIndex represents the offset of an entry in a consistent replica log. -// It implements the mvcc.ConsistentIndexGetter interface. -// It is always set to the offset of current entry before executing the entry, -// so ConsistentWatchableKV could get the consistent index from it. -type consistentIndex uint64 - -func (i *consistentIndex) setConsistentIndex(v uint64) { - atomic.StoreUint64((*uint64)(i), v) -} - -func (i *consistentIndex) ConsistentIndex() uint64 { - return atomic.LoadUint64((*uint64)(i)) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/doc.go b/vendor/github.com/coreos/etcd/etcdserver/doc.go deleted file mode 100644 index b195d2d167a..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package etcdserver defines how etcd servers interact and store their states. -package etcdserver diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go deleted file mode 100644 index 5edc155624b..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/errors.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
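ServerConfig.ReqTimeout above derives the request deadline from the tick configuration: five seconds for queue waiting, computation and disk IO, plus two election timeouts (ElectionTicks * TickMs milliseconds each) to ride out a possible leader election. A minimal standalone sketch of that arithmetic follows; the tick values are assumed example numbers, not taken from this patch.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed example values (not from the patch): a 100ms heartbeat tick
	// and 10 ticks per election, i.e. a 1s election timeout.
	tickMs := uint(100)
	electionTicks := 10

	electionTimeout := time.Duration(electionTicks) * time.Duration(tickMs) * time.Millisecond

	// ReqTimeout as defined in ServerConfig above: 5s for queue waiting,
	// computation and disk IO delay, plus 2 * election timeout for a
	// possible leader election.
	reqTimeout := 5*time.Second + 2*electionTimeout

	fmt.Println(electionTimeout, reqTimeout) // prints: 1s 7s
}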
- -package etcdserver - -import ( - "errors" - "fmt" -) - -var ( - ErrUnknownMethod = errors.New("etcdserver: unknown method") - ErrStopped = errors.New("etcdserver: server stopped") - ErrCanceled = errors.New("etcdserver: request cancelled") - ErrTimeout = errors.New("etcdserver: request timed out") - ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") - ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") - ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") - ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") - ErrNoLeader = errors.New("etcdserver: no leader") - ErrRequestTooLarge = errors.New("etcdserver: request is too large") - ErrNoSpace = errors.New("etcdserver: no space") - ErrTooManyRequests = errors.New("etcdserver: too many requests") - ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") -) - -type DiscoveryError struct { - Op string - Err error -} - -func (e DiscoveryError) Error() string { - return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go deleted file mode 100644 index f34bedf3ed3..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ /dev/null @@ -1,1045 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: etcdserver.proto -// DO NOT EDIT! - -/* - Package etcdserverpb is a generated protocol buffer package. - - It is generated from these files: - etcdserver.proto - raft_internal.proto - rpc.proto - - It has these top-level messages: - Request - Metadata - RequestHeader - InternalRaftRequest - EmptyResponse - InternalAuthenticateRequest - ResponseHeader - RangeRequest - RangeResponse - PutRequest - PutResponse - DeleteRangeRequest - DeleteRangeResponse - RequestOp - ResponseOp - Compare - TxnRequest - TxnResponse - CompactionRequest - CompactionResponse - HashRequest - HashResponse - SnapshotRequest - SnapshotResponse - WatchRequest - WatchCreateRequest - WatchCancelRequest - WatchResponse - LeaseGrantRequest - LeaseGrantResponse - LeaseRevokeRequest - LeaseRevokeResponse - LeaseKeepAliveRequest - LeaseKeepAliveResponse - LeaseTimeToLiveRequest - LeaseTimeToLiveResponse - Member - MemberAddRequest - MemberAddResponse - MemberRemoveRequest - MemberRemoveResponse - MemberUpdateRequest - MemberUpdateResponse - MemberListRequest - MemberListResponse - DefragmentRequest - DefragmentResponse - AlarmRequest - AlarmMember - AlarmResponse - StatusRequest - StatusResponse - AuthEnableRequest - AuthDisableRequest - AuthenticateRequest - AuthUserAddRequest - AuthUserGetRequest - AuthUserDeleteRequest - AuthUserChangePasswordRequest - AuthUserGrantRoleRequest - AuthUserRevokeRoleRequest - AuthRoleAddRequest - AuthRoleGetRequest - AuthUserListRequest - AuthRoleListRequest - AuthRoleDeleteRequest - AuthRoleGrantPermissionRequest - AuthRoleRevokePermissionRequest - AuthEnableResponse - AuthDisableResponse - AuthenticateResponse - AuthUserAddResponse - AuthUserGetResponse - AuthUserDeleteResponse - AuthUserChangePasswordResponse - AuthUserGrantRoleResponse - AuthUserRevokeRoleResponse - AuthRoleAddResponse - AuthRoleGetResponse - AuthRoleListResponse - AuthUserListResponse - AuthRoleDeleteResponse - AuthRoleGrantPermissionResponse - 
AuthRoleRevokePermissionResponse -*/ -package etcdserverpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Request struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` - Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` - Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` - Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` - PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` - PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` - PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` - Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` - Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` - Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` - Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` - Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` - Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` - Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` - Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` - Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} } - -type Metadata struct { - NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` - ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} } - -func init() { - proto.RegisterType((*Request)(nil), "etcdserverpb.Request") - proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") -} -func (m *Request) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Request) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) - dAtA[i] = 0x12 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) - i += copy(dAtA[i:], m.Method) - dAtA[i] = 0x1a - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x22 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) - i += copy(dAtA[i:], m.Val) - dAtA[i] = 0x28 - i++ - if m.Dir { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x32 - i++ - i = 
encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) - i += copy(dAtA[i:], m.PrevValue) - dAtA[i] = 0x38 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) - if m.PrevExist != nil { - dAtA[i] = 0x40 - i++ - if *m.PrevExist { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - dAtA[i] = 0x48 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) - dAtA[i] = 0x50 - i++ - if m.Wait { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x58 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) - dAtA[i] = 0x60 - i++ - if m.Recursive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x68 - i++ - if m.Sorted { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x70 - i++ - if m.Quorum { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x78 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if m.Stream { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.Refresh != nil { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - if *m.Refresh { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Metadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) - dAtA[i] = 0x10 - i++ - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Etcdserver(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Etcdserver(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Request) Size() (n int) { - var l int - _ = l - n += 1 + sovEtcdserver(uint64(m.ID)) - l = len(m.Method) - n += 1 + l + sovEtcdserver(uint64(l)) - l = len(m.Path) - n += 1 + l + sovEtcdserver(uint64(l)) - l = len(m.Val) - n += 1 + l + sovEtcdserver(uint64(l)) - n += 2 - l = len(m.PrevValue) - n += 1 + l + sovEtcdserver(uint64(l)) - n += 1 + sovEtcdserver(uint64(m.PrevIndex)) - if m.PrevExist != nil { - n += 2 - } - n += 1 + sovEtcdserver(uint64(m.Expiration)) - n += 2 - n += 1 + sovEtcdserver(uint64(m.Since)) - n += 2 - n += 2 - n += 2 - n += 1 + sovEtcdserver(uint64(m.Time)) - n += 3 - if m.Refresh != nil { - n += 3 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Metadata) Size() (n int) { - var l int - _ = l - n += 1 + sovEtcdserver(uint64(m.NodeID)) - n += 1 + sovEtcdserver(uint64(m.ClusterID)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovEtcdserver(x 
uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozEtcdserver(x uint64) (n int) { - return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Request) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Request: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Method = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Val = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Dir = bool(v != 0) - case 6: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEtcdserver - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrevValue = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType) - } - m.PrevIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PrevIndex |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.PrevExist = &b - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType) - } - m.Expiration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Expiration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Wait = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) - } - m.Since = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Since |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Recursive = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Sorted = bool(v != 0) - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Quorum = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - m.Time = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Time |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stream = bool(v != 0) - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Refresh = &b - default: - iNdEx = preIndex - skippy, err := skipEtcdserver(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthEtcdserver - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) - } - m.NodeID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NodeID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) - } - m.ClusterID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClusterID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipEtcdserver(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthEtcdserver - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEtcdserver(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthEtcdserver - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEtcdserver - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipEtcdserver(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } - -var fileDescriptorEtcdserver = []byte{ - // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, - 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, - 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, - 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, - 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, - 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, - 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, - 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, - 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, - 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, - 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, - 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, - 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, - 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 
0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, - 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, - 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, - 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, - 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, - 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, - 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, - 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, - 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, - 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, - 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go deleted file mode 100644 index 66890c93c44..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ /dev/null @@ -1,2094 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: raft_internal.proto -// DO NOT EDIT! - -package etcdserverpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type RequestHeader struct { - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // username is a username that is associated with an auth token of gRPC connection - Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` - // auth_revision is a revision number of auth.authStore. It is not related to mvcc - AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` -} - -func (m *RequestHeader) Reset() { *m = RequestHeader{} } -func (m *RequestHeader) String() string { return proto.CompactTextString(m) } -func (*RequestHeader) ProtoMessage() {} -func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} } - -// An InternalRaftRequest is the union of all requests which can be -// sent via raft. 
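The generated marshaling code above (encodeVarintEtcdserver, sovEtcdserver and the decode loops inside Unmarshal) stores integers and length prefixes as protobuf varints: seven payload bits per byte, with the high bit set while more bytes follow. A small self-contained sketch of that encoding, written independently of the generated helpers, with the overflow and truncation checks trimmed for brevity:

package main

import "fmt"

// putUvarint appends v using the same base-128 scheme as the generated
// encodeVarint* helpers: low 7 bits per byte, MSB set while more bytes follow.
func putUvarint(dst []byte, v uint64) []byte {
	for v >= 1<<7 {
		dst = append(dst, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(dst, byte(v))
}

// getUvarint mirrors the decode loops in the generated Unmarshal code:
// accumulate 7 bits per byte until a byte without the continuation bit.
func getUvarint(b []byte) (uint64, int) {
	var v uint64
	for i, c := range b {
		v |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0 // truncated input
}

func main() {
	buf := putUvarint(nil, 300) // 300 encodes as 0xac, 0x02
	fmt.Printf("% x\n", buf)
	v, n := getUvarint(buf)
	fmt.Println(v, n) // prints: 300 2
}

This is also why single-byte tags such as 0x8 or 0x12 appear literally in the generated MarshalTo bodies: a tag is the field number shifted left by three bits OR-ed with the wire type, so field 1 as a varint is 0x8 and field 2 as length-delimited data is 0x12.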
-type InternalRaftRequest struct { - Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"` - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"` - Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` - Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` - DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"` - Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"` - Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"` - LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"` - LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"` - Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"` - AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"` - AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"` - Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"` - AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"` - AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"` - AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"` - AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"` - AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"` - AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"` - AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"` - AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"` - AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"` - AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"` - AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"` - AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"` - AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"` -} - -func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } -func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } -func 
(*InternalRaftRequest) ProtoMessage() {} -func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} } - -type EmptyResponse struct { -} - -func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } -func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } -func (*EmptyResponse) ProtoMessage() {} -func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{2} } - -// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? -// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. -// For avoiding misusage the field, we have an internal version of AuthenticateRequest. -type InternalAuthenticateRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - // simple_token is generated in API layer (etcdserver/v3_server.go) - SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` -} - -func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } -func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } -func (*InternalAuthenticateRequest) ProtoMessage() {} -func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRaftInternal, []int{3} -} - -func init() { - proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") - proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") - proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") - proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") -} -func (m *RequestHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - } - if len(m.Username) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - } - if m.AuthRevision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) - } - return i, nil -} - -func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - } - if m.V2 != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size())) - n1, err := m.V2.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.Range != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size())) - n2, err := m.Range.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.Put != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size())) - n3, err := m.Put.MarshalTo(dAtA[i:]) - if err != nil { - 
return 0, err - } - i += n3 - } - if m.DeleteRange != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size())) - n4, err := m.DeleteRange.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.Txn != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size())) - n5, err := m.Txn.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if m.Compaction != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size())) - n6, err := m.Compaction.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.LeaseGrant != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseGrant.Size())) - n7, err := m.LeaseGrant.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.LeaseRevoke != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size())) - n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.Alarm != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size())) - n9, err := m.Alarm.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if m.Header != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x6 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size())) - n10, err := m.Header.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.AuthEnable != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x3e - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size())) - n11, err := m.AuthEnable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.AuthDisable != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x3f - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size())) - n12, err := m.AuthDisable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - if m.Authenticate != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x3f - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size())) - n13, err := m.Authenticate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - if m.AuthUserAdd != nil { - dAtA[i] = 0xe2 - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size())) - n14, err := m.AuthUserAdd.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.AuthUserDelete != nil { - dAtA[i] = 0xea - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserDelete.Size())) - n15, err := m.AuthUserDelete.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if m.AuthUserGet != nil { - dAtA[i] = 0xf2 - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGet.Size())) - n16, err := m.AuthUserGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - if m.AuthUserChangePassword != nil { - dAtA[i] = 0xfa - i++ - dAtA[i] = 0x44 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size())) - n17, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - if m.AuthUserGrantRole != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size())) - n18, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - 
if m.AuthUserRevokeRole != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size())) - n19, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - if m.AuthUserList != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size())) - n20, err := m.AuthUserList.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - } - if m.AuthRoleList != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x45 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size())) - n21, err := m.AuthRoleList.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - } - if m.AuthRoleAdd != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size())) - n22, err := m.AuthRoleAdd.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - if m.AuthRoleDelete != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size())) - n23, err := m.AuthRoleDelete.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - if m.AuthRoleGet != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size())) - n24, err := m.AuthRoleGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - if m.AuthRoleGrantPermission != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size())) - n25, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - } - if m.AuthRoleRevokePermission != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x4b - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size())) - n26, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - } - return i, nil -} - -func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Password) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) - i += copy(dAtA[i:], m.Password) - } - if len(m.SimpleToken) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) - i += copy(dAtA[i:], m.SimpleToken) - } - return i, nil -} - -func encodeFixed64RaftInternal(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] 
= uint8(v >> 56) - return offset + 8 -} -func encodeFixed32RaftInternal(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *RequestHeader) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRaftInternal(uint64(m.ID)) - } - l = len(m.Username) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRevision != 0 { - n += 1 + sovRaftInternal(uint64(m.AuthRevision)) - } - return n -} - -func (m *InternalRaftRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRaftInternal(uint64(m.ID)) - } - if m.V2 != nil { - l = m.V2.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Range != nil { - l = m.Range.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Put != nil { - l = m.Put.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.DeleteRange != nil { - l = m.DeleteRange.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Txn != nil { - l = m.Txn.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Compaction != nil { - l = m.Compaction.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.LeaseGrant != nil { - l = m.LeaseGrant.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.LeaseRevoke != nil { - l = m.LeaseRevoke.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Alarm != nil { - l = m.Alarm.Size() - n += 1 + l + sovRaftInternal(uint64(l)) - } - if m.Header != nil { - l = m.Header.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthEnable != nil { - l = m.AuthEnable.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthDisable != nil { - l = m.AuthDisable.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.Authenticate != nil { - l = m.Authenticate.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserAdd != nil { - l = m.AuthUserAdd.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserDelete != nil { - l = m.AuthUserDelete.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserGet != nil { - l = m.AuthUserGet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserChangePassword != nil { - l = m.AuthUserChangePassword.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserGrantRole != nil { - l = m.AuthUserGrantRole.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserRevokeRole != nil { - l = m.AuthUserRevokeRole.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthUserList != nil { - l = m.AuthUserList.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleList != nil { - l = m.AuthRoleList.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleAdd != nil { - l = m.AuthRoleAdd.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleDelete != nil { - l = m.AuthRoleDelete.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleGet != nil { - l = m.AuthRoleGet.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleGrantPermission != nil { - l = m.AuthRoleGrantPermission.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - if m.AuthRoleRevokePermission != nil { - l = m.AuthRoleRevokePermission.Size() - n += 2 + l + sovRaftInternal(uint64(l)) - } - 
return n -} - -func (m *EmptyResponse) Size() (n int) { - var l int - _ = l - return n -} - -func (m *InternalAuthenticateRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - l = len(m.SimpleToken) - if l > 0 { - n += 1 + l + sovRaftInternal(uint64(l)) - } - return n -} - -func sovRaftInternal(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRaftInternal(x uint64) (n int) { - return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RequestHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType) - } - m.AuthRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AuthRevision |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { 
- return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.V2 == nil { - m.V2 = &Request{} - } - if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Range == nil { - m.Range = &RangeRequest{} - } - if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Put == nil { - m.Put = &PutRequest{} - } - if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeleteRange == nil { - m.DeleteRange = &DeleteRangeRequest{} - } - if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Txn == nil { - m.Txn = &TxnRequest{} - } - if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Compaction == nil { - m.Compaction = &CompactionRequest{} - } - if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseGrant == nil { - m.LeaseGrant = &LeaseGrantRequest{} - } - if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseRevoke == nil { - m.LeaseRevoke = &LeaseRevokeRequest{} - } - if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Alarm == nil { - m.Alarm = &AlarmRequest{} - } - if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 100: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &RequestHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1000: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthEnable == nil { - m.AuthEnable = &AuthEnableRequest{} - } - if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1011: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthDisable == nil { - m.AuthDisable = &AuthDisableRequest{} - } - if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1012: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Authenticate == nil { - m.Authenticate = &InternalAuthenticateRequest{} - } - if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1100: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserAdd == nil { - m.AuthUserAdd = &AuthUserAddRequest{} - } - if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1101: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b 
< 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserDelete == nil { - m.AuthUserDelete = &AuthUserDeleteRequest{} - } - if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1102: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserGet == nil { - m.AuthUserGet = &AuthUserGetRequest{} - } - if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1103: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserChangePassword == nil { - m.AuthUserChangePassword = &AuthUserChangePasswordRequest{} - } - if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1104: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserGrantRole == nil { - m.AuthUserGrantRole = &AuthUserGrantRoleRequest{} - } - if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1105: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserRevokeRole == nil { - m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{} - } - if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1106: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal 
- } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthUserList == nil { - m.AuthUserList = &AuthUserListRequest{} - } - if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1107: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleList == nil { - m.AuthRoleList = &AuthRoleListRequest{} - } - if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1200: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleAdd == nil { - m.AuthRoleAdd = &AuthRoleAddRequest{} - } - if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1201: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleDelete == nil { - m.AuthRoleDelete = &AuthRoleDeleteRequest{} - } - if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1202: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleGet == nil { - m.AuthRoleGet = &AuthRoleGetRequest{} - } - if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1203: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleGrantPermission == nil { - m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{} - } - if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1204: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AuthRoleRevokePermission == nil { - m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{} - } - if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EmptyResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRaftInternal - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SimpleToken = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaftInternal(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaftInternal - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRaftInternal(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRaftInternal - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaftInternal - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := 
int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRaftInternal(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) } - -var fileDescriptorRaftInternal = []byte{ - // 837 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, - 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, - 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, - 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, - 0x0f, 0xd4, 0x17, 0x50, 0xbc, 0xf1, 0xca, 0x1b, 0x7d, 0x00, 0x67, 0x3f, 0x92, 0x34, 0x6d, 0xca, - 0x5d, 0x72, 0xce, 0xff, 0xfc, 0xce, 0xd9, 0xec, 0x7f, 0xbb, 0x45, 0xb3, 0x8c, 0x1e, 0x72, 0xd3, - 0x76, 0x39, 0x30, 0x97, 0x3a, 0xab, 0x3e, 0xf3, 0xb8, 0x87, 0x0b, 0xc0, 0x1b, 0x56, 0x00, 0xac, - 0x0b, 0xcc, 0x3f, 0x28, 0xcd, 0x35, 0xbd, 0xa6, 0x27, 0x13, 0xf7, 0xc4, 0x93, 0xd2, 0x94, 0x66, - 0x62, 0x8d, 0x8e, 0xe4, 0x98, 0xdf, 0x50, 0x8f, 0x95, 0x67, 0x68, 0xda, 0x80, 0x17, 0x1d, 0x08, - 0xf8, 0x16, 0x50, 0x0b, 0x18, 0x2e, 0xa2, 0xec, 0x76, 0x9d, 0x64, 0x16, 0x33, 0x2b, 0x63, 0x46, - 0x76, 0xbb, 0x8e, 0x4b, 0x68, 0xaa, 0x13, 0x88, 0x96, 0x6d, 0x20, 0xd9, 0xc5, 0xcc, 0x4a, 0xce, - 0x88, 0xde, 0xf1, 0x32, 0x9a, 0xa6, 0x1d, 0xde, 0x32, 0x19, 0x74, 0xed, 0xc0, 0xf6, 0x5c, 0x32, - 0x2a, 0xcb, 0x0a, 0x22, 0x68, 0xe8, 0x58, 0xe5, 0x4f, 0x11, 0xcd, 0x6e, 0xeb, 0xa9, 0x0d, 0x7a, - 0xc8, 0x75, 0xbb, 0x81, 0x46, 0xd7, 0x50, 0xb6, 0x5b, 0x95, 0x2d, 0xf2, 0xd5, 0xf9, 0xd5, 0xde, - 0x75, 0xad, 0xea, 0x12, 0x23, 0xdb, 0xad, 0xe2, 0xfb, 0x68, 0x9c, 0x51, 0xb7, 0x09, 0xb2, 0x57, - 0xbe, 0x5a, 0xea, 0x53, 0x8a, 0x54, 0x28, 0x57, 0x42, 0x7c, 0x0b, 0x8d, 0xfa, 0x1d, 0x4e, 0xc6, - 0xa4, 0x9e, 0x24, 0xf5, 0x7b, 0x9d, 0x70, 0x1e, 0x43, 0x88, 0xf0, 0x3a, 0x2a, 0x58, 0xe0, 0x00, - 0x07, 0x53, 0x35, 0x19, 0x97, 0x45, 0x8b, 0xc9, 0xa2, 0xba, 0x54, 0x24, 0x5a, 0xe5, 0xad, 0x38, - 0x26, 0x1a, 0xf2, 0x63, 0x97, 0x4c, 0xa4, 0x35, 0xdc, 0x3f, 0x76, 0xa3, 0x86, 0xfc, 0xd8, 0xc5, - 0x0f, 0x10, 0x6a, 0x78, 0x6d, 0x9f, 0x36, 0xb8, 0xf8, 0x7e, 0x93, 0xb2, 0xe4, 0x6a, 0xb2, 0x64, - 0x3d, 0xca, 0x87, 0x95, 0x3d, 0x25, 0xf8, 0x21, 0xca, 0x3b, 0x40, 0x03, 0x30, 0x9b, 0x8c, 0xba, - 0x9c, 0x4c, 0xa5, 0x11, 0x76, 0x84, 0x60, 0x53, 0xe4, 0x23, 0x82, 0x13, 0x85, 0xc4, 0x9a, 0x15, - 0x81, 0x41, 0xd7, 0x3b, 0x02, 0x92, 0x4b, 0x5b, 0xb3, 0x44, 0x18, 0x52, 0x10, 0xad, 0xd9, 0x89, - 0x63, 0x62, 0x5b, 0xa8, 0x43, 0x59, 0x9b, 0xa0, 0xb4, 0x6d, 0xa9, 0x89, 0x54, 0xb4, 0x2d, 0x52, - 0x88, 0xd7, 0xd0, 0x44, 0x4b, 0x5a, 0x8e, 0x58, 0xb2, 0x64, 0x21, 0x75, 0xcf, 0x95, 0x2b, 0x0d, - 0x2d, 0xc5, 0x35, 0x94, 0x97, 0x8e, 0x03, 0x97, 0x1e, 0x38, 0x40, 0x7e, 0xa7, 0x7e, 0xb0, 0x5a, - 0x87, 0xb7, 0x36, 0xa4, 0x20, 0x5a, 0x2e, 0x8d, 0x42, 0xb8, 0x8e, 0xa4, 0x3f, 0x4d, 0xcb, 0x0e, - 0x24, 0xe3, 0xef, 0x64, 0xda, 0x7a, 0x05, 0xa3, 0xae, 0x14, 0xd1, 0x7a, 0x69, 0x1c, 0xc3, 0xbb, - 0x8a, 0x02, 0x2e, 0xb7, 0x1b, 0x94, 0x03, 0xf9, 0xa7, 0x28, 0x37, 0x93, 0x94, 
0xd0, 0xf7, 0xb5, - 0x1e, 0x69, 0x88, 0x4b, 0xd4, 0xe3, 0x0d, 0x7d, 0x94, 0xc4, 0xd9, 0x32, 0xa9, 0x65, 0x91, 0x8f, - 0x53, 0xc3, 0xc6, 0x7a, 0x1c, 0x00, 0xab, 0x59, 0x56, 0x62, 0x2c, 0x1d, 0xc3, 0xbb, 0x68, 0x26, - 0xc6, 0x28, 0x4f, 0x92, 0x4f, 0x8a, 0xb4, 0x9c, 0x4e, 0xd2, 0x66, 0xd6, 0xb0, 0x22, 0x4d, 0x84, - 0x93, 0x63, 0x35, 0x81, 0x93, 0xcf, 0xe7, 0x8e, 0xb5, 0x09, 0x7c, 0x60, 0xac, 0x4d, 0xe0, 0xb8, - 0x89, 0xae, 0xc4, 0x98, 0x46, 0x4b, 0x9c, 0x12, 0xd3, 0xa7, 0x41, 0xf0, 0xd2, 0x63, 0x16, 0xf9, - 0xa2, 0x90, 0xb7, 0xd3, 0x91, 0xeb, 0x52, 0xbd, 0xa7, 0xc5, 0x21, 0xfd, 0x12, 0x4d, 0x4d, 0xe3, - 0x27, 0x68, 0xae, 0x67, 0x5e, 0x61, 0x6f, 0x93, 0x79, 0x0e, 0x90, 0x53, 0xd5, 0xe3, 0xfa, 0x90, - 0xb1, 0xe5, 0xd1, 0xf0, 0xe2, 0xad, 0xbe, 0x48, 0xfb, 0x33, 0xf8, 0x29, 0x9a, 0x8f, 0xc9, 0xea, - 0xa4, 0x28, 0xf4, 0x57, 0x85, 0xbe, 0x91, 0x8e, 0xd6, 0x47, 0xa6, 0x87, 0x8d, 0xe9, 0x40, 0x0a, - 0x6f, 0xa1, 0x62, 0x0c, 0x77, 0xec, 0x80, 0x93, 0x6f, 0x8a, 0xba, 0x94, 0x4e, 0xdd, 0xb1, 0x03, - 0x9e, 0xf0, 0x51, 0x18, 0x8c, 0x48, 0x62, 0x34, 0x45, 0xfa, 0x3e, 0x94, 0x24, 0x5a, 0x0f, 0x90, - 0xc2, 0x60, 0xb4, 0xf5, 0x92, 0x24, 0x1c, 0xf9, 0x26, 0x37, 0x6c, 0xeb, 0x45, 0x4d, 0xbf, 0x23, - 0x75, 0x2c, 0x72, 0xa4, 0xc4, 0x68, 0x47, 0xbe, 0xcd, 0x0d, 0x73, 0xa4, 0xa8, 0x4a, 0x71, 0x64, - 0x1c, 0x4e, 0x8e, 0x25, 0x1c, 0xf9, 0xee, 0xdc, 0xb1, 0xfa, 0x1d, 0xa9, 0x63, 0xf8, 0x39, 0x2a, - 0xf5, 0x60, 0xa4, 0x51, 0x7c, 0x60, 0x6d, 0x3b, 0x90, 0xf7, 0xd8, 0x7b, 0xc5, 0xbc, 0x33, 0x84, - 0x29, 0xe4, 0x7b, 0x91, 0x3a, 0xe4, 0x5f, 0xa6, 0xe9, 0x79, 0xdc, 0x46, 0x0b, 0x71, 0x2f, 0x6d, - 0x9d, 0x9e, 0x66, 0x1f, 0x54, 0xb3, 0xbb, 0xe9, 0xcd, 0x94, 0x4b, 0x06, 0xbb, 0x11, 0x3a, 0x44, - 0x50, 0xb9, 0x80, 0xa6, 0x37, 0xda, 0x3e, 0x7f, 0x65, 0x40, 0xe0, 0x7b, 0x6e, 0x00, 0x15, 0x1f, - 0x2d, 0x9c, 0xf3, 0x43, 0x84, 0x31, 0x1a, 0x93, 0xb7, 0x7b, 0x46, 0xde, 0xee, 0xf2, 0x59, 0xdc, - 0xfa, 0xd1, 0xf9, 0xd4, 0xb7, 0x7e, 0xf8, 0x8e, 0x97, 0x50, 0x21, 0xb0, 0xdb, 0xbe, 0x03, 0x26, - 0xf7, 0x8e, 0x40, 0x5d, 0xfa, 0x39, 0x23, 0xaf, 0x62, 0xfb, 0x22, 0xf4, 0x68, 0xee, 0xe4, 0x67, - 0x79, 0xe4, 0xe4, 0xac, 0x9c, 0x39, 0x3d, 0x2b, 0x67, 0x7e, 0x9c, 0x95, 0x33, 0xaf, 0x7f, 0x95, - 0x47, 0x0e, 0x26, 0xe4, 0x5f, 0x8e, 0xb5, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xc9, 0xfc, - 0x0e, 0xca, 0x08, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go deleted file mode 100644 index b28f2e50e3c..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ /dev/null @@ -1,16258 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: rpc.proto -// DO NOT EDIT! - -package etcdserverpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" - - authpb "github.com/coreos/etcd/auth/authpb" - - context "golang.org/x/net/context" - - grpc "google.golang.org/grpc" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. 
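For orientation, the hand-rolled Size and Unmarshal code above reduces to one primitive, the base-128 varint: sovRaftInternal counts 7-bit groups, sozRaftInternal zig-zag encodes signed values first, and the decode loops OR in 7 bits per byte until the high bit clears. A minimal standalone sketch of that arithmetic (illustrative helper names, not taken from the deleted file):

package main

import "fmt"

// varintSize mirrors sovRaftInternal: one byte per 7 bits of payload.
func varintSize(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// zigzag mirrors sozRaftInternal's pre-encoding, so small negative
// values stay small on the wire.
func zigzag(x int64) uint64 { return uint64((x << 1) ^ (x >> 63)) }

// decodeVarint mirrors the shift/OR loops in the Unmarshal methods above:
// accumulate 7 bits per byte until a byte with the high bit clear is seen.
func decodeVarint(b []byte) (v uint64, n int) {
	for shift := uint(0); n < len(b); shift += 7 {
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n
		}
	}
	return 0, 0 // truncated input
}

func main() {
	fmt.Println(varintSize(300))                  // 2
	fmt.Println(zigzag(-1))                       // 1
	fmt.Println(decodeVarint([]byte{0xAC, 0x02})) // 300 2
}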
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type AlarmType int32 - -const ( - AlarmType_NONE AlarmType = 0 - AlarmType_NOSPACE AlarmType = 1 -) - -var AlarmType_name = map[int32]string{ - 0: "NONE", - 1: "NOSPACE", -} -var AlarmType_value = map[string]int32{ - "NONE": 0, - "NOSPACE": 1, -} - -func (x AlarmType) String() string { - return proto.EnumName(AlarmType_name, int32(x)) -} -func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } - -type RangeRequest_SortOrder int32 - -const ( - RangeRequest_NONE RangeRequest_SortOrder = 0 - RangeRequest_ASCEND RangeRequest_SortOrder = 1 - RangeRequest_DESCEND RangeRequest_SortOrder = 2 -) - -var RangeRequest_SortOrder_name = map[int32]string{ - 0: "NONE", - 1: "ASCEND", - 2: "DESCEND", -} -var RangeRequest_SortOrder_value = map[string]int32{ - "NONE": 0, - "ASCEND": 1, - "DESCEND": 2, -} - -func (x RangeRequest_SortOrder) String() string { - return proto.EnumName(RangeRequest_SortOrder_name, int32(x)) -} -func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} } - -type RangeRequest_SortTarget int32 - -const ( - RangeRequest_KEY RangeRequest_SortTarget = 0 - RangeRequest_VERSION RangeRequest_SortTarget = 1 - RangeRequest_CREATE RangeRequest_SortTarget = 2 - RangeRequest_MOD RangeRequest_SortTarget = 3 - RangeRequest_VALUE RangeRequest_SortTarget = 4 -) - -var RangeRequest_SortTarget_name = map[int32]string{ - 0: "KEY", - 1: "VERSION", - 2: "CREATE", - 3: "MOD", - 4: "VALUE", -} -var RangeRequest_SortTarget_value = map[string]int32{ - "KEY": 0, - "VERSION": 1, - "CREATE": 2, - "MOD": 3, - "VALUE": 4, -} - -func (x RangeRequest_SortTarget) String() string { - return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) -} -func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} } - -type Compare_CompareResult int32 - -const ( - Compare_EQUAL Compare_CompareResult = 0 - Compare_GREATER Compare_CompareResult = 1 - Compare_LESS Compare_CompareResult = 2 - Compare_NOT_EQUAL Compare_CompareResult = 3 -) - -var Compare_CompareResult_name = map[int32]string{ - 0: "EQUAL", - 1: "GREATER", - 2: "LESS", - 3: "NOT_EQUAL", -} -var Compare_CompareResult_value = map[string]int32{ - "EQUAL": 0, - "GREATER": 1, - "LESS": 2, - "NOT_EQUAL": 3, -} - -func (x Compare_CompareResult) String() string { - return proto.EnumName(Compare_CompareResult_name, int32(x)) -} -func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} } - -type Compare_CompareTarget int32 - -const ( - Compare_VERSION Compare_CompareTarget = 0 - Compare_CREATE Compare_CompareTarget = 1 - Compare_MOD Compare_CompareTarget = 2 - Compare_VALUE Compare_CompareTarget = 3 -) - -var Compare_CompareTarget_name = map[int32]string{ - 0: "VERSION", - 1: "CREATE", - 2: "MOD", - 3: "VALUE", -} -var Compare_CompareTarget_value = map[string]int32{ - "VERSION": 0, - "CREATE": 1, - "MOD": 2, - "VALUE": 3, -} - -func (x Compare_CompareTarget) String() string { - return proto.EnumName(Compare_CompareTarget_name, int32(x)) -} -func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} } - -type WatchCreateRequest_FilterType int32 - -const ( - // filter out put event. - WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0 - // filter out delete event. 
- WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1 -) - -var WatchCreateRequest_FilterType_name = map[int32]string{ - 0: "NOPUT", - 1: "NODELETE", -} -var WatchCreateRequest_FilterType_value = map[string]int32{ - "NOPUT": 0, - "NODELETE": 1, -} - -func (x WatchCreateRequest_FilterType) String() string { - return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x)) -} -func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{19, 0} -} - -type AlarmRequest_AlarmAction int32 - -const ( - AlarmRequest_GET AlarmRequest_AlarmAction = 0 - AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1 - AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2 -) - -var AlarmRequest_AlarmAction_name = map[int32]string{ - 0: "GET", - 1: "ACTIVATE", - 2: "DEACTIVATE", -} -var AlarmRequest_AlarmAction_value = map[string]int32{ - "GET": 0, - "ACTIVATE": 1, - "DEACTIVATE": 2, -} - -func (x AlarmRequest_AlarmAction) String() string { - return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) -} -func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{41, 0} -} - -type ResponseHeader struct { - // cluster_id is the ID of the cluster which sent the response. - ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // member_id is the ID of the member which sent the response. - MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"` - // revision is the key-value store revision when the request was applied. - Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` - // raft_term is the raft term when the request was applied. - RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` -} - -func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } -func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } -func (*ResponseHeader) ProtoMessage() {} -func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } - -type RangeRequest struct { - // key is the first key for the range. If range_end is not given, the request only looks up key. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the upper bound on the requested range [key, range_end). - // If range_end is '\0', the range is all keys >= key. - // If the range_end is one bit larger than the given key, - // then the range requests get the all keys with the prefix (the given key). - // If both key and range_end are '\0', then range requests returns all keys. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // limit is a limit on the number of keys returned for the request. - Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` - // revision is the point-in-time of the key-value store to use for the range. - // If revision is less or equal to zero, the range is over the newest key-value store. - // If the revision has been compacted, ErrCompacted is returned as a response. - Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"` - // sort_order is the order for returned sorted results. 
- SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"` - // sort_target is the key-value field to use for sorting. - SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"` - // serializable sets the range request to use serializable member-local reads. - // Range requests are linearizable by default; linearizable requests have higher - // latency and lower throughput than serializable requests but reflect the current - // consensus of the cluster. For better performance, in exchange for possible stale reads, - // a serializable range request is served locally without needing to reach consensus - // with other nodes in the cluster. - Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"` - // keys_only when set returns only the keys and not the values. - KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"` - // count_only when set returns only the count of the keys in the range. - CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"` - // min_mod_revision is the lower bound for returned key mod revisions; all keys with - // lesser mod revisions will be filtered away. - MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"` - // max_mod_revision is the upper bound for returned key mod revisions; all keys with - // greater mod revisions will be filtered away. - MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` - // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create trevisions will be filtered away. - MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` - // max_create_revision is the upper bound for returned key create revisions; all keys with - // greater create revisions will be filtered away. - MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` -} - -func (m *RangeRequest) Reset() { *m = RangeRequest{} } -func (m *RangeRequest) String() string { return proto.CompactTextString(m) } -func (*RangeRequest) ProtoMessage() {} -func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } - -type RangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // kvs is the list of key-value pairs matched by the range request. - // kvs is empty when count is requested. - Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` - // more indicates if there are more keys to return in the requested range. - More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` - // count is set to the number of keys within the range when requested. 
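The RangeRequest comments above define the key/range_end convention: a range_end of '\0' selects every key at or above key, and a prefix query pairs the prefix with a range_end one past it (last byte incremented). A small sketch of computing that prefix end, assuming the convention as documented above (the helper name is illustrative, not from the deleted file):

package main

import "fmt"

// prefixRangeEnd returns the range_end that pairs with key to select all keys
// having key as a prefix: increment the last byte that is not 0xff and drop
// the rest; an all-0xff key falls back to "\0", i.e. scan to the end of the keyspace.
func prefixRangeEnd(key []byte) []byte {
	end := append([]byte(nil), key...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", prefixRangeEnd([]byte("foo")))      // "fop"
	fmt.Printf("%q\n", prefixRangeEnd([]byte{0xff, 0xff})) // "\x00"
}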
- Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` -} - -func (m *RangeResponse) Reset() { *m = RangeResponse{} } -func (m *RangeResponse) String() string { return proto.CompactTextString(m) } -func (*RangeResponse) ProtoMessage() {} -func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } - -func (m *RangeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { - if m != nil { - return m.Kvs - } - return nil -} - -type PutRequest struct { - // key is the key, in bytes, to put into the key-value store. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // value is the value, in bytes, to associate with the key in the key-value store. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // lease is the lease ID to associate with the key in the key-value store. A lease - // value of 0 indicates no lease. - Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"` - // If prev_kv is set, etcd gets the previous key-value pair before changing it. - // The previous key-value pair will be returned in the put response. - PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} -func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } - -type PutResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // if prev_kv is set in the request, the previous key-value pair will be returned. - PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} -func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } - -func (m *PutResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue { - if m != nil { - return m.PrevKv - } - return nil -} - -type DeleteRangeRequest struct { - // key is the first key to delete in the range. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the key following the last key to delete for the range [key, range_end). - // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all - // the all keys with the prefix (the given key). - // If range_end is '\0', the range is all keys greater than or equal to the key argument. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delte response. 
- PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` -} - -func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } -func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeRequest) ProtoMessage() {} -func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } - -type DeleteRangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // deleted is the number of keys deleted by the delete range request. - Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` - // if prev_kv is set in the request, the previous key-value pairs will be returned. - PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"` -} - -func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } -func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeResponse) ProtoMessage() {} -func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} } - -func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { - if m != nil { - return m.PrevKvs - } - return nil -} - -type RequestOp struct { - // request is a union of request types accepted by a transaction. - // - // Types that are valid to be assigned to Request: - // *RequestOp_RequestRange - // *RequestOp_RequestPut - // *RequestOp_RequestDeleteRange - Request isRequestOp_Request `protobuf_oneof:"request"` -} - -func (m *RequestOp) Reset() { *m = RequestOp{} } -func (m *RequestOp) String() string { return proto.CompactTextString(m) } -func (*RequestOp) ProtoMessage() {} -func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} } - -type isRequestOp_Request interface { - isRequestOp_Request() - MarshalTo([]byte) (int, error) - Size() int -} - -type RequestOp_RequestRange struct { - RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"` -} -type RequestOp_RequestPut struct { - RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"` -} -type RequestOp_RequestDeleteRange struct { - RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` -} - -func (*RequestOp_RequestRange) isRequestOp_Request() {} -func (*RequestOp_RequestPut) isRequestOp_Request() {} -func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} - -func (m *RequestOp) GetRequest() isRequestOp_Request { - if m != nil { - return m.Request - } - return nil -} - -func (m *RequestOp) GetRequestRange() *RangeRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok { - return x.RequestRange - } - return nil -} - -func (m *RequestOp) GetRequestPut() *PutRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok { - return x.RequestPut - } - return nil -} - -func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { - if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok { - return x.RequestDeleteRange - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{ - (*RequestOp_RequestRange)(nil), - (*RequestOp_RequestPut)(nil), - (*RequestOp_RequestDeleteRange)(nil), - } -} - -func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*RequestOp) - // request - switch x := m.Request.(type) { - case *RequestOp_RequestRange: - _ = b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestRange); err != nil { - return err - } - case *RequestOp_RequestPut: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestPut); err != nil { - return err - } - case *RequestOp_RequestDeleteRange: - _ = b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.RequestDeleteRange); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("RequestOp.Request has unexpected type %T", x) - } - return nil -} - -func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*RequestOp) - switch tag { - case 1: // request.request_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(RangeRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestRange{msg} - return true, err - case 2: // request.request_put - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PutRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestPut{msg} - return true, err - case 3: // request.request_delete_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(DeleteRangeRequest) - err := b.DecodeMessage(msg) - m.Request = &RequestOp_RequestDeleteRange{msg} - return true, err - default: - return false, nil - } -} - -func _RequestOp_OneofSizer(msg proto.Message) (n int) { - m := msg.(*RequestOp) - // request - switch x := m.Request.(type) { - case *RequestOp_RequestRange: - s := proto.Size(x.RequestRange) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *RequestOp_RequestPut: - s := proto.Size(x.RequestPut) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *RequestOp_RequestDeleteRange: - s := proto.Size(x.RequestDeleteRange) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ResponseOp struct { - // response is a union of response types returned by a transaction. 
- // - // Types that are valid to be assigned to Response: - // *ResponseOp_ResponseRange - // *ResponseOp_ResponsePut - // *ResponseOp_ResponseDeleteRange - Response isResponseOp_Response `protobuf_oneof:"response"` -} - -func (m *ResponseOp) Reset() { *m = ResponseOp{} } -func (m *ResponseOp) String() string { return proto.CompactTextString(m) } -func (*ResponseOp) ProtoMessage() {} -func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} } - -type isResponseOp_Response interface { - isResponseOp_Response() - MarshalTo([]byte) (int, error) - Size() int -} - -type ResponseOp_ResponseRange struct { - ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"` -} -type ResponseOp_ResponsePut struct { - ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"` -} -type ResponseOp_ResponseDeleteRange struct { - ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` -} - -func (*ResponseOp_ResponseRange) isResponseOp_Response() {} -func (*ResponseOp_ResponsePut) isResponseOp_Response() {} -func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} - -func (m *ResponseOp) GetResponse() isResponseOp_Response { - if m != nil { - return m.Response - } - return nil -} - -func (m *ResponseOp) GetResponseRange() *RangeResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok { - return x.ResponseRange - } - return nil -} - -func (m *ResponseOp) GetResponsePut() *PutResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok { - return x.ResponsePut - } - return nil -} - -func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { - if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok { - return x.ResponseDeleteRange - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
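RequestOp and ResponseOp above are protobuf oneof unions: each alternative gets its own single-field wrapper struct, and calling code either assigns a wrapper or type-switches on the interface-typed field. A brief sketch of typical usage of these generated types (the vendored import path is assumed from the file paths in this patch):

package main

import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// buildPutOp wraps a PutRequest in the RequestOp_RequestPut oneof wrapper.
func buildPutOp(key, val []byte) *pb.RequestOp {
	return &pb.RequestOp{
		Request: &pb.RequestOp_RequestPut{
			RequestPut: &pb.PutRequest{Key: key, Value: val},
		},
	}
}

// countDeleted type-switches on the ResponseOp oneof and sums the Deleted
// counts of any delete-range responses.
func countDeleted(ops []*pb.ResponseOp) (n int64) {
	for _, op := range ops {
		if r, ok := op.Response.(*pb.ResponseOp_ResponseDeleteRange); ok {
			n += r.ResponseDeleteRange.Deleted
		}
	}
	return n
}

func main() {
	_ = buildPutOp([]byte("k"), []byte("v"))
	_ = countDeleted(nil)
}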
-func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{ - (*ResponseOp_ResponseRange)(nil), - (*ResponseOp_ResponsePut)(nil), - (*ResponseOp_ResponseDeleteRange)(nil), - } -} - -func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ResponseOp) - // response - switch x := m.Response.(type) { - case *ResponseOp_ResponseRange: - _ = b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponseRange); err != nil { - return err - } - case *ResponseOp_ResponsePut: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponsePut); err != nil { - return err - } - case *ResponseOp_ResponseDeleteRange: - _ = b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ResponseOp.Response has unexpected type %T", x) - } - return nil -} - -func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ResponseOp) - switch tag { - case 1: // response.response_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(RangeResponse) - err := b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponseRange{msg} - return true, err - case 2: // response.response_put - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PutResponse) - err := b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponsePut{msg} - return true, err - case 3: // response.response_delete_range - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(DeleteRangeResponse) - err := b.DecodeMessage(msg) - m.Response = &ResponseOp_ResponseDeleteRange{msg} - return true, err - default: - return false, nil - } -} - -func _ResponseOp_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ResponseOp) - // response - switch x := m.Response.(type) { - case *ResponseOp_ResponseRange: - s := proto.Size(x.ResponseRange) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseOp_ResponsePut: - s := proto.Size(x.ResponsePut) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseOp_ResponseDeleteRange: - s := proto.Size(x.ResponseDeleteRange) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Compare struct { - // result is logical comparison operation for this comparison. - Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"` - // target is the key-value field to inspect for the comparison. - Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"` - // key is the subject key for the comparison operation. 
- Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - // Types that are valid to be assigned to TargetUnion: - // *Compare_Version - // *Compare_CreateRevision - // *Compare_ModRevision - // *Compare_Value - TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` -} - -func (m *Compare) Reset() { *m = Compare{} } -func (m *Compare) String() string { return proto.CompactTextString(m) } -func (*Compare) ProtoMessage() {} -func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} } - -type isCompare_TargetUnion interface { - isCompare_TargetUnion() - MarshalTo([]byte) (int, error) - Size() int -} - -type Compare_Version struct { - Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"` -} -type Compare_CreateRevision struct { - CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"` -} -type Compare_ModRevision struct { - ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"` -} -type Compare_Value struct { - Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"` -} - -func (*Compare_Version) isCompare_TargetUnion() {} -func (*Compare_CreateRevision) isCompare_TargetUnion() {} -func (*Compare_ModRevision) isCompare_TargetUnion() {} -func (*Compare_Value) isCompare_TargetUnion() {} - -func (m *Compare) GetTargetUnion() isCompare_TargetUnion { - if m != nil { - return m.TargetUnion - } - return nil -} - -func (m *Compare) GetVersion() int64 { - if x, ok := m.GetTargetUnion().(*Compare_Version); ok { - return x.Version - } - return 0 -} - -func (m *Compare) GetCreateRevision() int64 { - if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok { - return x.CreateRevision - } - return 0 -} - -func (m *Compare) GetModRevision() int64 { - if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok { - return x.ModRevision - } - return 0 -} - -func (m *Compare) GetValue() []byte { - if x, ok := m.GetTargetUnion().(*Compare_Value); ok { - return x.Value - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{ - (*Compare_Version)(nil), - (*Compare_CreateRevision)(nil), - (*Compare_ModRevision)(nil), - (*Compare_Value)(nil), - } -} - -func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Compare) - // target_union - switch x := m.TargetUnion.(type) { - case *Compare_Version: - _ = b.EncodeVarint(4<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.Version)) - case *Compare_CreateRevision: - _ = b.EncodeVarint(5<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.CreateRevision)) - case *Compare_ModRevision: - _ = b.EncodeVarint(6<<3 | proto.WireVarint) - _ = b.EncodeVarint(uint64(x.ModRevision)) - case *Compare_Value: - _ = b.EncodeVarint(7<<3 | proto.WireBytes) - _ = b.EncodeRawBytes(x.Value) - case nil: - default: - return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x) - } - return nil -} - -func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Compare) - switch tag { - case 4: // target_union.version - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.TargetUnion = &Compare_Version{int64(x)} - return true, err - case 5: // target_union.create_revision - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.TargetUnion = &Compare_CreateRevision{int64(x)} - return true, err - case 6: // target_union.mod_revision - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.TargetUnion = &Compare_ModRevision{int64(x)} - return true, err - case 7: // target_union.value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.TargetUnion = &Compare_Value{x} - return true, err - default: - return false, nil - } -} - -func _Compare_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Compare) - // target_union - switch x := m.TargetUnion.(type) { - case *Compare_Version: - n += proto.SizeVarint(4<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Version)) - case *Compare_CreateRevision: - n += proto.SizeVarint(5<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.CreateRevision)) - case *Compare_ModRevision: - n += proto.SizeVarint(6<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.ModRevision)) - case *Compare_Value: - n += proto.SizeVarint(7<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Value))) - n += len(x.Value) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// From google paxosdb paper: -// Our implementation hinges around a powerful primitive which we call MultiOp. All other database -// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically -// and consists of three components: -// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check -// for the absence or presence of a value, or compare with a given value. Two different tests in the guard -// may apply to the same or different entries in the database. All tests in the guard are applied and -// MultiOp returns the results. 
If all tests are true, MultiOp executes t op (see item 2 below), otherwise -// it executes f op (see item 3 below). -// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or -// lookup operation, and applies to a single database entry. Two different operations in the list may apply -// to the same or different entries in the database. These operations are executed -// if guard evaluates to -// true. -// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. -type TxnRequest struct { - // compare is a list of predicates representing a conjunction of terms. - // If the comparisons succeed, then the success requests will be processed in order, - // and the response will contain their respective responses in order. - // If the comparisons fail, then the failure requests will be processed in order, - // and the response will contain their respective responses in order. - Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"` - // success is a list of requests which will be applied when compare evaluates to true. - Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"` - // failure is a list of requests which will be applied when compare evaluates to false. - Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"` -} - -func (m *TxnRequest) Reset() { *m = TxnRequest{} } -func (m *TxnRequest) String() string { return proto.CompactTextString(m) } -func (*TxnRequest) ProtoMessage() {} -func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} } - -func (m *TxnRequest) GetCompare() []*Compare { - if m != nil { - return m.Compare - } - return nil -} - -func (m *TxnRequest) GetSuccess() []*RequestOp { - if m != nil { - return m.Success - } - return nil -} - -func (m *TxnRequest) GetFailure() []*RequestOp { - if m != nil { - return m.Failure - } - return nil -} - -type TxnResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // succeeded is set to true if the compare evaluated to true or false otherwise. - Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` - // responses is a list of responses corresponding to the results from applying - // success if succeeded is true or failure if succeeded is false. - Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"` -} - -func (m *TxnResponse) Reset() { *m = TxnResponse{} } -func (m *TxnResponse) String() string { return proto.CompactTextString(m) } -func (*TxnResponse) ProtoMessage() {} -func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} } - -func (m *TxnResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TxnResponse) GetResponses() []*ResponseOp { - if m != nil { - return m.Responses - } - return nil -} - -// CompactionRequest compacts the key-value store up to a given revision. All superseded keys -// with a revision less than the compaction revision will be removed. -type CompactionRequest struct { - // revision is the key-value store revision for the compaction operation. 
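// [Editorial sketch, not part of the removed file] The MultiOp comment above
// describes how TxnRequest models a guarded transaction: Compare is the guard,
// Success runs when every comparison holds, Failure runs otherwise. A minimal
// request built from the message types in this file might look like the
// following; Compare_EQUAL, Compare_MOD, the RequestOp "Request" oneof field,
// and its RequestOp_RequestPut / RequestOp_RequestRange wrappers are assumed
// to be the constants and types defined earlier in this file.
func exampleTxnRequest() *TxnRequest {
	return &TxnRequest{
		// Guard: the key's mod_revision must equal 7.
		Compare: []*Compare{{
			Result:      Compare_EQUAL,
			Target:      Compare_MOD,
			Key:         []byte("foo"),
			TargetUnion: &Compare_ModRevision{ModRevision: 7},
		}},
		// Applied, in order, only if every comparison above evaluates to true.
		Success: []*RequestOp{{
			Request: &RequestOp_RequestPut{RequestPut: &PutRequest{Key: []byte("foo"), Value: []byte("bar")}},
		}},
		// Applied, in order, only if some comparison evaluates to false.
		Failure: []*RequestOp{{
			Request: &RequestOp_RequestRange{RequestRange: &RangeRequest{Key: []byte("foo")}},
		}},
	}
}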
- Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` - // physical is set so the RPC will wait until the compaction is physically - // applied to the local database such that compacted entries are totally - // removed from the backend database. - Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` -} - -func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } -func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } -func (*CompactionRequest) ProtoMessage() {} -func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } - -type CompactionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } -func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } -func (*CompactionResponse) ProtoMessage() {} -func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} } - -func (m *CompactionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type HashRequest struct { -} - -func (m *HashRequest) Reset() { *m = HashRequest{} } -func (m *HashRequest) String() string { return proto.CompactTextString(m) } -func (*HashRequest) ProtoMessage() {} -func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} } - -type HashResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // hash is the hash value computed from the responding member's key-value store. - Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` -} - -func (m *HashResponse) Reset() { *m = HashResponse{} } -func (m *HashResponse) String() string { return proto.CompactTextString(m) } -func (*HashResponse) ProtoMessage() {} -func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} } - -func (m *HashResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type SnapshotRequest struct { -} - -func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } -func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*SnapshotRequest) ProtoMessage() {} -func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} } - -type SnapshotResponse struct { - // header has the current key-value store information. The first header in the snapshot - // stream indicates the point in time of the snapshot. - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // remaining_bytes is the number of blob bytes to be sent after this message - RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` - // blob contains the next chunk of the snapshot in the snapshot stream. 
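// [Editorial sketch, not part of the removed file] As documented above,
// CompactionRequest discards superseded revisions below Revision; setting
// Physical makes the RPC wait until compacted entries are actually removed
// from the backend database instead of returning once compaction is scheduled.
func exampleCompactionRequest(rev int64) *CompactionRequest {
	return &CompactionRequest{
		Revision: rev,  // compact history up to this key-value store revision
		Physical: true, // block until the space is reclaimed from the backend
	}
}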
- Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` -} - -func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } -func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*SnapshotResponse) ProtoMessage() {} -func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} } - -func (m *SnapshotResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type WatchRequest struct { - // request_union is a request to either create a new watcher or cancel an existing watcher. - // - // Types that are valid to be assigned to RequestUnion: - // *WatchRequest_CreateRequest - // *WatchRequest_CancelRequest - RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` -} - -func (m *WatchRequest) Reset() { *m = WatchRequest{} } -func (m *WatchRequest) String() string { return proto.CompactTextString(m) } -func (*WatchRequest) ProtoMessage() {} -func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} } - -type isWatchRequest_RequestUnion interface { - isWatchRequest_RequestUnion() - MarshalTo([]byte) (int, error) - Size() int -} - -type WatchRequest_CreateRequest struct { - CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"` -} -type WatchRequest_CancelRequest struct { - CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"` -} - -func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} -func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {} - -func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion { - if m != nil { - return m.RequestUnion - } - return nil -} - -func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest { - if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok { - return x.CreateRequest - } - return nil -} - -func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest { - if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok { - return x.CancelRequest - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{ - (*WatchRequest_CreateRequest)(nil), - (*WatchRequest_CancelRequest)(nil), - } -} - -func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*WatchRequest) - // request_union - switch x := m.RequestUnion.(type) { - case *WatchRequest_CreateRequest: - _ = b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.CreateRequest); err != nil { - return err - } - case *WatchRequest_CancelRequest: - _ = b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.CancelRequest); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x) - } - return nil -} - -func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*WatchRequest) - switch tag { - case 1: // request_union.create_request - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(WatchCreateRequest) - err := b.DecodeMessage(msg) - m.RequestUnion = &WatchRequest_CreateRequest{msg} - return true, err - case 2: // request_union.cancel_request - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(WatchCancelRequest) - err := b.DecodeMessage(msg) - m.RequestUnion = &WatchRequest_CancelRequest{msg} - return true, err - default: - return false, nil - } -} - -func _WatchRequest_OneofSizer(msg proto.Message) (n int) { - m := msg.(*WatchRequest) - // request_union - switch x := m.RequestUnion.(type) { - case *WatchRequest_CreateRequest: - s := proto.Size(x.CreateRequest) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *WatchRequest_CancelRequest: - s := proto.Size(x.CancelRequest) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type WatchCreateRequest struct { - // key is the key to register for watching. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // range_end is the end of the range [key, range_end) to watch. If range_end is not given, - // only the key argument is watched. If range_end is equal to '\0', all keys greater than - // or equal to the key argument are watched. - // If the range_end is one bit larger than the given key, - // then all keys with the prefix (the given key) will be watched. - RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". - StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"` - // progress_notify is set so that the etcd server will periodically send a WatchResponse with - // no events to the new watcher if there are no recent events. It is useful when clients - // wish to recover a disconnected watcher starting from a recent known revision. - // The etcd server may decide how often it will send notifications based on current load. 
- ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` - // filters filter the events at server side before it sends back to the watcher. - Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` - // If prev_kv is set, created watcher gets the previous KV before the event happens. - // If the previous KV is already compacted, nothing will be returned. - PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` -} - -func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } -func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCreateRequest) ProtoMessage() {} -func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } - -type WatchCancelRequest struct { - // watch_id is the watcher id to cancel so that no more events are transmitted. - WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` -} - -func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } -func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCancelRequest) ProtoMessage() {} -func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } - -type WatchResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // watch_id is the ID of the watcher that corresponds to the response. - WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` - // created is set to true if the response is for a create watch request. - // The client should record the watch_id and expect to receive events for - // the created watcher from the same stream. - // All events sent to the created watcher will attach with the same watch_id. - Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"` - // canceled is set to true if the response is for a cancel watch request. - // No further events will be sent to the canceled watcher. - Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"` - // compact_revision is set to the minimum index if a watcher tries to watch - // at a compacted index. - // - // This happens when creating a watcher at a compacted revision or the watcher cannot - // catch up with the progress of the key-value store. - // - // The client should treat the watcher as canceled and should not try to create any - // watcher with the same start_revision again. - CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` -} - -func (m *WatchResponse) Reset() { *m = WatchResponse{} } -func (m *WatchResponse) String() string { return proto.CompactTextString(m) } -func (*WatchResponse) ProtoMessage() {} -func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} } - -func (m *WatchResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *WatchResponse) GetEvents() []*mvccpb.Event { - if m != nil { - return m.Events - } - return nil -} - -type LeaseGrantRequest struct { - // TTL is the advisory time-to-live in seconds. 
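// [Editorial sketch, not part of the removed file] WatchCreateRequest's
// range_end comment above describes the prefix convention: to watch every key
// with a given prefix, range_end is the prefix with its last non-0xff byte
// incremented. A sketch of that convention using the request_union wrapper
// defined above; the all-0xff fallback to "\x00" follows the '\0' rule in the
// range_end comment and is this sketch's reading of it.
func examplePrefixWatch(prefix []byte) *WatchRequest {
	// Compute the smallest key greater than every key carrying this prefix.
	end := make([]byte, len(prefix))
	copy(end, prefix)
	bumped := false
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			end = end[:i+1]
			bumped = true
			break
		}
	}
	if !bumped {
		end = []byte{0} // all bytes were 0xff: '\0' range_end watches all keys >= prefix
	}
	return &WatchRequest{
		RequestUnion: &WatchRequest_CreateRequest{CreateRequest: &WatchCreateRequest{
			Key:            prefix,
			RangeEnd:       end,
			StartRevision:  0,    // "now", per the start_revision comment above
			ProgressNotify: true, // ask the server for periodic empty WatchResponses
		}},
	}
}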
- TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` - // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } -func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantRequest) ProtoMessage() {} -func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } - -type LeaseGrantResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // ID is the lease ID for the granted lease. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the server chosen lease time-to-live in seconds. - TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` -} - -func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } -func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantResponse) ProtoMessage() {} -func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} } - -func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type LeaseRevokeRequest struct { - // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } -func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeRequest) ProtoMessage() {} -func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } - -type LeaseRevokeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } -func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeResponse) ProtoMessage() {} -func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} } - -func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type LeaseKeepAliveRequest struct { - // ID is the lease ID for the lease to keep alive. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } -func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveRequest) ProtoMessage() {} -func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } - -type LeaseKeepAliveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // ID is the lease ID from the keep alive request. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the new time-to-live for the lease. 
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` -} - -func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } -func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveResponse) ProtoMessage() {} -func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} } - -func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type LeaseTimeToLiveRequest struct { - // ID is the lease ID for the lease. - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // keys is true to query all the keys attached to this lease. - Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` -} - -func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } -func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveRequest) ProtoMessage() {} -func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } - -type LeaseTimeToLiveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // ID is the lease ID from the keep alive request. - ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. - TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. - GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` - // Keys is the list of keys attached to this lease. - Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` -} - -func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } -func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveResponse) ProtoMessage() {} -func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } - -func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type Member struct { - // ID is the member ID for this member. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // name is the human-readable name of the member. If the member is not started, the name will be an empty string. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // peerURLs is the list of URLs the member exposes to the cluster for communication. - PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"` - // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. - ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"` -} - -func (m *Member) Reset() { *m = Member{} } -func (m *Member) String() string { return proto.CompactTextString(m) } -func (*Member) ProtoMessage() {} -func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } - -type MemberAddRequest struct { - // peerURLs is the list of URLs the added member will use to communicate with the cluster. 
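// [Editorial sketch, not part of the removed file] The lease messages above
// fit together as a lifecycle: LeaseGrantRequest asks for a TTL (ID 0 lets the
// lessor pick an ID), LeaseKeepAliveRequest refreshes that lease on the
// keep-alive stream, and LeaseTimeToLiveRequest reports the remaining TTL and,
// optionally, the attached keys. The lease ID below is a placeholder; in a
// real exchange it comes from LeaseGrantResponse.ID.
func exampleLeaseMessages() (*LeaseGrantRequest, *LeaseKeepAliveRequest, *LeaseTimeToLiveRequest) {
	grant := &LeaseGrantRequest{TTL: 60, ID: 0} // 60s advisory TTL, server-chosen ID
	var leaseID int64 = 1                       // placeholder for the granted lease ID
	keepAlive := &LeaseKeepAliveRequest{ID: leaseID}
	ttlQuery := &LeaseTimeToLiveRequest{ID: leaseID, Keys: true} // also list attached keys
	return grant, keepAlive, ttlQuery
}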
- PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` -} - -func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } -func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } -func (*MemberAddRequest) ProtoMessage() {} -func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } - -type MemberAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // member is the member information for the added member. - Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` -} - -func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } -func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } -func (*MemberAddResponse) ProtoMessage() {} -func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } - -func (m *MemberAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberAddResponse) GetMember() *Member { - if m != nil { - return m.Member - } - return nil -} - -type MemberRemoveRequest struct { - // ID is the member ID of the member to remove. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } -func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveRequest) ProtoMessage() {} -func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } - -type MemberRemoveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } -func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveResponse) ProtoMessage() {} -func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } - -func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type MemberUpdateRequest struct { - // ID is the member ID of the member to update. - ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - // peerURLs is the new list of URLs the member will use to communicate with the cluster. 
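// [Editorial sketch, not part of the removed file] Per the comments above, a
// member is added by advertising the peer URLs it will use to talk to the
// cluster and removed by its member ID; the URL and ID below are illustrative
// placeholders only (the ID would normally come from MemberListResponse).
func exampleMembershipMessages() (*MemberAddRequest, *MemberRemoveRequest) {
	add := &MemberAddRequest{PeerURLs: []string{"https://10.0.1.10:2380"}}
	remove := &MemberRemoveRequest{ID: 0x1234}
	return add, remove
}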
- PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"` -} - -func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } -func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateRequest) ProtoMessage() {} -func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } - -type MemberUpdateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } -func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateResponse) ProtoMessage() {} -func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } - -func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type MemberListRequest struct { -} - -func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } -func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } -func (*MemberListRequest) ProtoMessage() {} -func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } - -type MemberListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // members is a list of all members associated with the cluster. - Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` -} - -func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } -func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } -func (*MemberListResponse) ProtoMessage() {} -func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } - -func (m *MemberListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *MemberListResponse) GetMembers() []*Member { - if m != nil { - return m.Members - } - return nil -} - -type DefragmentRequest struct { -} - -func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } -func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } -func (*DefragmentRequest) ProtoMessage() {} -func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } - -type DefragmentResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } -func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } -func (*DefragmentResponse) ProtoMessage() {} -func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } - -func (m *DefragmentResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AlarmRequest struct { - // action is the kind of alarm request to issue. The action - // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a - // raised alarm. - Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"` - // memberID is the ID of the member associated with the alarm. If memberID is 0, the - // alarm request covers all members. - MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` - // alarm is the type of alarm to consider for this request. 
- Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` -} - -func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } -func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } -func (*AlarmRequest) ProtoMessage() {} -func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } - -type AlarmMember struct { - // memberID is the ID of the member associated with the raised alarm. - MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` - // alarm is the type of alarm which has been raised. - Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` -} - -func (m *AlarmMember) Reset() { *m = AlarmMember{} } -func (m *AlarmMember) String() string { return proto.CompactTextString(m) } -func (*AlarmMember) ProtoMessage() {} -func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } - -type AlarmResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // alarms is a list of alarms associated with the alarm request. - Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"` -} - -func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } -func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } -func (*AlarmResponse) ProtoMessage() {} -func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } - -func (m *AlarmResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AlarmResponse) GetAlarms() []*AlarmMember { - if m != nil { - return m.Alarms - } - return nil -} - -type StatusRequest struct { -} - -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (m *StatusRequest) String() string { return proto.CompactTextString(m) } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } - -type StatusResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // version is the cluster protocol version used by the responding member. - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // dbSize is the size of the backend database, in bytes, of the responding member. - DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` - // leader is the member ID which the responding member believes is the current leader. - Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` - // raftIndex is the current raft index of the responding member. - RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` - // raftTerm is the current raft term of the responding member. 
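// [Editorial sketch, not part of the removed file] As the AlarmRequest comments
// above describe, MemberID 0 makes the request cover every member, and Action
// chooses between getting, activating, or deactivating alarms. AlarmRequest_GET
// and AlarmType_NONE are assumed to be the enum constants defined with
// AlarmRequest_AlarmAction and AlarmType earlier in this file.
func exampleAlarmQuery() *AlarmRequest {
	return &AlarmRequest{
		Action:   AlarmRequest_GET, // assumed constant: list currently raised alarms
		MemberID: 0,                // 0 covers all members
		Alarm:    AlarmType_NONE,   // assumed constant: no specific alarm filter
	}
}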
- RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` -} - -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (m *StatusResponse) String() string { return proto.CompactTextString(m) } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } - -func (m *StatusResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthEnableRequest struct { -} - -func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } -func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthEnableRequest) ProtoMessage() {} -func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } - -type AuthDisableRequest struct { -} - -func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } -func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthDisableRequest) ProtoMessage() {} -func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } - -type AuthenticateRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` -} - -func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } -func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } -func (*AuthenticateRequest) ProtoMessage() {} -func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } - -type AuthUserAddRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` -} - -func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } -func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddRequest) ProtoMessage() {} -func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } - -type AuthUserGetRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } -func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetRequest) ProtoMessage() {} -func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } - -type AuthUserDeleteRequest struct { - // name is the name of the user to delete. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } -func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteRequest) ProtoMessage() {} -func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } - -type AuthUserChangePasswordRequest struct { - // name is the name of the user whose password is being changed. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // password is the new password for the user. 
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` -} - -func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } -func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserChangePasswordRequest) ProtoMessage() {} -func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{52} -} - -type AuthUserGrantRoleRequest struct { - // user is the name of the user which should be granted a given role. - User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` - // role is the name of the role to grant to the user. - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } -func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleRequest) ProtoMessage() {} -func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } - -type AuthUserRevokeRoleRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } -func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleRequest) ProtoMessage() {} -func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } - -type AuthRoleAddRequest struct { - // name is the name of the role to add to the authentication system. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } -func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddRequest) ProtoMessage() {} -func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } - -type AuthRoleGetRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } -func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetRequest) ProtoMessage() {} -func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } - -type AuthUserListRequest struct { -} - -func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } -func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserListRequest) ProtoMessage() {} -func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } - -type AuthRoleListRequest struct { -} - -func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } -func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListRequest) ProtoMessage() {} -func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } - -type AuthRoleDeleteRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` -} - -func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } -func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteRequest) ProtoMessage() {} 
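// [Editorial sketch, not part of the removed file] The auth request messages
// above are plain name/role pairs; a typical sequence is to add a role and then
// grant it to a user. The user and role names below are illustrative only.
func exampleAuthGrant() (*AuthRoleAddRequest, *AuthUserGrantRoleRequest) {
	addRole := &AuthRoleAddRequest{Name: "reader"}
	grant := &AuthUserGrantRoleRequest{User: "alice", Role: "reader"}
	return addRole, grant
}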
-func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } - -type AuthRoleGrantPermissionRequest struct { - // name is the name of the role which will be granted the permission. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // perm is the permission to grant to the role. - Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"` -} - -func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } -func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} -func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{60} -} - -func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { - if m != nil { - return m.Perm - } - return nil -} - -type AuthRoleRevokePermissionRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` -} - -func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } -func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} -func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{61} -} - -type AuthEnableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } -func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthEnableResponse) ProtoMessage() {} -func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } - -func (m *AuthEnableResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthDisableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } -func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthDisableResponse) ProtoMessage() {} -func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } - -func (m *AuthDisableResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthenticateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - // token is an authorized token that can be used in succeeding RPCs - Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` -} - -func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } -func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } -func (*AuthenticateResponse) ProtoMessage() {} -func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } - -func (m *AuthenticateResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserAddResponse) Reset() { *m 
= AuthUserAddResponse{} } -func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddResponse) ProtoMessage() {} -func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} } - -func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` -} - -func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } -func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetResponse) ProtoMessage() {} -func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } - -func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } -func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteResponse) ProtoMessage() {} -func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } - -func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserChangePasswordResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } -func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserChangePasswordResponse) ProtoMessage() {} -func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{68} -} - -func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserGrantRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } -func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleResponse) ProtoMessage() {} -func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } - -func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserRevokeRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } -func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleResponse) ProtoMessage() {} -func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } - -func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleAddResponse) Reset() { *m = 
AuthRoleAddResponse{} } -func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddResponse) ProtoMessage() {} -func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } - -func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"` -} - -func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } -func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetResponse) ProtoMessage() {} -func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } - -func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { - if m != nil { - return m.Perm - } - return nil -} - -type AuthRoleListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` -} - -func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } -func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListResponse) ProtoMessage() {} -func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} } - -func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthUserListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` - Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` -} - -func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } -func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserListResponse) ProtoMessage() {} -func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} } - -func (m *AuthUserListResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } -func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteResponse) ProtoMessage() {} -func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } - -func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -type AuthRoleGrantPermissionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } -func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} -func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{76} -} - -func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - 
} - return nil -} - -type AuthRoleRevokePermissionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` -} - -func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } -func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} -func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{77} -} - -func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { - if m != nil { - return m.Header - } - return nil -} - -func init() { - proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") - proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") - proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") - proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest") - proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse") - proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest") - proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse") - proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp") - proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp") - proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare") - proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest") - proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse") - proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest") - proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse") - proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest") - proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse") - proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest") - proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse") - proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest") - proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest") - proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest") - proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse") - proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest") - proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") - proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") - proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") - proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") - proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") - proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") - proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse") - proto.RegisterType((*Member)(nil), "etcdserverpb.Member") - proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest") - proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse") - proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest") - proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse") - proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest") - proto.RegisterType((*MemberUpdateResponse)(nil), 
"etcdserverpb.MemberUpdateResponse") - proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") - proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") - proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") - proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") - proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") - proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") - proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") - proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse") - proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest") - proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest") - proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest") - proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest") - proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest") - proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest") - proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest") - proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest") - proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest") - proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest") - proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest") - proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest") - proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest") - proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest") - proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest") - proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest") - proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse") - proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse") - proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse") - proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse") - proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse") - proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse") - proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse") - proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse") - proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse") - proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse") - proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse") - proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse") - proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse") - proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") - proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), 
"etcdserverpb.AuthRoleGrantPermissionResponse") - proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") - proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) - proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) - proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for KV service - -type KVClient interface { - // Range gets the keys in the range from the key-value store. - Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. - Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) -} - -type kVClient struct { - cc *grpc.ClientConn -} - -func NewKVClient(cc *grpc.ClientConn) KVClient { - return &kVClient{cc} -} - -func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { - out := new(RangeResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { - out := new(PutResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { - out := new(DeleteRangeResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { - out := new(TxnResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { - out := new(CompactionResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for KV service - -type KVServer interface { - // Range gets the keys in the range from the key-value store. - Range(context.Context, *RangeRequest) (*RangeResponse, error) - // Put puts the given key into the key-value store. - // A put request increments the revision of the key-value store - // and generates one event in the event history. - Put(context.Context, *PutRequest) (*PutResponse, error) - // DeleteRange deletes the given range from the key-value store. - // A delete request increments the revision of the key-value store - // and generates a delete event in the event history for every deleted key. - DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error) - // Txn processes multiple requests in a single transaction. - // A txn request increments the revision of the key-value store - // and generates events with the same revision for every completed request. - // It is not allowed to modify the same key several times within one txn. - Txn(context.Context, *TxnRequest) (*TxnResponse, error) - // Compact compacts the event history in the etcd key-value store. The key-value - // store should be periodically compacted or the event history will continue to grow - // indefinitely. 
- Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) -} - -func RegisterKVServer(s *grpc.Server, srv KVServer) { - s.RegisterService(&_KV_serviceDesc, srv) -} - -func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RangeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Range(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Range", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Range(ctx, req.(*RangeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PutRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Put(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Put", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Put(ctx, req.(*PutRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRangeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).DeleteRange(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/DeleteRange", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(TxnRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Txn(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Txn", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Txn(ctx, req.(*TxnRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CompactionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KVServer).Compact(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.KV/Compact", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).Compact(ctx, req.(*CompactionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _KV_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.KV", - HandlerType: (*KVServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Range", - Handler: _KV_Range_Handler, - }, - { - MethodName: "Put", - Handler: _KV_Put_Handler, - }, - { - MethodName: "DeleteRange", - Handler: _KV_DeleteRange_Handler, - }, - { - MethodName: "Txn", - Handler: _KV_Txn_Handler, - }, - { - 
MethodName: "Compact", - Handler: _KV_Compact_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -// Client API for Watch service - -type WatchClient interface { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. - Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) -} - -type watchClient struct { - cc *grpc.ClientConn -} - -func NewWatchClient(cc *grpc.ClientConn) WatchClient { - return &watchClient{cc} -} - -func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...) - if err != nil { - return nil, err - } - x := &watchWatchClient{stream} - return x, nil -} - -type Watch_WatchClient interface { - Send(*WatchRequest) error - Recv() (*WatchResponse, error) - grpc.ClientStream -} - -type watchWatchClient struct { - grpc.ClientStream -} - -func (x *watchWatchClient) Send(m *WatchRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *watchWatchClient) Recv() (*WatchResponse, error) { - m := new(WatchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for Watch service - -type WatchServer interface { - // Watch watches for events happening or that have happened. Both input and output - // are streams; the input stream is for creating and canceling watchers and the output - // stream sends events. One watch RPC can watch on multiple key ranges, streaming events - // for several watches at once. The entire event history can be watched starting from the - // last compaction revision. - Watch(Watch_WatchServer) error -} - -func RegisterWatchServer(s *grpc.Server, srv WatchServer) { - s.RegisterService(&_Watch_serviceDesc, srv) -} - -func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WatchServer).Watch(&watchWatchServer{stream}) -} - -type Watch_WatchServer interface { - Send(*WatchResponse) error - Recv() (*WatchRequest, error) - grpc.ServerStream -} - -type watchWatchServer struct { - grpc.ServerStream -} - -func (x *watchWatchServer) Send(m *WatchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *watchWatchServer) Recv() (*WatchRequest, error) { - m := new(WatchRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Watch_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Watch", - HandlerType: (*WatchServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Watch", - Handler: _Watch_Watch_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// Client API for Lease service - -type LeaseClient interface { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. 
- LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. - LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. - LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) -} - -type leaseClient struct { - cc *grpc.ClientConn -} - -func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { - return &leaseClient{cc} -} - -func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) { - out := new(LeaseGrantResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { - out := new(LeaseRevokeResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...) - if err != nil { - return nil, err - } - x := &leaseLeaseKeepAliveClient{stream} - return x, nil -} - -type Lease_LeaseKeepAliveClient interface { - Send(*LeaseKeepAliveRequest) error - Recv() (*LeaseKeepAliveResponse, error) - grpc.ClientStream -} - -type leaseLeaseKeepAliveClient struct { - grpc.ClientStream -} - -func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { - m := new(LeaseKeepAliveResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { - out := new(LeaseTimeToLiveResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Lease service - -type LeaseServer interface { - // LeaseGrant creates a lease which expires if the server does not receive a keepAlive - // within a given time to live period. All keys attached to the lease will be expired and - // deleted if the lease expires. Each expired key generates a delete event in the event history. - LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error) - // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. - LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error) - // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client - // to the server and streaming keep alive responses from the server to the client. 
- LeaseKeepAlive(Lease_LeaseKeepAliveServer) error - // LeaseTimeToLive retrieves lease information. - LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) -} - -func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { - s.RegisterService(&_Lease_serviceDesc, srv) -} - -func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseGrantRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseGrant(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseGrant", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseRevokeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseRevoke(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseRevoke", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream}) -} - -type Lease_LeaseKeepAliveServer interface { - Send(*LeaseKeepAliveResponse) error - Recv() (*LeaseKeepAliveRequest, error) - grpc.ServerStream -} - -type leaseLeaseKeepAliveServer struct { - grpc.ServerStream -} - -func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) { - m := new(LeaseKeepAliveRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LeaseTimeToLiveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeaseServer).LeaseTimeToLive(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Lease_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Lease", - HandlerType: (*LeaseServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LeaseGrant", - Handler: _Lease_LeaseGrant_Handler, - }, - { - MethodName: "LeaseRevoke", - Handler: _Lease_LeaseRevoke_Handler, - }, - { - MethodName: "LeaseTimeToLive", - Handler: _Lease_LeaseTimeToLive_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "LeaseKeepAlive", - Handler: _Lease_LeaseKeepAlive_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// Client API for Cluster 
service - -type ClusterClient interface { - // MemberAdd adds a member into the cluster. - MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) - // MemberRemove removes an existing member from the cluster. - MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) - // MemberUpdate updates the member configuration. - MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) - // MemberList lists all the members in the cluster. - MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) -} - -type clusterClient struct { - cc *grpc.ClientConn -} - -func NewClusterClient(cc *grpc.ClientConn) ClusterClient { - return &clusterClient{cc} -} - -func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { - out := new(MemberAddResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { - out := new(MemberRemoveResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { - out := new(MemberUpdateResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { - out := new(MemberListResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Cluster service - -type ClusterServer interface { - // MemberAdd adds a member into the cluster. - MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) - // MemberRemove removes an existing member from the cluster. - MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error) - // MemberUpdate updates the member configuration. - MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) - // MemberList lists all the members in the cluster. 
- MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) -} - -func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { - s.RegisterService(&_Cluster_serviceDesc, srv) -} - -func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberRemoveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberRemove(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberRemove", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberUpdateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberUpdate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberUpdate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MemberListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ClusterServer).MemberList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Cluster/MemberList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Cluster_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Cluster", - HandlerType: (*ClusterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "MemberAdd", - Handler: _Cluster_MemberAdd_Handler, - }, - { - MethodName: "MemberRemove", - Handler: _Cluster_MemberRemove_Handler, - }, - { - MethodName: "MemberUpdate", - Handler: _Cluster_MemberUpdate_Handler, - }, - { - MethodName: "MemberList", - Handler: _Cluster_MemberList_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -// Client API for Maintenance service - -type MaintenanceClient interface { - // Alarm activates, deactivates, and queries alarms regarding cluster health. 
- Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) - // Status gets the status of the member. - Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) - // Defragment defragments a member's backend database to recover storage space. - Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) - // Hash returns the hash of the local KV state for consistency checking purpose. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. - Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) -} - -type maintenanceClient struct { - cc *grpc.ClientConn -} - -func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { - return &maintenanceClient{cc} -} - -func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { - out := new(AlarmResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { - out := new(DefragmentResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { - out := new(HashResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...) - if err != nil { - return nil, err - } - x := &maintenanceSnapshotClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Maintenance_SnapshotClient interface { - Recv() (*SnapshotResponse, error) - grpc.ClientStream -} - -type maintenanceSnapshotClient struct { - grpc.ClientStream -} - -func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { - m := new(SnapshotResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for Maintenance service - -type MaintenanceServer interface { - // Alarm activates, deactivates, and queries alarms regarding cluster health. - Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) - // Status gets the status of the member. 
- Status(context.Context, *StatusRequest) (*StatusResponse, error) - // Defragment defragments a member's backend database to recover storage space. - Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) - // Hash returns the hash of the local KV state for consistency checking purpose. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. - Hash(context.Context, *HashRequest) (*HashResponse, error) - // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. - Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error -} - -func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { - s.RegisterService(&_Maintenance_serviceDesc, srv) -} - -func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AlarmRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Alarm(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Alarm", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DefragmentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Defragment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Defragment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HashRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MaintenanceServer).Hash(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Maintenance/Hash", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SnapshotRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream}) -} - 
-type Maintenance_SnapshotServer interface { - Send(*SnapshotResponse) error - grpc.ServerStream -} - -type maintenanceSnapshotServer struct { - grpc.ServerStream -} - -func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { - return x.ServerStream.SendMsg(m) -} - -var _Maintenance_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Maintenance", - HandlerType: (*MaintenanceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Alarm", - Handler: _Maintenance_Alarm_Handler, - }, - { - MethodName: "Status", - Handler: _Maintenance_Status_Handler, - }, - { - MethodName: "Defragment", - Handler: _Maintenance_Defragment_Handler, - }, - { - MethodName: "Hash", - Handler: _Maintenance_Hash_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Snapshot", - Handler: _Maintenance_Snapshot_Handler, - ServerStreams: true, - }, - }, - Metadata: "rpc.proto", -} - -// Client API for Auth service - -type AuthClient interface { - // AuthEnable enables authentication. - AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) - // AuthDisable disables authentication. - AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) - // Authenticate processes an authenticate request. - Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) - // UserAdd adds a new user. - UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) - // UserGet gets detailed user information. - UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) - // UserList gets a list of all users. - UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) - // UserDelete deletes a specified user. - UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) - // UserChangePassword changes the password of a specified user. - UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) - // UserGrant grants a role to a specified user. - UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) - // UserRevokeRole revokes a role of specified user. - UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. - RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) - // RoleGet gets detailed role information. - RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) - // RoleList gets lists of all roles. - RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) - // RoleDelete deletes a specified role. - RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) - // RoleGrantPermission grants a permission of a specified key or range to a specified role. 
- RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) - // RoleRevokePermission revokes a key or range permission of a specified role. - RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) -} - -type authClient struct { - cc *grpc.ClientConn -} - -func NewAuthClient(cc *grpc.ClientConn) AuthClient { - return &authClient{cc} -} - -func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { - out := new(AuthEnableResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { - out := new(AuthDisableResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { - out := new(AuthenticateResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { - out := new(AuthUserAddResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { - out := new(AuthUserGetResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { - out := new(AuthUserListResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { - out := new(AuthUserDeleteResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { - out := new(AuthUserChangePasswordResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { - out := new(AuthUserGrantRoleResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { - out := new(AuthUserRevokeRoleResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { - out := new(AuthRoleAddResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { - out := new(AuthRoleGetResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { - out := new(AuthRoleListResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { - out := new(AuthRoleDeleteResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { - out := new(AuthRoleGrantPermissionResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { - out := new(AuthRoleRevokePermissionResponse) - err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Auth service - -type AuthServer interface { - // AuthEnable enables authentication. - AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) - // AuthDisable disables authentication. - AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) - // Authenticate processes an authenticate request. - Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) - // UserAdd adds a new user. - UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) - // UserGet gets detailed user information. - UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) - // UserList gets a list of all users. - UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error) - // UserDelete deletes a specified user. - UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) - // UserChangePassword changes the password of a specified user. 
- UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) - // UserGrant grants a role to a specified user. - UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) - // UserRevokeRole revokes a role of specified user. - UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. - RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) - // RoleGet gets detailed role information. - RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) - // RoleList gets lists of all roles. - RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error) - // RoleDelete deletes a specified role. - RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) - // RoleGrantPermission grants a permission of a specified key or range to a specified role. - RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) - // RoleRevokePermission revokes a key or range permission of a specified role. - RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) -} - -func RegisterAuthServer(s *grpc.Server, srv AuthServer) { - s.RegisterService(&_Auth_serviceDesc, srv) -} - -func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthEnableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).AuthEnable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/AuthEnable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthDisableRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).AuthDisable(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/AuthDisable", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthenticateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).Authenticate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/Authenticate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if 
interceptor == nil { - return srv.(AuthServer).UserAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserGetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserGet(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserGet", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserChangePasswordRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserChangePassword(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserChangePassword", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserGrantRoleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserGrantRole(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserGrantRole", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest)) - } - return interceptor(ctx, in, info, 
handler) -} - -func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthUserRevokeRoleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).UserRevokeRole(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/UserRevokeRole", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleAddRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleAdd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleAdd", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleGetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleGet(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleGet", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleList(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleList", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleDelete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleDelete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleGrantPermissionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleGrantPermission(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: 
srv, - FullMethod: "/etcdserverpb.Auth/RoleGrantPermission", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AuthRoleRevokePermissionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).RoleRevokePermission(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/etcdserverpb.Auth/RoleRevokePermission", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Auth_serviceDesc = grpc.ServiceDesc{ - ServiceName: "etcdserverpb.Auth", - HandlerType: (*AuthServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "AuthEnable", - Handler: _Auth_AuthEnable_Handler, - }, - { - MethodName: "AuthDisable", - Handler: _Auth_AuthDisable_Handler, - }, - { - MethodName: "Authenticate", - Handler: _Auth_Authenticate_Handler, - }, - { - MethodName: "UserAdd", - Handler: _Auth_UserAdd_Handler, - }, - { - MethodName: "UserGet", - Handler: _Auth_UserGet_Handler, - }, - { - MethodName: "UserList", - Handler: _Auth_UserList_Handler, - }, - { - MethodName: "UserDelete", - Handler: _Auth_UserDelete_Handler, - }, - { - MethodName: "UserChangePassword", - Handler: _Auth_UserChangePassword_Handler, - }, - { - MethodName: "UserGrantRole", - Handler: _Auth_UserGrantRole_Handler, - }, - { - MethodName: "UserRevokeRole", - Handler: _Auth_UserRevokeRole_Handler, - }, - { - MethodName: "RoleAdd", - Handler: _Auth_RoleAdd_Handler, - }, - { - MethodName: "RoleGet", - Handler: _Auth_RoleGet_Handler, - }, - { - MethodName: "RoleList", - Handler: _Auth_RoleList_Handler, - }, - { - MethodName: "RoleDelete", - Handler: _Auth_RoleDelete_Handler, - }, - { - MethodName: "RoleGrantPermission", - Handler: _Auth_RoleGrantPermission_Handler, - }, - { - MethodName: "RoleRevokePermission", - Handler: _Auth_RoleRevokePermission_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "rpc.proto", -} - -func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ClusterId != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) - } - if m.MemberId != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) - } - if m.Revision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - } - if m.RaftTerm != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - } - return i, nil -} - -func (m *RangeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRpc(dAtA, i, 
[deleted hunk, generated code, continued: the rest of RangeRequest.MarshalTo and the Marshal/MarshalTo methods for the remaining etcdserverpb messages — the KV types (RangeResponse, PutRequest/Response, DeleteRangeRequest/Response, RequestOp and ResponseOp with their oneof wrappers, Compare, TxnRequest/Response, CompactionRequest/Response), the maintenance types (HashRequest/Response, SnapshotRequest/Response, DefragmentRequest/Response, AlarmRequest, AlarmMember, AlarmResponse, StatusRequest/Response), the Watch and Lease types, the cluster membership types (Member, MemberAddRequest/Response, MemberRemoveRequest/Response, MemberUpdateRequest/Response, MemberListRequest/Response), and the Auth request/response types — then the encodeFixed64Rpc, encodeFixed32Rpc and encodeVarintRpc byte-writing helpers, and the corresponding Size() accounting methods from ResponseHeader through MemberRemoveResponse, where this section ends.]
-} - -func (m *MemberUpdateRequest) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovRpc(uint64(m.ID)) - } - if len(m.PeerURLs) > 0 { - for _, s := range m.PeerURLs { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *MemberUpdateResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *MemberListRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *MemberListResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Members) > 0 { - for _, e := range m.Members { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *DefragmentRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *DefragmentResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AlarmRequest) Size() (n int) { - var l int - _ = l - if m.Action != 0 { - n += 1 + sovRpc(uint64(m.Action)) - } - if m.MemberID != 0 { - n += 1 + sovRpc(uint64(m.MemberID)) - } - if m.Alarm != 0 { - n += 1 + sovRpc(uint64(m.Alarm)) - } - return n -} - -func (m *AlarmMember) Size() (n int) { - var l int - _ = l - if m.MemberID != 0 { - n += 1 + sovRpc(uint64(m.MemberID)) - } - if m.Alarm != 0 { - n += 1 + sovRpc(uint64(m.Alarm)) - } - return n -} - -func (m *AlarmResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Alarms) > 0 { - for _, e := range m.Alarms { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *StatusRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *StatusResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.DbSize != 0 { - n += 1 + sovRpc(uint64(m.DbSize)) - } - if m.Leader != 0 { - n += 1 + sovRpc(uint64(m.Leader)) - } - if m.RaftIndex != 0 { - n += 1 + sovRpc(uint64(m.RaftIndex)) - } - if m.RaftTerm != 0 { - n += 1 + sovRpc(uint64(m.RaftTerm)) - } - return n -} - -func (m *AuthEnableRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthDisableRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthenticateRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserAddRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGetRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserDeleteRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserChangePasswordRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGrantRoleRequest) Size() (n int) { - var l int - _ = l - 
l = len(m.User) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserRevokeRoleRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleAddRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGetRequest) Size() (n int) { - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserListRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthRoleListRequest) Size() (n int) { - var l int - _ = l - return n -} - -func (m *AuthRoleDeleteRequest) Size() (n int) { - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGrantPermissionRequest) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - if m.Perm != nil { - l = m.Perm.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleRevokePermissionRequest) Size() (n int) { - var l int - _ = l - l = len(m.Role) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.RangeEnd) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthEnableResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthDisableResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthenticateResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - l = len(m.Token) - if l > 0 { - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserAddResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGetResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthUserDeleteResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserChangePasswordResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserGrantRoleResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthUserRevokeRoleResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleAddResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGetResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) 
- } - if len(m.Perm) > 0 { - for _, e := range m.Perm { - l = e.Size() - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthRoleListResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Roles) > 0 { - for _, s := range m.Roles { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthUserListResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - if len(m.Users) > 0 { - for _, s := range m.Users { - l = len(s) - n += 1 + l + sovRpc(uint64(l)) - } - } - return n -} - -func (m *AuthRoleDeleteResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleGrantPermissionResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func (m *AuthRoleRevokePermissionResponse) Size() (n int) { - var l int - _ = l - if m.Header != nil { - l = m.Header.Size() - n += 1 + l + sovRpc(uint64(l)) - } - return n -} - -func sovRpc(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRpc(x uint64) (n int) { - return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResponseHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) - } - m.ClusterId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClusterId |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType) - } - m.MemberId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberId |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) - } - m.RaftTerm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftTerm |= (uint64(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RangeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
- if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType) - } - m.SortOrder = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType) - } - m.SortTarget = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Serializable = bool(v != 0) - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.KeysOnly = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CountOnly = bool(v != 0) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType) - } - m.MinModRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinModRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType) - } - m.MaxModRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxModRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType) - } - m.MinCreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinCreateRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType) - } - m.MaxCreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxCreateRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RangeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kvs = append(m.Kvs, &mvccpb.KeyValue{}) - if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field More", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.More = bool(v != 0) - case 4: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PutRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PutRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PrevKv = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PutResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PutResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PrevKv == nil { - m.PrevKv = &mvccpb.KeyValue{} - } - if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - 
preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PrevKv = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) - } - m.Deleted = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Deleted |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{}) - if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RangeRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestRange{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - v := &PutRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestPut{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DeleteRangeRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Request = &RequestOp_RequestDeleteRange{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RangeResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponseRange{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PutResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponsePut{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DeleteRangeResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Response = &ResponseOp_ResponseDeleteRange{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Compare) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Compare: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - m.Result = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Result |= (Compare_CompareResult(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - m.Target = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Target |= (Compare_CompareTarget(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_Version{v} - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_CreateRevision{v} - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetUnion = &Compare_ModRevision{v} - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := make([]byte, postIndex-iNdEx) - copy(v, dAtA[iNdEx:postIndex]) - m.TargetUnion = &Compare_Value{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TxnRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Compare = append(m.Compare, &Compare{}) - if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Success = append(m.Success, &RequestOp{}) - if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Failure = append(m.Failure, &RequestOp{}) - if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TxnResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Succeeded = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Responses = append(m.Responses, &ResponseOp{}) - if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CompactionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Physical = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CompactionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HashRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HashRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HashResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HashResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - m.Hash = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Hash |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType) - } - m.RemainingBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RemainingBytes |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...) 
- if m.Blob == nil { - m.Blob = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &WatchCreateRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.RequestUnion = &WatchRequest_CreateRequest{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &WatchCancelRequest{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.RequestUnion = &WatchRequest_CancelRequest{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) - if m.RangeEnd == nil { - m.RangeEnd = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType) - } - m.StartRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressNotify = bool(v != 0) - case 5: - if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + packedLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - for iNdEx < postIndex { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) - } - } else if wireType == 0 { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PrevKv = 
bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) - } - m.WatchId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WatchId |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType) - } - m.WatchId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WatchId |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) - } - var v int - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Created = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Canceled = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) - } - m.CompactRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CompactRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, &mvccpb.Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - 
return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b 
< 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire 
type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Keys = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType) - } - m.GrantedTTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GrantedTTL |= (int64(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx)) - copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Member) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Member: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - 
if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Member == nil { - m.Member = &Member{} - } - if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - 
} - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemberListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Members = append(m.Members, &Member{}) - if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } 
- fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) - } - m.MemberID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - m.Alarm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Alarm |= (AlarmType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmMember) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) - } - m.MemberID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemberID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType) - } - m.Alarm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Alarm |= (AlarmType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AlarmResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx 
:= 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Alarms = append(m.Alarms, &AlarmMember{}) - if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire 
|= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType) - } - m.DbSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DbSize |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) - } - m.Leader = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Leader |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) - } - m.RaftIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftIndex |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) - } - m.RaftTerm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftTerm |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for 
iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - 
return nil -} -func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) - } - var msglen 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Perm == nil { - m.Perm = &authpb.Permission{} - } - if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RangeEnd = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if 
skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*AuthenticateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if 
err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Perm = append(m.Perm, &authpb.Permission{}) - if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil 
{ - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Header == nil { - m.Header = &ResponseHeader{} - } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRpc(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRpc - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRpc(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - 
return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } - -var fileDescriptorRpc = []byte{ - // 3401 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x5b, 0xcb, 0x73, 0x1b, 0xc7, - 0xd1, 0xe7, 0x02, 0x24, 0x40, 0x34, 0x1e, 0x84, 0x86, 0x94, 0x04, 0xae, 0x24, 0x8a, 0x1a, 0xbd, - 0x28, 0xc9, 0x26, 0x6d, 0xda, 0xdf, 0x77, 0xd0, 0xe7, 0x72, 0x7d, 0x14, 0x09, 0x8b, 0x0c, 0x29, - 0x52, 0x5e, 0x52, 0xb2, 0x53, 0xe5, 0x0a, 0x6a, 0x09, 0x8c, 0xc8, 0x2d, 0x02, 0xbb, 0xf0, 0xee, - 0x02, 0x22, 0x9d, 0xa4, 0x2a, 0xe5, 0xd8, 0x95, 0x4a, 0x8e, 0xf1, 0x21, 0xaf, 0x63, 0x2a, 0x87, - 0xfc, 0x01, 0xb9, 0xe5, 0x0f, 0x48, 0xe5, 0x92, 0x54, 0xe5, 0x1f, 0x48, 0x39, 0x39, 0xe4, 0x90, - 0x7b, 0x4e, 0xa9, 0xa4, 0xe6, 0xb5, 0x3b, 0xbb, 0xd8, 0x05, 0xe5, 0x6c, 0x7c, 0x11, 0x77, 0x66, - 0x7a, 0xfa, 0xd7, 0xdd, 0x33, 0xdd, 0xd3, 0xd3, 0x03, 0x41, 0xc9, 0xed, 0xb7, 0x97, 0xfb, 0xae, - 0xe3, 0x3b, 0xa8, 0x42, 0xfc, 0x76, 0xc7, 0x23, 0xee, 0x90, 0xb8, 0xfd, 0x43, 0x7d, 0xee, 0xc8, - 0x39, 0x72, 0xd8, 0xc0, 0x0a, 0xfd, 0xe2, 0x34, 0xfa, 0x3c, 0xa5, 0x59, 0xe9, 0x0d, 0xdb, 0x6d, - 0xf6, 0x4f, 0xff, 0x70, 0xe5, 0x64, 0x28, 0x86, 0xae, 0xb0, 0x21, 0x73, 0xe0, 0x1f, 0xb3, 0x7f, - 0xfa, 0x87, 0xec, 0x8f, 0x18, 0xbc, 0x7a, 0xe4, 0x38, 0x47, 0x5d, 0xb2, 0x62, 0xf6, 0xad, 0x15, - 0xd3, 0xb6, 0x1d, 0xdf, 0xf4, 0x2d, 0xc7, 0xf6, 0xf8, 0x28, 0xfe, 0x5c, 0x83, 0x9a, 0x41, 0xbc, - 0xbe, 0x63, 0x7b, 0x64, 0x93, 0x98, 0x1d, 0xe2, 0xa2, 0x6b, 0x00, 0xed, 0xee, 0xc0, 0xf3, 0x89, - 0xdb, 0xb2, 0x3a, 0x0d, 0x6d, 0x51, 0x5b, 0x9a, 0x34, 0x4a, 0xa2, 0x67, 0xab, 0x83, 0xae, 0x40, - 0xa9, 0x47, 0x7a, 0x87, 0x7c, 0x34, 0xc7, 0x46, 0xa7, 0x79, 0xc7, 0x56, 0x07, 0xe9, 0x30, 0xed, - 0x92, 0xa1, 0xe5, 0x59, 0x8e, 0xdd, 0xc8, 0x2f, 0x6a, 0x4b, 0x79, 0x23, 0x68, 0xd3, 0x89, 0xae, - 0xf9, 0xc2, 0x6f, 0xf9, 0xc4, 0xed, 0x35, 0x26, 0xf9, 0x44, 0xda, 0x71, 0x40, 0xdc, 0x1e, 0xfe, - 0x6c, 0x0a, 0x2a, 0x86, 0x69, 0x1f, 0x11, 0x83, 0x7c, 0x3c, 0x20, 0x9e, 0x8f, 0xea, 0x90, 0x3f, - 0x21, 0x67, 0x0c, 0xbe, 0x62, 0xd0, 0x4f, 0x3e, 0xdf, 0x3e, 0x22, 0x2d, 0x62, 0x73, 0xe0, 0x0a, - 0x9d, 0x6f, 0x1f, 0x91, 0xa6, 0xdd, 0x41, 0x73, 0x30, 0xd5, 0xb5, 0x7a, 0x96, 0x2f, 0x50, 0x79, - 0x23, 0x22, 0xce, 0x64, 0x4c, 0x9c, 0x75, 0x00, 0xcf, 0x71, 0xfd, 0x96, 0xe3, 0x76, 0x88, 0xdb, - 0x98, 0x5a, 0xd4, 0x96, 0x6a, 0xab, 0xb7, 0x96, 0xd5, 0x85, 0x58, 0x56, 0x05, 0x5a, 0xde, 0x77, - 0x5c, 0x7f, 0x8f, 0xd2, 0x1a, 0x25, 0x4f, 0x7e, 0xa2, 0xf7, 0xa0, 0xcc, 0x98, 0xf8, 0xa6, 0x7b, - 0x44, 0xfc, 0x46, 0x81, 0x71, 0xb9, 0x7d, 0x0e, 0x97, 0x03, 0x46, 0x6c, 0x30, 0x78, 0xfe, 0x8d, - 0x30, 0x54, 0x3c, 0xe2, 0x5a, 0x66, 0xd7, 0xfa, 0xc4, 0x3c, 0xec, 0x92, 0x46, 0x71, 0x51, 0x5b, - 0x9a, 0x36, 0x22, 0x7d, 0x54, 0xff, 0x13, 0x72, 0xe6, 0xb5, 0x1c, 0xbb, 0x7b, 0xd6, 0x98, 0x66, - 0x04, 0xd3, 0xb4, 0x63, 0xcf, 0xee, 0x9e, 0xb1, 0x45, 0x73, 0x06, 0xb6, 0xcf, 0x47, 0x4b, 0x6c, - 0xb4, 0xc4, 0x7a, 0xd8, 0xf0, 0x12, 0xd4, 0x7b, 0x96, 0xdd, 0xea, 0x39, 0x9d, 0x56, 0x60, 0x10, - 0x60, 0x06, 0xa9, 0xf5, 0x2c, 0xfb, 0x89, 0xd3, 0x31, 0xa4, 0x59, 0x28, 0xa5, 0x79, 0x1a, 0xa5, - 0x2c, 0x0b, 0x4a, 0xf3, 0x54, 0xa5, 0x5c, 0x86, 0x59, 0xca, 0xb3, 0xed, 0x12, 0xd3, 0x27, 0x21, - 0x71, 0x85, 0x11, 0x5f, 0xe8, 0x59, 0xf6, 0x3a, 0x1b, 0x89, 0xd0, 0x9b, 
0xa7, 0x23, 0xf4, 0x55, - 0x41, 0x6f, 0x9e, 0x46, 0xe9, 0xf1, 0x32, 0x94, 0x02, 0x9b, 0xa3, 0x69, 0x98, 0xdc, 0xdd, 0xdb, - 0x6d, 0xd6, 0x27, 0x10, 0x40, 0x61, 0x6d, 0x7f, 0xbd, 0xb9, 0xbb, 0x51, 0xd7, 0x50, 0x19, 0x8a, - 0x1b, 0x4d, 0xde, 0xc8, 0xe1, 0x47, 0x00, 0xa1, 0x75, 0x51, 0x11, 0xf2, 0xdb, 0xcd, 0x6f, 0xd6, - 0x27, 0x28, 0xcd, 0xf3, 0xa6, 0xb1, 0xbf, 0xb5, 0xb7, 0x5b, 0xd7, 0xe8, 0xe4, 0x75, 0xa3, 0xb9, - 0x76, 0xd0, 0xac, 0xe7, 0x28, 0xc5, 0x93, 0xbd, 0x8d, 0x7a, 0x1e, 0x95, 0x60, 0xea, 0xf9, 0xda, - 0xce, 0xb3, 0x66, 0x7d, 0x12, 0x7f, 0xa1, 0x41, 0x55, 0xac, 0x17, 0xf7, 0x09, 0xf4, 0x36, 0x14, - 0x8e, 0x99, 0x5f, 0xb0, 0xad, 0x58, 0x5e, 0xbd, 0x1a, 0x5b, 0xdc, 0x88, 0xef, 0x18, 0x82, 0x16, - 0x61, 0xc8, 0x9f, 0x0c, 0xbd, 0x46, 0x6e, 0x31, 0xbf, 0x54, 0x5e, 0xad, 0x2f, 0x73, 0x87, 0x5d, - 0xde, 0x26, 0x67, 0xcf, 0xcd, 0xee, 0x80, 0x18, 0x74, 0x10, 0x21, 0x98, 0xec, 0x39, 0x2e, 0x61, - 0x3b, 0x76, 0xda, 0x60, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0x70, 0x1b, 0xe0, - 0xe9, 0xc0, 0x4f, 0xf7, 0x8c, 0x39, 0x98, 0x1a, 0x52, 0xbe, 0xc2, 0x2b, 0x78, 0x83, 0xb9, 0x04, - 0x31, 0x3d, 0x12, 0xb8, 0x04, 0x6d, 0xa0, 0xcb, 0x50, 0xec, 0xbb, 0x64, 0xd8, 0x3a, 0x19, 0x32, - 0x8c, 0x69, 0xa3, 0x40, 0x9b, 0xdb, 0x43, 0x6c, 0x43, 0x99, 0x81, 0x64, 0xd2, 0xfb, 0x5e, 0xc8, - 0x3d, 0xc7, 0xa6, 0x8d, 0xea, 0x2e, 0xf1, 0x3e, 0x02, 0xb4, 0x41, 0xba, 0xc4, 0x27, 0x59, 0xdc, - 0x5e, 0xd1, 0x26, 0x1f, 0xd1, 0xe6, 0xc7, 0x1a, 0xcc, 0x46, 0xd8, 0x67, 0x52, 0xab, 0x01, 0xc5, - 0x0e, 0x63, 0xc6, 0x25, 0xc8, 0x1b, 0xb2, 0x89, 0x1e, 0xc0, 0xb4, 0x10, 0xc0, 0x6b, 0xe4, 0x53, - 0x56, 0xbb, 0xc8, 0x65, 0xf2, 0xf0, 0xdf, 0x35, 0x28, 0x09, 0x45, 0xf7, 0xfa, 0x68, 0x0d, 0xaa, - 0x2e, 0x6f, 0xb4, 0x98, 0x3e, 0x42, 0x22, 0x3d, 0x3d, 0x7a, 0x6c, 0x4e, 0x18, 0x15, 0x31, 0x85, - 0x75, 0xa3, 0xff, 0x83, 0xb2, 0x64, 0xd1, 0x1f, 0xf8, 0xc2, 0xe4, 0x8d, 0x28, 0x83, 0x70, 0xe7, - 0x6c, 0x4e, 0x18, 0x20, 0xc8, 0x9f, 0x0e, 0x7c, 0x74, 0x00, 0x73, 0x72, 0x32, 0xd7, 0x46, 0x88, - 0x91, 0x67, 0x5c, 0x16, 0xa3, 0x5c, 0x46, 0x97, 0x6a, 0x73, 0xc2, 0x40, 0x62, 0xbe, 0x32, 0xf8, - 0xa8, 0x04, 0x45, 0xd1, 0x8b, 0xff, 0xa1, 0x01, 0x48, 0x83, 0xee, 0xf5, 0xd1, 0x06, 0xd4, 0x5c, - 0xd1, 0x8a, 0x28, 0x7c, 0x25, 0x51, 0x61, 0xb1, 0x0e, 0x13, 0x46, 0x55, 0x4e, 0xe2, 0x2a, 0xbf, - 0x0b, 0x95, 0x80, 0x4b, 0xa8, 0xf3, 0x7c, 0x82, 0xce, 0x01, 0x87, 0xb2, 0x9c, 0x40, 0xb5, 0xfe, - 0x00, 0x2e, 0x06, 0xf3, 0x13, 0xd4, 0xbe, 0x31, 0x46, 0xed, 0x80, 0xe1, 0xac, 0xe4, 0xa0, 0x2a, - 0x0e, 0xf4, 0xac, 0xe1, 0xdd, 0xf8, 0xd7, 0x79, 0x28, 0xae, 0x3b, 0xbd, 0xbe, 0xe9, 0xd2, 0x35, - 0x2a, 0xb8, 0xc4, 0x1b, 0x74, 0x7d, 0xa6, 0x6e, 0x6d, 0xf5, 0x66, 0x14, 0x41, 0x90, 0xc9, 0xbf, - 0x06, 0x23, 0x35, 0xc4, 0x14, 0x3a, 0x59, 0x1c, 0x2d, 0xb9, 0x57, 0x98, 0x2c, 0x0e, 0x16, 0x31, - 0x45, 0xfa, 0x52, 0x3e, 0xf4, 0x25, 0x1d, 0x8a, 0x43, 0xe2, 0x86, 0xc7, 0xe1, 0xe6, 0x84, 0x21, - 0x3b, 0xd0, 0x3d, 0x98, 0x89, 0x87, 0xe6, 0x29, 0x41, 0x53, 0x6b, 0x47, 0x23, 0xf9, 0x4d, 0xa8, - 0x44, 0xce, 0x87, 0x82, 0xa0, 0x2b, 0xf7, 0x94, 0xe3, 0xe1, 0x92, 0x0c, 0x4a, 0xf4, 0x2c, 0xab, - 0x6c, 0x4e, 0x88, 0xb0, 0x84, 0xff, 0x1f, 0xaa, 0x11, 0x5d, 0x69, 0xf8, 0x6d, 0xbe, 0xff, 0x6c, - 0x6d, 0x87, 0xc7, 0xea, 0xc7, 0x2c, 0x3c, 0x1b, 0x75, 0x8d, 0x86, 0xfc, 0x9d, 0xe6, 0xfe, 0x7e, - 0x3d, 0x87, 0xaa, 0x50, 0xda, 0xdd, 0x3b, 0x68, 0x71, 0xaa, 0x3c, 0x7e, 0x27, 0xe0, 0x20, 0x62, - 0xbd, 0x12, 0xe2, 0x27, 0x94, 0x10, 0xaf, 0xc9, 0x10, 0x9f, 0x0b, 0x43, 0x7c, 0xfe, 0x51, 0x0d, - 0x2a, 0xdc, 0x3e, 0xad, 0x81, 0x4d, 0x8f, 0x99, 0x5f, 0x6a, 0x00, 0x07, 0xa7, 0xb6, 0x0c, 0x40, - 
0x2b, 0x50, 0x6c, 0x73, 0xe6, 0x0d, 0x8d, 0xf9, 0xf3, 0xc5, 0x44, 0x93, 0x1b, 0x92, 0x0a, 0xbd, - 0x09, 0x45, 0x6f, 0xd0, 0x6e, 0x13, 0x4f, 0x86, 0xfb, 0xcb, 0xf1, 0x90, 0x22, 0x1c, 0xde, 0x90, - 0x74, 0x74, 0xca, 0x0b, 0xd3, 0xea, 0x0e, 0x58, 0xf0, 0x1f, 0x3f, 0x45, 0xd0, 0xe1, 0x9f, 0x69, - 0x50, 0x66, 0x52, 0x66, 0x8a, 0x63, 0x57, 0xa1, 0xc4, 0x64, 0x20, 0x1d, 0x11, 0xc9, 0xa6, 0x8d, - 0xb0, 0x03, 0xfd, 0x2f, 0x94, 0xe4, 0x0e, 0x96, 0xc1, 0xac, 0x91, 0xcc, 0x76, 0xaf, 0x6f, 0x84, - 0xa4, 0x78, 0x1b, 0x2e, 0x30, 0xab, 0xb4, 0x69, 0x62, 0x29, 0xed, 0xa8, 0xa6, 0x5e, 0x5a, 0x2c, - 0xf5, 0xd2, 0x61, 0xba, 0x7f, 0x7c, 0xe6, 0x59, 0x6d, 0xb3, 0x2b, 0xa4, 0x08, 0xda, 0xf8, 0x1b, - 0x80, 0x54, 0x66, 0x59, 0xd4, 0xc5, 0x55, 0x28, 0x6f, 0x9a, 0xde, 0xb1, 0x10, 0x09, 0x7f, 0x08, - 0x15, 0xde, 0xcc, 0x64, 0x43, 0x04, 0x93, 0xc7, 0xa6, 0x77, 0xcc, 0x04, 0xaf, 0x1a, 0xec, 0x1b, - 0x5f, 0x80, 0x99, 0x7d, 0xdb, 0xec, 0x7b, 0xc7, 0x8e, 0x8c, 0xb5, 0x34, 0xb1, 0xae, 0x87, 0x7d, - 0x99, 0x10, 0xef, 0xc2, 0x8c, 0x4b, 0x7a, 0xa6, 0x65, 0x5b, 0xf6, 0x51, 0xeb, 0xf0, 0xcc, 0x27, - 0x9e, 0xc8, 0xbb, 0x6b, 0x41, 0xf7, 0x23, 0xda, 0x4b, 0x45, 0x3b, 0xec, 0x3a, 0x87, 0xc2, 0xe3, - 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0x95, 0x0f, 0x4c, 0xbf, 0x2d, 0xad, 0x80, 0xb6, 0xa0, 0x16, 0xf8, - 0x39, 0xeb, 0x11, 0xb2, 0xc4, 0x02, 0x3e, 0x9b, 0x23, 0x33, 0x32, 0x19, 0xf0, 0xab, 0x6d, 0xb5, - 0x83, 0xb1, 0x32, 0xed, 0x36, 0xe9, 0x06, 0xac, 0x72, 0xe9, 0xac, 0x18, 0xa1, 0xca, 0x4a, 0xed, - 0x78, 0x34, 0x13, 0x1e, 0x86, 0xdc, 0x2d, 0x7f, 0x9e, 0x03, 0x34, 0x2a, 0xc3, 0x57, 0xcd, 0x0f, - 0x6e, 0x43, 0xcd, 0xf3, 0x4d, 0xd7, 0x6f, 0xc5, 0x6e, 0x25, 0x55, 0xd6, 0x1b, 0xc4, 0xaa, 0xbb, - 0x30, 0xd3, 0x77, 0x9d, 0x23, 0x97, 0x78, 0x5e, 0xcb, 0x76, 0x7c, 0xeb, 0xc5, 0x99, 0x48, 0x8e, - 0x6a, 0xb2, 0x7b, 0x97, 0xf5, 0xa2, 0x26, 0x14, 0x5f, 0x58, 0x5d, 0x9f, 0xb8, 0x5e, 0x63, 0x6a, - 0x31, 0xbf, 0x54, 0x5b, 0x7d, 0x70, 0x9e, 0xd5, 0x96, 0xdf, 0x63, 0xf4, 0x07, 0x67, 0x7d, 0x62, - 0xc8, 0xb9, 0x6a, 0xda, 0x52, 0x88, 0xa4, 0x2d, 0xb7, 0x01, 0x42, 0x7a, 0x1a, 0xb5, 0x76, 0xf7, - 0x9e, 0x3e, 0x3b, 0xa8, 0x4f, 0xa0, 0x0a, 0x4c, 0xef, 0xee, 0x6d, 0x34, 0x77, 0x9a, 0x34, 0xae, - 0xe1, 0x15, 0x69, 0x1b, 0xd5, 0x86, 0x68, 0x1e, 0xa6, 0x5f, 0xd2, 0x5e, 0x79, 0x6d, 0xcb, 0x1b, - 0x45, 0xd6, 0xde, 0xea, 0xe0, 0xbf, 0x69, 0x50, 0x15, 0xbb, 0x20, 0xd3, 0x56, 0x54, 0x21, 0x72, - 0x11, 0x08, 0x9a, 0x23, 0xf1, 0xdd, 0xd1, 0x11, 0xa9, 0x98, 0x6c, 0x52, 0x77, 0xe7, 0x8b, 0x4d, - 0x3a, 0xc2, 0xac, 0x41, 0x1b, 0xdd, 0x83, 0x7a, 0x9b, 0xbb, 0x7b, 0xec, 0xd8, 0x31, 0x66, 0x44, - 0x7f, 0xb0, 0x48, 0xb7, 0xa1, 0x40, 0x86, 0xc4, 0xf6, 0xbd, 0x46, 0x99, 0xc5, 0xa6, 0xaa, 0x4c, - 0xb4, 0x9a, 0xb4, 0xd7, 0x10, 0x83, 0xf8, 0x7f, 0xe0, 0xc2, 0x0e, 0xcd, 0x74, 0x1f, 0xbb, 0xa6, - 0xad, 0xe6, 0xcc, 0x07, 0x07, 0x3b, 0xc2, 0x2a, 0xf4, 0x13, 0xd5, 0x20, 0xb7, 0xb5, 0x21, 0x74, - 0xc8, 0x6d, 0x6d, 0xe0, 0x4f, 0x35, 0x40, 0xea, 0xbc, 0x4c, 0x66, 0x8a, 0x31, 0x97, 0xf0, 0xf9, - 0x10, 0x7e, 0x0e, 0xa6, 0x88, 0xeb, 0x3a, 0x2e, 0x33, 0x48, 0xc9, 0xe0, 0x0d, 0x7c, 0x4b, 0xc8, - 0x60, 0x90, 0xa1, 0x73, 0x12, 0xec, 0x79, 0xce, 0x4d, 0x0b, 0x44, 0xdd, 0x86, 0xd9, 0x08, 0x55, - 0xa6, 0x18, 0x79, 0x17, 0x2e, 0x32, 0x66, 0xdb, 0x84, 0xf4, 0xd7, 0xba, 0xd6, 0x30, 0x15, 0xb5, - 0x0f, 0x97, 0xe2, 0x84, 0x5f, 0xaf, 0x8d, 0xf0, 0x3b, 0x02, 0xf1, 0xc0, 0xea, 0x91, 0x03, 0x67, - 0x27, 0x5d, 0x36, 0x1a, 0xf8, 0xe8, 0x4d, 0x58, 0x1c, 0x26, 0xec, 0x1b, 0xff, 0x4a, 0x83, 0xcb, - 0x23, 0xd3, 0xbf, 0xe6, 0x55, 0x5d, 0x00, 0x38, 0xa2, 0xdb, 0x87, 0x74, 0xe8, 0x00, 0xbf, 0xc3, - 0x29, 0x3d, 0x81, 0x9c, 
0x34, 0x76, 0x54, 0x84, 0x9c, 0xc7, 0x50, 0x78, 0xc2, 0xca, 0x27, 0x8a, - 0x56, 0x93, 0x52, 0x2b, 0xdb, 0xec, 0xf1, 0x5b, 0x5d, 0xc9, 0x60, 0xdf, 0xec, 0xe8, 0x24, 0xc4, - 0x7d, 0x66, 0xec, 0xf0, 0x23, 0xba, 0x64, 0x04, 0x6d, 0x8a, 0xde, 0xee, 0x5a, 0xc4, 0xf6, 0xd9, - 0xe8, 0x24, 0x1b, 0x55, 0x7a, 0xf0, 0x32, 0xd4, 0x39, 0xd2, 0x5a, 0xa7, 0xa3, 0x1c, 0xd3, 0x01, - 0x3f, 0x2d, 0xca, 0x0f, 0xbf, 0x84, 0x0b, 0x0a, 0x7d, 0x26, 0xd3, 0xbd, 0x06, 0x05, 0x5e, 0x23, - 0x12, 0x27, 0xc4, 0x5c, 0x74, 0x16, 0x87, 0x31, 0x04, 0x0d, 0xbe, 0x0d, 0xb3, 0xa2, 0x87, 0xf4, - 0x9c, 0xa4, 0x55, 0x67, 0xf6, 0xc1, 0x3b, 0x30, 0x17, 0x25, 0xcb, 0xe4, 0x08, 0x6b, 0x12, 0xf4, - 0x59, 0xbf, 0xa3, 0x1c, 0x38, 0xf1, 0x45, 0x51, 0x0d, 0x96, 0x8b, 0x19, 0x2c, 0x10, 0x48, 0xb2, - 0xc8, 0x24, 0xd0, 0xac, 0x34, 0xff, 0x8e, 0xe5, 0x05, 0x69, 0xc5, 0x27, 0x80, 0xd4, 0xce, 0x4c, - 0x8b, 0xb2, 0x0c, 0x45, 0x6e, 0x70, 0x99, 0xb9, 0x26, 0xaf, 0x8a, 0x24, 0xa2, 0x02, 0x6d, 0x90, - 0x17, 0xae, 0x79, 0xd4, 0x23, 0x41, 0x64, 0xa5, 0xf9, 0x9a, 0xda, 0x99, 0x49, 0xe3, 0x3f, 0x68, - 0x50, 0x59, 0xeb, 0x9a, 0x6e, 0x4f, 0x1a, 0xff, 0x5d, 0x28, 0xf0, 0x44, 0x50, 0xdc, 0x9d, 0xee, - 0x44, 0xd9, 0xa8, 0xb4, 0xbc, 0xb1, 0xc6, 0xd3, 0x46, 0x31, 0x8b, 0x2e, 0x96, 0x28, 0x4d, 0x6e, - 0xc4, 0x4a, 0x95, 0x1b, 0xe8, 0x75, 0x98, 0x32, 0xe9, 0x14, 0xe6, 0xbf, 0xb5, 0x78, 0x0a, 0xce, - 0xb8, 0xb1, 0x43, 0x9b, 0x53, 0xe1, 0xb7, 0xa1, 0xac, 0x20, 0xd0, 0x9b, 0xc5, 0xe3, 0xa6, 0x38, - 0x98, 0xd7, 0xd6, 0x0f, 0xb6, 0x9e, 0xf3, 0x0b, 0x47, 0x0d, 0x60, 0xa3, 0x19, 0xb4, 0x73, 0xf8, - 0x43, 0x31, 0x4b, 0x78, 0xb8, 0x2a, 0x8f, 0x96, 0x26, 0x4f, 0xee, 0x95, 0xe4, 0x39, 0x85, 0xaa, - 0x50, 0x3f, 0xd3, 0x1e, 0x78, 0x13, 0x0a, 0x8c, 0x9f, 0xdc, 0x02, 0xf3, 0x09, 0xb0, 0xd2, 0x3b, - 0x39, 0x21, 0x9e, 0x81, 0xea, 0xbe, 0x6f, 0xfa, 0x03, 0x4f, 0x6e, 0x81, 0xdf, 0x6b, 0x50, 0x93, - 0x3d, 0x59, 0xcb, 0x2c, 0xf2, 0x7a, 0xca, 0x63, 0x5e, 0x70, 0x39, 0xbd, 0x04, 0x85, 0xce, 0xe1, - 0xbe, 0xf5, 0x89, 0x2c, 0x66, 0x89, 0x16, 0xed, 0xef, 0x72, 0x1c, 0x5e, 0x50, 0x16, 0x2d, 0x7a, - 0xd1, 0x71, 0xcd, 0x17, 0xfe, 0x96, 0xdd, 0x21, 0xa7, 0x2c, 0x9f, 0x98, 0x34, 0xc2, 0x0e, 0x76, - 0x37, 0x11, 0x85, 0x67, 0x96, 0x7f, 0xa9, 0x85, 0xe8, 0x59, 0xb8, 0xb0, 0x36, 0xf0, 0x8f, 0x9b, - 0xb6, 0x79, 0xd8, 0x95, 0x41, 0x00, 0xcf, 0x01, 0xa2, 0x9d, 0x1b, 0x96, 0xa7, 0xf6, 0x36, 0x61, - 0x96, 0xf6, 0x12, 0xdb, 0xb7, 0xda, 0x4a, 0xc4, 0x90, 0x61, 0x5b, 0x8b, 0x85, 0x6d, 0xd3, 0xf3, - 0x5e, 0x3a, 0x6e, 0x47, 0xa8, 0x16, 0xb4, 0xf1, 0x06, 0x67, 0xfe, 0xcc, 0x8b, 0x04, 0xe6, 0xaf, - 0xca, 0x65, 0x29, 0xe4, 0xf2, 0x98, 0xf8, 0x63, 0xb8, 0xe0, 0x07, 0x70, 0x51, 0x52, 0x8a, 0xfa, - 0xc5, 0x18, 0xe2, 0x3d, 0xb8, 0x26, 0x89, 0xd7, 0x8f, 0x69, 0x56, 0xfd, 0x54, 0x00, 0xfe, 0xa7, - 0x72, 0x3e, 0x82, 0x46, 0x20, 0x27, 0xcb, 0xb4, 0x9c, 0xae, 0x2a, 0xc0, 0xc0, 0x13, 0x7b, 0xa6, - 0x64, 0xb0, 0x6f, 0xda, 0xe7, 0x3a, 0xdd, 0xe0, 0x10, 0xa4, 0xdf, 0x78, 0x1d, 0xe6, 0x25, 0x0f, - 0x91, 0x03, 0x45, 0x99, 0x8c, 0x08, 0x94, 0xc4, 0x44, 0x18, 0x8c, 0x4e, 0x1d, 0x6f, 0x76, 0x95, - 0x32, 0x6a, 0x5a, 0xc6, 0x53, 0x53, 0x78, 0x5e, 0xe4, 0x3b, 0x82, 0x0a, 0xa6, 0x06, 0x6d, 0xd1, - 0x4d, 0x19, 0xa8, 0xdd, 0x62, 0x21, 0x68, 0xf7, 0xc8, 0x42, 0x8c, 0xb0, 0xfe, 0x08, 0x16, 0x02, - 0x21, 0xa8, 0xdd, 0x9e, 0x12, 0xb7, 0x67, 0x79, 0x9e, 0x72, 0xe3, 0x4e, 0x52, 0xfc, 0x0e, 0x4c, - 0xf6, 0x89, 0x88, 0x29, 0xe5, 0x55, 0xb4, 0xcc, 0x9f, 0x87, 0x96, 0x95, 0xc9, 0x6c, 0x1c, 0x77, - 0xe0, 0xba, 0xe4, 0xce, 0x2d, 0x9a, 0xc8, 0x3e, 0x2e, 0x94, 0xbc, 0x8d, 0x71, 0xb3, 0x8e, 0xde, - 0xc6, 0xf2, 0x7c, 0xed, 0xe5, 0x6d, 0x8c, 0x9e, 
0x15, 0xaa, 0x6f, 0x65, 0x3a, 0x2b, 0xb6, 0xb9, - 0x4d, 0x03, 0x97, 0xcc, 0xc4, 0xec, 0x10, 0xe6, 0xa2, 0x9e, 0x9c, 0x29, 0x8c, 0xcd, 0xc1, 0x94, - 0xef, 0x9c, 0x10, 0x19, 0xc4, 0x78, 0x43, 0x0a, 0x1c, 0xb8, 0x79, 0x26, 0x81, 0xcd, 0x90, 0x19, - 0xdb, 0x92, 0x59, 0xe5, 0xa5, 0xab, 0x29, 0xf3, 0x19, 0xde, 0xc0, 0xbb, 0x70, 0x29, 0x1e, 0x26, - 0x32, 0x89, 0xfc, 0x9c, 0x6f, 0xe0, 0xa4, 0x48, 0x92, 0x89, 0xef, 0xfb, 0x61, 0x30, 0x50, 0x02, - 0x4a, 0x26, 0x96, 0x06, 0xe8, 0x49, 0xf1, 0xe5, 0xbf, 0xb1, 0x5f, 0x83, 0x70, 0x93, 0x89, 0x99, - 0x17, 0x32, 0xcb, 0xbe, 0xfc, 0x61, 0x8c, 0xc8, 0x8f, 0x8d, 0x11, 0xc2, 0x49, 0xc2, 0x28, 0xf6, - 0x35, 0x6c, 0x3a, 0x81, 0x11, 0x06, 0xd0, 0xac, 0x18, 0xf4, 0x0c, 0x09, 0x30, 0x58, 0x43, 0x6e, - 0x6c, 0x35, 0xec, 0x66, 0x5a, 0x8c, 0x0f, 0xc2, 0xd8, 0x39, 0x12, 0x99, 0x33, 0x31, 0xfe, 0x10, - 0x16, 0xd3, 0x83, 0x72, 0x16, 0xce, 0xf7, 0x31, 0x94, 0x82, 0x84, 0x52, 0x79, 0x5a, 0x2d, 0x43, - 0x71, 0x77, 0x6f, 0xff, 0xe9, 0xda, 0x7a, 0xb3, 0xae, 0xad, 0xfe, 0x33, 0x0f, 0xb9, 0xed, 0xe7, - 0xe8, 0x5b, 0x30, 0xc5, 0x1f, 0x5e, 0xc6, 0xbc, 0x4b, 0xe9, 0xe3, 0x9e, 0x70, 0xf0, 0xd5, 0x4f, - 0xff, 0xf4, 0xd7, 0x2f, 0x72, 0x97, 0xf0, 0x85, 0x95, 0xe1, 0x5b, 0x66, 0xb7, 0x7f, 0x6c, 0xae, - 0x9c, 0x0c, 0x57, 0xd8, 0x99, 0xf0, 0x50, 0xbb, 0x8f, 0x9e, 0x43, 0xfe, 0xe9, 0xc0, 0x47, 0xa9, - 0x8f, 0x56, 0x7a, 0xfa, 0xd3, 0x0e, 0xd6, 0x19, 0xe7, 0x39, 0x3c, 0xa3, 0x72, 0xee, 0x0f, 0x7c, - 0xca, 0x77, 0x08, 0x65, 0xe5, 0x75, 0x06, 0x9d, 0xfb, 0x9c, 0xa5, 0x9f, 0xff, 0xf2, 0x83, 0x31, - 0xc3, 0xbb, 0x8a, 0x2f, 0xab, 0x78, 0xfc, 0x11, 0x49, 0xd5, 0xe7, 0xe0, 0xd4, 0x8e, 0xeb, 0x13, - 0x3e, 0x30, 0xc4, 0xf5, 0x51, 0x8a, 0xfa, 0xc9, 0xfa, 0xf8, 0xa7, 0x36, 0xe5, 0xeb, 0x88, 0x17, - 0xa5, 0xb6, 0x8f, 0xae, 0x27, 0xbc, 0x48, 0xa8, 0xb5, 0x77, 0x7d, 0x31, 0x9d, 0x40, 0x20, 0xdd, - 0x60, 0x48, 0x57, 0xf0, 0x25, 0x15, 0xa9, 0x1d, 0xd0, 0x3d, 0xd4, 0xee, 0xaf, 0x1e, 0xc3, 0x14, - 0xab, 0x18, 0xa2, 0x96, 0xfc, 0xd0, 0x13, 0x6a, 0x9d, 0x29, 0x3b, 0x20, 0x52, 0x6b, 0xc4, 0xf3, - 0x0c, 0x6d, 0x16, 0xd7, 0x02, 0x34, 0x56, 0x34, 0x7c, 0xa8, 0xdd, 0x5f, 0xd2, 0xde, 0xd0, 0x56, - 0xbf, 0x3f, 0x09, 0x53, 0xac, 0x52, 0x83, 0xfa, 0x00, 0x61, 0x0d, 0x2e, 0xae, 0xe7, 0x48, 0x55, - 0x2f, 0xae, 0xe7, 0x68, 0xf9, 0x0e, 0x5f, 0x67, 0xc8, 0xf3, 0x78, 0x2e, 0x40, 0x66, 0xaf, 0xe0, - 0x2b, 0xac, 0x26, 0x43, 0xcd, 0xfa, 0x12, 0xca, 0x4a, 0x2d, 0x0d, 0x25, 0x71, 0x8c, 0x14, 0xe3, - 0xe2, 0xdb, 0x24, 0xa1, 0x10, 0x87, 0x6f, 0x32, 0xd0, 0x6b, 0xb8, 0xa1, 0x1a, 0x97, 0xe3, 0xba, - 0x8c, 0x92, 0x02, 0x7f, 0xa6, 0x41, 0x2d, 0x5a, 0x4f, 0x43, 0x37, 0x13, 0x58, 0xc7, 0xcb, 0x72, - 0xfa, 0xad, 0xf1, 0x44, 0xa9, 0x22, 0x70, 0xfc, 0x13, 0x42, 0xfa, 0x26, 0xa5, 0x14, 0xb6, 0x47, - 0x3f, 0xd0, 0x60, 0x26, 0x56, 0x25, 0x43, 0x49, 0x10, 0x23, 0x35, 0x38, 0xfd, 0xf6, 0x39, 0x54, - 0x42, 0x92, 0xbb, 0x4c, 0x92, 0x1b, 0xf8, 0xea, 0xa8, 0x31, 0x7c, 0xab, 0x47, 0x7c, 0x47, 0x48, - 0xb3, 0xfa, 0xaf, 0x3c, 0x14, 0xd7, 0xf9, 0xaf, 0x8c, 0x90, 0x0f, 0xa5, 0xa0, 0xf2, 0x84, 0x16, - 0x92, 0xaa, 0x12, 0x61, 0xca, 0xae, 0x5f, 0x4f, 0x1d, 0x17, 0x22, 0xdc, 0x61, 0x22, 0x2c, 0xe2, - 0x2b, 0x81, 0x08, 0xe2, 0xd7, 0x4c, 0x2b, 0xfc, 0xf2, 0xbd, 0x62, 0x76, 0x3a, 0x74, 0x49, 0xbe, - 0xa7, 0x41, 0x45, 0x2d, 0x28, 0xa1, 0x1b, 0x89, 0xf5, 0x10, 0xb5, 0x26, 0xa5, 0xe3, 0x71, 0x24, - 0x02, 0xff, 0x1e, 0xc3, 0xbf, 0x89, 0x17, 0xd2, 0xf0, 0x5d, 0x46, 0x1f, 0x15, 0x81, 0x97, 0x90, - 0x92, 0x45, 0x88, 0x54, 0xa8, 0x92, 0x45, 0x88, 0x56, 0xa0, 0xce, 0x17, 0x61, 0xc0, 0xe8, 0xa9, - 0x08, 0xa7, 0x00, 0x61, 0x85, 0x09, 0x25, 0x1a, 0x57, 0xb9, 0xc4, 0xc4, 
0x7d, 0x70, 0xb4, 0x38, - 0x95, 0xb0, 0x03, 0x62, 0xd8, 0x5d, 0xcb, 0xa3, 0xbe, 0xb8, 0xfa, 0xdb, 0x49, 0x28, 0x3f, 0x31, - 0x2d, 0xdb, 0x27, 0xb6, 0x69, 0xb7, 0x09, 0x3a, 0x82, 0x29, 0x76, 0x4a, 0xc5, 0x03, 0x8f, 0x5a, - 0xf6, 0x89, 0x07, 0x9e, 0x48, 0x4d, 0x04, 0xdf, 0x66, 0xd0, 0xd7, 0xb1, 0x1e, 0x40, 0xf7, 0x42, - 0xfe, 0x2b, 0xac, 0x9e, 0x41, 0x55, 0x3e, 0x81, 0x02, 0xaf, 0x5f, 0xa0, 0x18, 0xb7, 0x48, 0x9d, - 0x43, 0xbf, 0x9a, 0x3c, 0x98, 0xba, 0xcb, 0x54, 0x2c, 0x8f, 0x11, 0x53, 0xb0, 0x6f, 0x03, 0x84, - 0x05, 0xb3, 0xb8, 0x7d, 0x47, 0xea, 0x6b, 0xfa, 0x62, 0x3a, 0x81, 0x00, 0xbe, 0xcf, 0x80, 0x6f, - 0xe1, 0xeb, 0x89, 0xc0, 0x9d, 0x60, 0x02, 0x05, 0x6f, 0xc3, 0xe4, 0xa6, 0xe9, 0x1d, 0xa3, 0xd8, - 0x21, 0xa4, 0xbc, 0x92, 0xea, 0x7a, 0xd2, 0x90, 0x80, 0xba, 0xc5, 0xa0, 0x16, 0xf0, 0x7c, 0x22, - 0xd4, 0xb1, 0xe9, 0xd1, 0x98, 0x8e, 0x06, 0x30, 0x2d, 0x5f, 0x3e, 0xd1, 0xb5, 0x98, 0xcd, 0xa2, - 0xaf, 0xa4, 0xfa, 0x42, 0xda, 0xb0, 0x00, 0x5c, 0x62, 0x80, 0x18, 0x5f, 0x4b, 0x36, 0xaa, 0x20, - 0x7f, 0xa8, 0xdd, 0x7f, 0x43, 0x5b, 0xfd, 0x51, 0x1d, 0x26, 0x69, 0xbe, 0x44, 0x4f, 0x91, 0xf0, - 0x9a, 0x19, 0xb7, 0xf0, 0x48, 0x71, 0x27, 0x6e, 0xe1, 0xd1, 0x1b, 0x6a, 0xc2, 0x29, 0xc2, 0x7e, - 0x6b, 0x49, 0x18, 0x15, 0xd5, 0xd8, 0x87, 0xb2, 0x72, 0x19, 0x45, 0x09, 0x1c, 0xa3, 0xa5, 0xa3, - 0xf8, 0x29, 0x92, 0x70, 0x93, 0xc5, 0x8b, 0x0c, 0x54, 0xc7, 0x17, 0xa3, 0xa0, 0x1d, 0x4e, 0x46, - 0x51, 0xbf, 0x03, 0x15, 0xf5, 0xd6, 0x8a, 0x12, 0x98, 0xc6, 0x6a, 0x53, 0xf1, 0x58, 0x91, 0x74, - 0xe9, 0x4d, 0x70, 0x9a, 0xe0, 0x97, 0xa5, 0x92, 0x96, 0xa2, 0x7f, 0x0c, 0x45, 0x71, 0x97, 0x4d, - 0xd2, 0x37, 0x5a, 0xcd, 0x4a, 0xd2, 0x37, 0x76, 0x11, 0x4e, 0x48, 0x49, 0x18, 0x2c, 0xcd, 0xd9, - 0x65, 0x80, 0x16, 0x90, 0x8f, 0x89, 0x9f, 0x06, 0x19, 0xd6, 0x67, 0xd2, 0x20, 0x95, 0xfb, 0xd2, - 0x58, 0xc8, 0x23, 0xe2, 0x8b, 0xbd, 0x2c, 0x2f, 0x23, 0x28, 0x85, 0xa3, 0x1a, 0x0d, 0xf1, 0x38, - 0x92, 0xd4, 0x2c, 0x32, 0x44, 0x15, 0xa1, 0x10, 0x7d, 0x17, 0x20, 0xbc, 0x78, 0xc7, 0x13, 0x83, - 0xc4, 0xea, 0x5d, 0x3c, 0x31, 0x48, 0xbe, 0xbb, 0x27, 0x78, 0x70, 0x08, 0xce, 0x33, 0x59, 0x0a, - 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x48, 0x86, 0x48, 0x2c, 0x0c, 0xea, 0xaf, 0xbd, - 0x1a, 0x71, 0x6a, 0xf4, 0x0c, 0xe5, 0x6a, 0xb3, 0x29, 0xfd, 0x97, 0x54, 0xb2, 0xcf, 0x35, 0xa8, - 0x46, 0xae, 0xfa, 0xe8, 0x4e, 0xca, 0x3a, 0xc7, 0x8a, 0x8b, 0xfa, 0xdd, 0x73, 0xe9, 0x52, 0x73, - 0x27, 0x65, 0x57, 0xc8, 0xbc, 0xf1, 0x87, 0x1a, 0xd4, 0xa2, 0xf5, 0x01, 0x94, 0x02, 0x30, 0x52, - 0xa1, 0xd4, 0x97, 0xce, 0x27, 0x7c, 0x85, 0xd5, 0x0a, 0x53, 0xc9, 0x8f, 0xa1, 0x28, 0xca, 0x0a, - 0x49, 0x6e, 0x11, 0x2d, 0x70, 0x26, 0xb9, 0x45, 0xac, 0x26, 0x91, 0xe6, 0x16, 0xf4, 0x86, 0xae, - 0x78, 0xa2, 0x28, 0x3e, 0xa4, 0x41, 0x8e, 0xf7, 0xc4, 0x58, 0xe5, 0x62, 0x2c, 0x64, 0xe8, 0x89, - 0xb2, 0xf4, 0x80, 0x52, 0x38, 0x9e, 0xe3, 0x89, 0xf1, 0xca, 0x45, 0x9a, 0x27, 0x32, 0x54, 0xc5, - 0x13, 0xc3, 0x4a, 0x41, 0x92, 0x27, 0x8e, 0x94, 0x6f, 0x93, 0x3c, 0x71, 0xb4, 0xd8, 0x90, 0xb6, - 0xb6, 0x0c, 0x3c, 0xe2, 0x89, 0xb3, 0x09, 0x95, 0x05, 0xf4, 0x5a, 0x8a, 0x4d, 0x13, 0x4b, 0xc3, - 0xfa, 0xeb, 0xaf, 0x48, 0x3d, 0xde, 0x03, 0xf8, 0x6a, 0x48, 0x0f, 0xf8, 0x85, 0x06, 0x73, 0x49, - 0xa5, 0x09, 0x94, 0x02, 0x96, 0x52, 0x57, 0xd6, 0x97, 0x5f, 0x95, 0xfc, 0x15, 0xec, 0x16, 0xf8, - 0xc4, 0xa3, 0xfa, 0xef, 0xbe, 0x5c, 0xd0, 0xfe, 0xf8, 0xe5, 0x82, 0xf6, 0xe7, 0x2f, 0x17, 0xb4, - 0x9f, 0xfe, 0x65, 0x61, 0xe2, 0xb0, 0xc0, 0xfe, 0xc3, 0xc3, 0x5b, 0xff, 0x0e, 0x00, 0x00, 0xff, - 0xff, 0x73, 0x7e, 0xb4, 0xb4, 0x77, 0x31, 0x00, 0x00, -} diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go deleted file mode 100644 index 473ad582ef8..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go +++ /dev/null @@ -1,1911 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway -// source: etcdserver/etcdserverpb/rpc.proto -// DO NOT EDIT! - -/* -Package etcdserverpb is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package etcdserverpb - -import ( - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" -) - -var _ codes.Code -var _ io.Reader -var _ = runtime.String -var _ = utilities.NewDoubleArray - -func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RangeRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PutRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRangeRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq TxnRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CompactionRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, 
"%v", err) - } - - msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client WatchClient, req *http.Request, pathParams map[string]string) (Watch_WatchClient, runtime.ServerMetadata, error) { - var metadata runtime.ServerMetadata - stream, err := client.Watch(ctx) - if err != nil { - grpclog.Printf("Failed to start streaming: %v", err) - return nil, metadata, err - } - dec := marshaler.NewDecoder(req.Body) - handleSend := func() error { - var protoReq WatchRequest - err = dec.Decode(&protoReq) - if err == io.EOF { - return err - } - if err != nil { - grpclog.Printf("Failed to decode request: %v", err) - return err - } - if err = stream.Send(&protoReq); err != nil { - grpclog.Printf("Failed to send request: %v", err) - return err - } - return nil - } - if err := handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Printf("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } - go func() { - for { - if err := handleSend(); err != nil { - break - } - } - if err := stream.CloseSend(); err != nil { - grpclog.Printf("Failed to terminate client stream: %v", err) - } - }() - header, err := stream.Header() - if err != nil { - grpclog.Printf("Failed to get header from client: %v", err) - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil -} - -func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseGrantRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseRevokeRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { - var metadata runtime.ServerMetadata - stream, err := client.LeaseKeepAlive(ctx) - if err != nil { - grpclog.Printf("Failed to start streaming: %v", err) - return nil, metadata, err - } - dec := marshaler.NewDecoder(req.Body) - handleSend := func() error { - var protoReq LeaseKeepAliveRequest - err = dec.Decode(&protoReq) - if err == io.EOF { - return err - } - if err != nil { - grpclog.Printf("Failed to decode request: %v", err) - return err - } - if err = stream.Send(&protoReq); err != nil { - grpclog.Printf("Failed to send request: %v", err) - return err - } - return nil - } - if err := 
handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Printf("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } - go func() { - for { - if err := handleSend(); err != nil { - break - } - } - if err := stream.CloseSend(); err != nil { - grpclog.Printf("Failed to terminate client stream: %v", err) - } - }() - header, err := stream.Header() - if err != nil { - grpclog.Printf("Failed to get header from client: %v", err) - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil -} - -func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseTimeToLiveRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberAddRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberRemoveRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberUpdateRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberListRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - 
-func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AlarmRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DefragmentRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HashRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (Maintenance_SnapshotClient, runtime.ServerMetadata, error) { - var protoReq SnapshotRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.Snapshot(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthEnableRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, 
metadata, err - -} - -func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthDisableRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthenticateRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserAddRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserGetRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserListRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserDeleteRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserChangePassword_0(ctx context.Context, marshaler 
runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserChangePasswordRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserGrantRoleRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserRevokeRoleRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleAddRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleGetRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleListRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleDeleteRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleGrantPermissionRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleRevokePermissionRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -// RegisterKVHandlerFromEndpoint is same as RegisterKVHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterKVHandler(ctx, mux, conn) -} - -// RegisterKVHandler registers the http handlers for service KV to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewKVClient(conn) - - mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_KV_Range_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_KV_Put_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_KV_DeleteRange_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_KV_Txn_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_KV_Compact_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "range"}, "")) - - pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "put"}, "")) - - pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "deleterange"}, "")) - - pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "txn"}, "")) - - pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "compaction"}, "")) -) - -var ( - forward_KV_Range_0 = runtime.ForwardResponseMessage - - forward_KV_Put_0 = runtime.ForwardResponseMessage - - forward_KV_DeleteRange_0 = runtime.ForwardResponseMessage - - forward_KV_Txn_0 = runtime.ForwardResponseMessage - - forward_KV_Compact_0 = runtime.ForwardResponseMessage -) - -// RegisterWatchHandlerFromEndpoint is same as RegisterWatchHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterWatchHandler(ctx, mux, conn) -} - -// RegisterWatchHandler registers the http handlers for service Watch to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewWatchClient(conn) - - mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Watch_Watch_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3alpha", "watch"}, "")) -) - -var ( - forward_Watch_Watch_0 = runtime.ForwardResponseStream -) - -// RegisterLeaseHandlerFromEndpoint is same as RegisterLeaseHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterLeaseHandler(ctx, mux, conn) -} - -// RegisterLeaseHandler registers the http handlers for service Lease to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewLeaseClient(conn) - - mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Lease_LeaseGrant_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Lease_LeaseRevoke_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Lease_LeaseKeepAlive_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Lease_LeaseTimeToLive_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lease", "grant"}, "")) - - pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "revoke"}, "")) - - pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lease", "keepalive"}, "")) - - pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "timetolive"}, "")) -) - -var ( - forward_Lease_LeaseGrant_0 = runtime.ForwardResponseMessage - - forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage - - forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream - - forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage -) - -// RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterClusterHandler(ctx, mux, conn) -} - -// RegisterClusterHandler registers the http handlers for service Cluster to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewClusterClient(conn) - - mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Cluster_MemberAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Cluster_MemberRemove_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Cluster_MemberUpdate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Cluster_MemberList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "add"}, "")) - - pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "remove"}, "")) - - pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "update"}, "")) - - pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "list"}, "")) -) - -var ( - forward_Cluster_MemberAdd_0 = runtime.ForwardResponseMessage - - forward_Cluster_MemberRemove_0 = runtime.ForwardResponseMessage - - forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage - - forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage -) - -// RegisterMaintenanceHandlerFromEndpoint is same as RegisterMaintenanceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterMaintenanceHandler(ctx, mux, conn) -} - -// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewMaintenanceClient(conn) - - mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Maintenance_Alarm_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Maintenance_Status_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Maintenance_Defragment_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Maintenance_Hash_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Maintenance_Snapshot_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "alarm"}, "")) - - pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "status"}, "")) - - pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "defragment"}, "")) - - pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "hash"}, "")) - - pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "snapshot"}, "")) -) - -var ( - forward_Maintenance_Alarm_0 = runtime.ForwardResponseMessage - - forward_Maintenance_Status_0 = runtime.ForwardResponseMessage - - forward_Maintenance_Defragment_0 = runtime.ForwardResponseMessage - - forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage - - forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream -) - -// RegisterAuthHandlerFromEndpoint is same as RegisterAuthHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterAuthHandler(ctx, mux, conn) -} - -// RegisterAuthHandler registers the http handlers for service Auth to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewAuthClient(conn) - - mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_AuthEnable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_AuthDisable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_Authenticate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserChangePassword_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserGrantRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserRevokeRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleGrantPermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleRevokePermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "auth", "enable"}, "")) - - pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "auth", "disable"}, "")) - - pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "auth", "authenticate"}, "")) - - pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "add"}, "")) - - pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "get"}, "")) - - pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "list"}, "")) - - pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "delete"}, "")) - - pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "changepw"}, "")) - - pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "grant"}, "")) - - pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "revoke"}, "")) - - pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "add"}, "")) - - pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "get"}, "")) - - pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "list"}, "")) - - pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "delete"}, "")) - - pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "grant"}, "")) - - pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "revoke"}, "")) -) - -var ( - forward_Auth_AuthEnable_0 = runtime.ForwardResponseMessage - - 
forward_Auth_AuthDisable_0 = runtime.ForwardResponseMessage - - forward_Auth_Authenticate_0 = runtime.ForwardResponseMessage - - forward_Auth_UserAdd_0 = runtime.ForwardResponseMessage - - forward_Auth_UserGet_0 = runtime.ForwardResponseMessage - - forward_Auth_UserList_0 = runtime.ForwardResponseMessage - - forward_Auth_UserDelete_0 = runtime.ForwardResponseMessage - - forward_Auth_UserChangePassword_0 = runtime.ForwardResponseMessage - - forward_Auth_UserGrantRole_0 = runtime.ForwardResponseMessage - - forward_Auth_UserRevokeRole_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleAdd_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleGet_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleList_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleDelete_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleGrantPermission_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleRevokePermission_0 = runtime.ForwardResponseMessage -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go deleted file mode 100644 index 25c45dfce12..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -import ( - "bytes" - "crypto/sha1" - "encoding/binary" - "encoding/json" - "fmt" - "path" - "sort" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/netutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - "github.com/coreos/go-semver/semver" -) - -// RaftCluster is a list of Members that belong to the same raft cluster -type RaftCluster struct { - id types.ID - token string - - store store.Store - be backend.Backend - - sync.Mutex // guards the fields below - version *semver.Version - members map[types.ID]*Member - // removed contains the ids of removed members in the cluster. - // removed id cannot be reused. 
- removed map[types.ID]bool -} - -func NewClusterFromURLsMap(token string, urlsmap types.URLsMap) (*RaftCluster, error) { - c := NewCluster(token) - for name, urls := range urlsmap { - m := NewMember(name, urls, token, nil) - if _, ok := c.members[m.ID]; ok { - return nil, fmt.Errorf("member exists with identical ID %v", m) - } - if uint64(m.ID) == raft.None { - return nil, fmt.Errorf("cannot use %x as member id", raft.None) - } - c.members[m.ID] = m - } - c.genID() - return c, nil -} - -func NewClusterFromMembers(token string, id types.ID, membs []*Member) *RaftCluster { - c := NewCluster(token) - c.id = id - for _, m := range membs { - c.members[m.ID] = m - } - return c -} - -func NewCluster(token string) *RaftCluster { - return &RaftCluster{ - token: token, - members: make(map[types.ID]*Member), - removed: make(map[types.ID]bool), - } -} - -func (c *RaftCluster) ID() types.ID { return c.id } - -func (c *RaftCluster) Members() []*Member { - c.Lock() - defer c.Unlock() - var ms MembersByID - for _, m := range c.members { - ms = append(ms, m.Clone()) - } - sort.Sort(ms) - return []*Member(ms) -} - -func (c *RaftCluster) Member(id types.ID) *Member { - c.Lock() - defer c.Unlock() - return c.members[id].Clone() -} - -// MemberByName returns a Member with the given name if exists. -// If more than one member has the given name, it will panic. -func (c *RaftCluster) MemberByName(name string) *Member { - c.Lock() - defer c.Unlock() - var memb *Member - for _, m := range c.members { - if m.Name == name { - if memb != nil { - plog.Panicf("two members with the given name %q exist", name) - } - memb = m - } - } - return memb.Clone() -} - -func (c *RaftCluster) MemberIDs() []types.ID { - c.Lock() - defer c.Unlock() - var ids []types.ID - for _, m := range c.members { - ids = append(ids, m.ID) - } - sort.Sort(types.IDSlice(ids)) - return ids -} - -func (c *RaftCluster) IsIDRemoved(id types.ID) bool { - c.Lock() - defer c.Unlock() - return c.removed[id] -} - -// PeerURLs returns a list of all peer addresses. -// The returned list is sorted in ascending lexicographical order. -func (c *RaftCluster) PeerURLs() []string { - c.Lock() - defer c.Unlock() - urls := make([]string, 0) - for _, p := range c.members { - urls = append(urls, p.PeerURLs...) - } - sort.Strings(urls) - return urls -} - -// ClientURLs returns a list of all client addresses. -// The returned list is sorted in ascending lexicographical order. -func (c *RaftCluster) ClientURLs() []string { - c.Lock() - defer c.Unlock() - urls := make([]string, 0) - for _, p := range c.members { - urls = append(urls, p.ClientURLs...) 
- } - sort.Strings(urls) - return urls -} - -func (c *RaftCluster) String() string { - c.Lock() - defer c.Unlock() - b := &bytes.Buffer{} - fmt.Fprintf(b, "{ClusterID:%s ", c.id) - var ms []string - for _, m := range c.members { - ms = append(ms, fmt.Sprintf("%+v", m)) - } - fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " ")) - var ids []string - for id := range c.removed { - ids = append(ids, fmt.Sprintf("%s", id)) - } - fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " ")) - return b.String() -} - -func (c *RaftCluster) genID() { - mIDs := c.MemberIDs() - b := make([]byte, 8*len(mIDs)) - for i, id := range mIDs { - binary.BigEndian.PutUint64(b[8*i:], uint64(id)) - } - hash := sha1.Sum(b) - c.id = types.ID(binary.BigEndian.Uint64(hash[:8])) -} - -func (c *RaftCluster) SetID(id types.ID) { c.id = id } - -func (c *RaftCluster) SetStore(st store.Store) { c.store = st } - -func (c *RaftCluster) SetBackend(be backend.Backend) { - c.be = be - mustCreateBackendBuckets(c.be) -} - -func (c *RaftCluster) Recover(onSet func(*semver.Version)) { - c.Lock() - defer c.Unlock() - - c.members, c.removed = membersFromStore(c.store) - c.version = clusterVersionFromStore(c.store) - mustDetectDowngrade(c.version) - onSet(c.version) - - for _, m := range c.members { - plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id) - } - if c.version != nil { - plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String())) - } -} - -// ValidateConfigurationChange takes a proposed ConfChange and -// ensures that it is still valid. -func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { - members, removed := membersFromStore(c.store) - id := types.ID(cc.NodeID) - if removed[id] { - return ErrIDRemoved - } - switch cc.Type { - case raftpb.ConfChangeAddNode: - if members[id] != nil { - return ErrIDExists - } - urls := make(map[string]bool) - for _, m := range members { - for _, u := range m.PeerURLs { - urls[u] = true - } - } - m := new(Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) - } - for _, u := range m.PeerURLs { - if urls[u] { - return ErrPeerURLexists - } - } - case raftpb.ConfChangeRemoveNode: - if members[id] == nil { - return ErrIDNotFound - } - case raftpb.ConfChangeUpdateNode: - if members[id] == nil { - return ErrIDNotFound - } - urls := make(map[string]bool) - for _, m := range members { - if m.ID == id { - continue - } - for _, u := range m.PeerURLs { - urls[u] = true - } - } - m := new(Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) - } - for _, u := range m.PeerURLs { - if urls[u] { - return ErrPeerURLexists - } - } - default: - plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode") - } - return nil -} - -// AddMember adds a new Member into the cluster, and saves the given member's -// raftAttributes into the store. The given member should have empty attributes. -// A Member with a matching id must not exist. -func (c *RaftCluster) AddMember(m *Member) { - c.Lock() - defer c.Unlock() - if c.store != nil { - mustSaveMemberToStore(c.store, m) - } - if c.be != nil { - mustSaveMemberToBackend(c.be, m) - } - - c.members[m.ID] = m - - plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id) -} - -// RemoveMember removes a member from the store. -// The given id MUST exist, or the function panics. 
-func (c *RaftCluster) RemoveMember(id types.ID) { - c.Lock() - defer c.Unlock() - if c.store != nil { - mustDeleteMemberFromStore(c.store, id) - } - if c.be != nil { - mustDeleteMemberFromBackend(c.be, id) - } - - delete(c.members, id) - c.removed[id] = true - - plog.Infof("removed member %s from cluster %s", id, c.id) -} - -func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) { - c.Lock() - defer c.Unlock() - if m, ok := c.members[id]; ok { - m.Attributes = attr - if c.store != nil { - mustUpdateMemberAttrInStore(c.store, m) - } - if c.be != nil { - mustSaveMemberToBackend(c.be, m) - } - return - } - _, ok := c.removed[id] - if !ok { - plog.Panicf("error updating attributes of unknown member %s", id) - } - plog.Warningf("skipped updating attributes of removed member %s", id) -} - -func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) { - c.Lock() - defer c.Unlock() - - c.members[id].RaftAttributes = raftAttr - if c.store != nil { - mustUpdateMemberInStore(c.store, c.members[id]) - } - if c.be != nil { - mustSaveMemberToBackend(c.be, c.members[id]) - } - - plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id) -} - -func (c *RaftCluster) Version() *semver.Version { - c.Lock() - defer c.Unlock() - if c.version == nil { - return nil - } - return semver.Must(semver.NewVersion(c.version.String())) -} - -func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*semver.Version)) { - c.Lock() - defer c.Unlock() - if c.version != nil { - plog.Noticef("updated the cluster version from %v to %v", version.Cluster(c.version.String()), version.Cluster(ver.String())) - } else { - plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String())) - } - c.version = ver - mustDetectDowngrade(c.version) - if c.store != nil { - mustSaveClusterVersionToStore(c.store, ver) - } - if c.be != nil { - mustSaveClusterVersionToBackend(c.be, ver) - } - onSet(ver) -} - -func (c *RaftCluster) IsReadyToAddNewMember() bool { - nmembers := 1 - nstarted := 0 - - for _, member := range c.members { - if member.IsStarted() { - nstarted++ - } - nmembers++ - } - - if nstarted == 1 && nmembers == 2 { - // a case of adding a new node to 1-member cluster for restoring cluster data - // https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster - - plog.Debugf("The number of started member is 1. 
This cluster can accept add member request.") - return true - } - - nquorum := nmembers/2 + 1 - if nstarted < nquorum { - plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum) - return false - } - - return true -} - -func (c *RaftCluster) IsReadyToRemoveMember(id uint64) bool { - nmembers := 0 - nstarted := 0 - - for _, member := range c.members { - if uint64(member.ID) == id { - continue - } - - if member.IsStarted() { - nstarted++ - } - nmembers++ - } - - nquorum := nmembers/2 + 1 - if nstarted < nquorum { - plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum) - return false - } - - return true -} - -func membersFromStore(st store.Store) (map[types.ID]*Member, map[types.ID]bool) { - members := make(map[types.ID]*Member) - removed := make(map[types.ID]bool) - e, err := st.Get(StoreMembersPrefix, true, true) - if err != nil { - if isKeyNotFound(err) { - return members, removed - } - plog.Panicf("get storeMembers should never fail: %v", err) - } - for _, n := range e.Node.Nodes { - var m *Member - m, err = nodeToMember(n) - if err != nil { - plog.Panicf("nodeToMember should never fail: %v", err) - } - members[m.ID] = m - } - - e, err = st.Get(storeRemovedMembersPrefix, true, true) - if err != nil { - if isKeyNotFound(err) { - return members, removed - } - plog.Panicf("get storeRemovedMembers should never fail: %v", err) - } - for _, n := range e.Node.Nodes { - removed[MustParseMemberIDFromKey(n.Key)] = true - } - return members, removed -} - -func clusterVersionFromStore(st store.Store) *semver.Version { - e, err := st.Get(path.Join(storePrefix, "version"), false, false) - if err != nil { - if isKeyNotFound(err) { - return nil - } - plog.Panicf("unexpected error (%v) when getting cluster version from store", err) - } - return semver.Must(semver.NewVersion(*e.Node.Value)) -} - -// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs -// with the existing cluster. If the validation succeeds, it assigns the IDs -// from the existing cluster to the local cluster. -// If the validation fails, an error will be returned. 
-func ValidateClusterAndAssignIDs(local *RaftCluster, existing *RaftCluster) error { - ems := existing.Members() - lms := local.Members() - if len(ems) != len(lms) { - return fmt.Errorf("member count is unequal") - } - sort.Sort(MembersByPeerURLs(ems)) - sort.Sort(MembersByPeerURLs(lms)) - - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - for i := range ems { - if !netutil.URLStringsEqual(ctx, ems[i].PeerURLs, lms[i].PeerURLs) { - return fmt.Errorf("unmatched member while checking PeerURLs") - } - lms[i].ID = ems[i].ID - } - local.members = make(map[types.ID]*Member) - for _, m := range lms { - local.members[m.ID] = m - } - return nil -} - -func mustDetectDowngrade(cv *semver.Version) { - lv := semver.Must(semver.NewVersion(version.Version)) - // only keep major.minor version for comparison against cluster version - lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} - if cv != nil && lv.LessThan(*cv) { - plog.Fatalf("cluster cannot be downgraded (current version: %s is lower than determined cluster version: %s).", version.Version, version.Cluster(cv.String())) - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go b/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go deleted file mode 100644 index e4d36af2547..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -import ( - "errors" - - etcdErr "github.com/coreos/etcd/error" -) - -var ( - ErrIDRemoved = errors.New("membership: ID removed") - ErrIDExists = errors.New("membership: ID exists") - ErrIDNotFound = errors.New("membership: ID not found") - ErrPeerURLexists = errors.New("membership: peerURL exists") -) - -func isKeyNotFound(err error) bool { - e, ok := err.(*etcdErr.Error) - return ok && e.ErrorCode == etcdErr.EcodeKeyNotFound -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/member.go b/vendor/github.com/coreos/etcd/etcdserver/membership/member.go deleted file mode 100644 index 6de74d26f8d..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/member.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package membership - -import ( - "crypto/sha1" - "encoding/binary" - "fmt" - "math/rand" - "sort" - "time" - - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/pkg/capnslog" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/membership") -) - -// RaftAttributes represents the raft related attributes of an etcd member. -type RaftAttributes struct { - // PeerURLs is the list of peers in the raft cluster. - // TODO(philips): ensure these are URLs - PeerURLs []string `json:"peerURLs"` -} - -// Attributes represents all the non-raft related attributes of an etcd member. -type Attributes struct { - Name string `json:"name,omitempty"` - ClientURLs []string `json:"clientURLs,omitempty"` -} - -type Member struct { - ID types.ID `json:"id"` - RaftAttributes - Attributes -} - -// NewMember creates a Member without an ID and generates one based on the -// cluster name, peer URLs, and time. This is used for bootstrapping/adding new member. -func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member { - m := &Member{ - RaftAttributes: RaftAttributes{PeerURLs: peerURLs.StringSlice()}, - Attributes: Attributes{Name: name}, - } - - var b []byte - sort.Strings(m.PeerURLs) - for _, p := range m.PeerURLs { - b = append(b, []byte(p)...) - } - - b = append(b, []byte(clusterName)...) - if now != nil { - b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...) - } - - hash := sha1.Sum(b) - m.ID = types.ID(binary.BigEndian.Uint64(hash[:8])) - return m -} - -// PickPeerURL chooses a random address from a given Member's PeerURLs. -// It will panic if there is no PeerURLs available in Member. -func (m *Member) PickPeerURL() string { - if len(m.PeerURLs) == 0 { - plog.Panicf("member should always have some peer url") - } - return m.PeerURLs[rand.Intn(len(m.PeerURLs))] -} - -func (m *Member) Clone() *Member { - if m == nil { - return nil - } - mm := &Member{ - ID: m.ID, - Attributes: Attributes{ - Name: m.Name, - }, - } - if m.PeerURLs != nil { - mm.PeerURLs = make([]string, len(m.PeerURLs)) - copy(mm.PeerURLs, m.PeerURLs) - } - if m.ClientURLs != nil { - mm.ClientURLs = make([]string, len(m.ClientURLs)) - copy(mm.ClientURLs, m.ClientURLs) - } - return mm -} - -func (m *Member) IsStarted() bool { - return len(m.Name) != 0 -} - -// MembersByID implements sort by ID interface -type MembersByID []*Member - -func (ms MembersByID) Len() int { return len(ms) } -func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID } -func (ms MembersByID) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] } - -// MembersByPeerURLs implements sort by peer urls interface -type MembersByPeerURLs []*Member - -func (ms MembersByPeerURLs) Len() int { return len(ms) } -func (ms MembersByPeerURLs) Less(i, j int) bool { - return ms[i].PeerURLs[0] < ms[j].PeerURLs[0] -} -func (ms MembersByPeerURLs) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] } diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go deleted file mode 100644 index f2ea0120d74..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package membership - -import ( - "encoding/json" - "fmt" - "path" - - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/store" - - "github.com/coreos/go-semver/semver" -) - -const ( - attributesSuffix = "attributes" - raftAttributesSuffix = "raftAttributes" - - // the prefix for stroing membership related information in store provided by store pkg. - storePrefix = "/0" -) - -var ( - membersBucketName = []byte("members") - membersRemovedBuckedName = []byte("members_removed") - clusterBucketName = []byte("cluster") - - StoreMembersPrefix = path.Join(storePrefix, "members") - storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members") -) - -func mustSaveMemberToBackend(be backend.Backend, m *Member) { - mkey := backendMemberKey(m.ID) - mvalue, err := json.Marshal(m) - if err != nil { - plog.Panicf("marshal raftAttributes should never fail: %v", err) - } - - tx := be.BatchTx() - tx.Lock() - tx.UnsafePut(membersBucketName, mkey, mvalue) - tx.Unlock() -} - -func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) { - mkey := backendMemberKey(id) - - tx := be.BatchTx() - tx.Lock() - tx.UnsafeDelete(membersBucketName, mkey) - tx.UnsafePut(membersRemovedBuckedName, mkey, []byte("removed")) - tx.Unlock() -} - -func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) { - ckey := backendClusterVersionKey() - - tx := be.BatchTx() - tx.Lock() - defer tx.Unlock() - tx.UnsafePut(clusterBucketName, ckey, []byte(ver.String())) -} - -func mustSaveMemberToStore(s store.Store, m *Member) { - b, err := json.Marshal(m.RaftAttributes) - if err != nil { - plog.Panicf("marshal raftAttributes should never fail: %v", err) - } - p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) - if _, err := s.Create(p, false, string(b), false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { - plog.Panicf("create raftAttributes should never fail: %v", err) - } -} - -func mustDeleteMemberFromStore(s store.Store, id types.ID) { - if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil { - plog.Panicf("delete member should never fail: %v", err) - } - if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { - plog.Panicf("create removedMember should never fail: %v", err) - } -} - -func mustUpdateMemberInStore(s store.Store, m *Member) { - b, err := json.Marshal(m.RaftAttributes) - if err != nil { - plog.Panicf("marshal raftAttributes should never fail: %v", err) - } - p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) - if _, err := s.Update(p, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { - plog.Panicf("update raftAttributes should never fail: %v", err) - } -} - -func mustUpdateMemberAttrInStore(s store.Store, m *Member) { - b, err := json.Marshal(m.Attributes) - if err != nil { - plog.Panicf("marshal raftAttributes should never fail: %v", err) - } - p := path.Join(MemberStoreKey(m.ID), attributesSuffix) - if _, err := s.Set(p, false, string(b), store.TTLOptionSet{ExpireTime: 
store.Permanent}); err != nil { - plog.Panicf("update raftAttributes should never fail: %v", err) - } -} - -func mustSaveClusterVersionToStore(s store.Store, ver *semver.Version) { - if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { - plog.Panicf("save cluster version should never fail: %v", err) - } -} - -// nodeToMember builds member from a key value node. -// the child nodes of the given node MUST be sorted by key. -func nodeToMember(n *store.NodeExtern) (*Member, error) { - m := &Member{ID: MustParseMemberIDFromKey(n.Key)} - attrs := make(map[string][]byte) - raftAttrKey := path.Join(n.Key, raftAttributesSuffix) - attrKey := path.Join(n.Key, attributesSuffix) - for _, nn := range n.Nodes { - if nn.Key != raftAttrKey && nn.Key != attrKey { - return nil, fmt.Errorf("unknown key %q", nn.Key) - } - attrs[nn.Key] = []byte(*nn.Value) - } - if data := attrs[raftAttrKey]; data != nil { - if err := json.Unmarshal(data, &m.RaftAttributes); err != nil { - return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err) - } - } else { - return nil, fmt.Errorf("raftAttributes key doesn't exist") - } - if data := attrs[attrKey]; data != nil { - if err := json.Unmarshal(data, &m.Attributes); err != nil { - return m, fmt.Errorf("unmarshal attributes error: %v", err) - } - } - return m, nil -} - -func backendMemberKey(id types.ID) []byte { - return []byte(id.String()) -} - -func backendClusterVersionKey() []byte { - return []byte("clusterVersion") -} - -func mustCreateBackendBuckets(be backend.Backend) { - tx := be.BatchTx() - tx.Lock() - defer tx.Unlock() - tx.UnsafeCreateBucket(membersBucketName) - tx.UnsafeCreateBucket(membersRemovedBuckedName) - tx.UnsafeCreateBucket(clusterBucketName) -} - -func MemberStoreKey(id types.ID) string { - return path.Join(StoreMembersPrefix, id.String()) -} - -func StoreClusterVersionKey() string { - return path.Join(storePrefix, "version") -} - -func MemberAttributesStorePath(id types.ID) string { - return path.Join(MemberStoreKey(id), attributesSuffix) -} - -func MustParseMemberIDFromKey(key string) types.ID { - id, err := types.IDFromString(path.Base(key)) - if err != nil { - plog.Panicf("unexpected parse member id error: %v", err) - } - return id -} - -func RemovedMemberStoreKey(id types.ID) string { - return path.Join(storeRemovedMembersPrefix, id.String()) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go deleted file mode 100644 index 2b549f738f7..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "time" - - "github.com/coreos/etcd/pkg/runtime" - "github.com/prometheus/client_golang/prometheus" -) - -var ( - hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "has_leader", - Help: "Whether or not a leader exists. 1 is existence, 0 is not.", - }) - leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "leader_changes_seen_total", - Help: "The number of leader changes seen.", - }) - proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_committed_total", - Help: "The total number of consensus proposals committed.", - }) - proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_applied_total", - Help: "The total number of consensus proposals applied.", - }) - proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_pending", - Help: "The current number of pending proposals to commit.", - }) - proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "proposals_failed_total", - Help: "The total number of failed proposals seen.", - }) -) - -func init() { - prometheus.MustRegister(hasLeader) - prometheus.MustRegister(leaderChanges) - prometheus.MustRegister(proposalsCommitted) - prometheus.MustRegister(proposalsApplied) - prometheus.MustRegister(proposalsPending) - prometheus.MustRegister(proposalsFailed) -} - -func monitorFileDescriptor(done <-chan struct{}) { - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - for { - used, err := runtime.FDUsage() - if err != nil { - plog.Errorf("cannot monitor file descriptor usage (%v)", err) - return - } - limit, err := runtime.FDLimit() - if err != nil { - plog.Errorf("cannot monitor file descriptor usage (%v)", err) - return - } - if used >= limit/5*4 { - plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit) - } - select { - case <-ticker.C: - case <-done: - return - } - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go deleted file mode 100644 index 088a4696253..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/quota.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/backend" -) - -// Quota represents an arbitrary quota against arbitrary requests. Each request -// costs some charge; if there is not enough remaining charge, then there are -// too few resources available within the quota to apply the request. -type Quota interface { - // Available judges whether the given request fits within the quota. 
- Available(req interface{}) bool - // Cost computes the charge against the quota for a given request. - Cost(req interface{}) int - // Remaining is the amount of charge left for the quota. - Remaining() int64 -} - -type passthroughQuota struct{} - -func (*passthroughQuota) Available(interface{}) bool { return true } -func (*passthroughQuota) Cost(interface{}) int { return 0 } -func (*passthroughQuota) Remaining() int64 { return 1 } - -type backendQuota struct { - s *EtcdServer - maxBackendBytes int64 -} - -const ( - // leaseOverhead is an estimate for the cost of storing a lease - leaseOverhead = 64 - // kvOverhead is an estimate for the cost of storing a key's metadata - kvOverhead = 256 -) - -func NewBackendQuota(s *EtcdServer) Quota { - if s.Cfg.QuotaBackendBytes < 0 { - // disable quotas if negative - plog.Warningf("disabling backend quota") - return &passthroughQuota{} - } - if s.Cfg.QuotaBackendBytes == 0 { - // use default size if no quota size given - return &backendQuota{s, backend.DefaultQuotaBytes} - } - if s.Cfg.QuotaBackendBytes > backend.MaxQuotaBytes { - plog.Warningf("backend quota %v exceeds maximum quota %v; using maximum", s.Cfg.QuotaBackendBytes, backend.MaxQuotaBytes) - return &backendQuota{s, backend.MaxQuotaBytes} - } - return &backendQuota{s, s.Cfg.QuotaBackendBytes} -} - -func (b *backendQuota) Available(v interface{}) bool { - // TODO: maybe optimize backend.Size() - return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes -} - -func (b *backendQuota) Cost(v interface{}) int { - switch r := v.(type) { - case *pb.PutRequest: - return costPut(r) - case *pb.TxnRequest: - return costTxn(r) - case *pb.LeaseGrantRequest: - return leaseOverhead - default: - panic("unexpected cost") - } -} - -func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) } - -func costTxnReq(u *pb.RequestOp) int { - r := u.GetRequestPut() - if r == nil { - return 0 - } - return costPut(r) -} - -func costTxn(r *pb.TxnRequest) int { - sizeSuccess := 0 - for _, u := range r.Success { - sizeSuccess += costTxnReq(u) - } - sizeFailure := 0 - for _, u := range r.Failure { - sizeFailure += costTxnReq(u) - } - if sizeFailure > sizeSuccess { - return sizeFailure - } - return sizeSuccess -} - -func (b *backendQuota) Remaining() int64 { - return b.maxBackendBytes - b.s.Backend().Size() -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go deleted file mode 100644 index d7ec176eb3a..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/raft.go +++ /dev/null @@ -1,547 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "encoding/json" - "expvar" - "sort" - "sync" - "sync/atomic" - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/contention" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/rafthttp" - "github.com/coreos/etcd/wal" - "github.com/coreos/etcd/wal/walpb" - "github.com/coreos/pkg/capnslog" -) - -const ( - // Number of entries for slow follower to catch-up after compacting - // the raft storage entries. - // We expect the follower has a millisecond level latency with the leader. - // The max throughput is around 10K. Keep a 5K entries is enough for helping - // follower to catch up. - numberOfCatchUpEntries = 5000 - - // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value). - // Assuming the RTT is around 10ms, 1MB max size is large enough. - maxSizePerMsg = 1 * 1024 * 1024 - // Never overflow the rafthttp buffer, which is 4096. - // TODO: a better const? - maxInflightMsgs = 4096 / 8 -) - -var ( - // protects raftStatus - raftStatusMu sync.Mutex - // indirection for expvar func interface - // expvar panics when publishing duplicate name - // expvar does not support remove a registered name - // so only register a func that calls raftStatus - // and change raftStatus as we need. - raftStatus func() raft.Status -) - -func init() { - raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft")) - expvar.Publish("raft.status", expvar.Func(func() interface{} { - raftStatusMu.Lock() - defer raftStatusMu.Unlock() - return raftStatus() - })) -} - -type RaftTimer interface { - Index() uint64 - Term() uint64 -} - -// apply contains entries, snapshot to be applied. Once -// an apply is consumed, the entries will be persisted to -// to raft storage concurrently; the application must read -// raftDone before assuming the raft messages are stable. -type apply struct { - entries []raftpb.Entry - snapshot raftpb.Snapshot - raftDone <-chan struct{} // rx {} after raft has persisted messages -} - -type raftNode struct { - // Cache of the latest raft index and raft term the server has seen. - // These three unit64 fields must be the first elements to keep 64-bit - // alignment for atomic access to the fields. - index uint64 - term uint64 - lead uint64 - - mu sync.Mutex - // last lead elected time - lt time.Time - - // to check if msg receiver is removed from cluster - isIDRemoved func(id uint64) bool - - raft.Node - - // a chan to send/receive snapshot - msgSnapC chan raftpb.Message - - // a chan to send out apply - applyc chan apply - - // a chan to send out readState - readStateC chan raft.ReadState - - // utility - ticker <-chan time.Time - // contention detectors for raft heartbeat message - td *contention.TimeoutDetector - heartbeat time.Duration // for logging - raftStorage *raft.MemoryStorage - storage Storage - // transport specifies the transport to send and receive msgs to members. - // Sending messages MUST NOT block. It is okay to drop messages, since - // clients should timeout and reissue their messages. - // If transport is nil, server will panic. - transport rafthttp.Transporter - - stopped chan struct{} - done chan struct{} -} - -// start prepares and starts raftNode in a new goroutine. It is no longer safe -// to modify the fields after it has been started. 
-func (r *raftNode) start(rh *raftReadyHandler) { - r.applyc = make(chan apply) - r.stopped = make(chan struct{}) - r.done = make(chan struct{}) - internalTimeout := time.Second - - go func() { - defer r.onStop() - islead := false - - for { - select { - case <-r.ticker: - r.Tick() - case rd := <-r.Ready(): - if rd.SoftState != nil { - if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead { - r.mu.Lock() - r.lt = time.Now() - r.mu.Unlock() - leaderChanges.Inc() - } - - if rd.SoftState.Lead == raft.None { - hasLeader.Set(0) - } else { - hasLeader.Set(1) - } - - atomic.StoreUint64(&r.lead, rd.SoftState.Lead) - islead = rd.RaftState == raft.StateLeader - rh.updateLeadership() - } - - if len(rd.ReadStates) != 0 { - select { - case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: - case <-time.After(internalTimeout): - plog.Warningf("timed out sending read state") - case <-r.stopped: - return - } - } - - raftDone := make(chan struct{}, 1) - ap := apply{ - entries: rd.CommittedEntries, - snapshot: rd.Snapshot, - raftDone: raftDone, - } - - updateCommittedIndex(&ap, rh) - - select { - case r.applyc <- ap: - case <-r.stopped: - return - } - - // the leader can write to its disk in parallel with replicating to the followers and them - // writing to their disks. - // For more details, check raft thesis 10.2.1 - if islead { - // gofail: var raftBeforeLeaderSend struct{} - r.sendMessages(rd.Messages) - } - - // gofail: var raftBeforeSave struct{} - if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { - plog.Fatalf("raft save state and entries error: %v", err) - } - if !raft.IsEmptyHardState(rd.HardState) { - proposalsCommitted.Set(float64(rd.HardState.Commit)) - } - // gofail: var raftAfterSave struct{} - - if !raft.IsEmptySnap(rd.Snapshot) { - // gofail: var raftBeforeSaveSnap struct{} - if err := r.storage.SaveSnap(rd.Snapshot); err != nil { - plog.Fatalf("raft save snapshot error: %v", err) - } - // gofail: var raftAfterSaveSnap struct{} - r.raftStorage.ApplySnapshot(rd.Snapshot) - plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) - // gofail: var raftAfterApplySnap struct{} - } - - r.raftStorage.Append(rd.Entries) - - if !islead { - // gofail: var raftBeforeFollowerSend struct{} - r.sendMessages(rd.Messages) - } - raftDone <- struct{}{} - r.Advance() - case <-r.stopped: - return - } - } - }() -} - -func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { - var ci uint64 - if len(ap.entries) != 0 { - ci = ap.entries[len(ap.entries)-1].Index - } - if ap.snapshot.Metadata.Index > ci { - ci = ap.snapshot.Metadata.Index - } - if ci != 0 { - rh.updateCommittedIndex(ci) - } -} - -func (r *raftNode) sendMessages(ms []raftpb.Message) { - sentAppResp := false - for i := len(ms) - 1; i >= 0; i-- { - if r.isIDRemoved(ms[i].To) { - ms[i].To = 0 - } - - if ms[i].Type == raftpb.MsgAppResp { - if sentAppResp { - ms[i].To = 0 - } else { - sentAppResp = true - } - } - - if ms[i].Type == raftpb.MsgSnap { - // There are two separate data store: the store for v2, and the KV for v3. - // The msgSnap only contains the most recent snapshot of store without KV. - // So we need to redirect the msgSnap to etcd server main loop for merging in the - // current store snapshot and KV snapshot. - select { - case r.msgSnapC <- ms[i]: - default: - // drop msgSnap if the inflight chan if full. 
- } - ms[i].To = 0 - } - if ms[i].Type == raftpb.MsgHeartbeat { - ok, exceed := r.td.Observe(ms[i].To) - if !ok { - // TODO: limit request rate. - plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed) - plog.Warningf("server is likely overloaded") - } - } - } - - r.transport.Send(ms) -} - -func (r *raftNode) apply() chan apply { - return r.applyc -} - -func (r *raftNode) leadElectedTime() time.Time { - r.mu.Lock() - defer r.mu.Unlock() - return r.lt -} - -func (r *raftNode) stop() { - r.stopped <- struct{}{} - <-r.done -} - -func (r *raftNode) onStop() { - r.Stop() - r.transport.Stop() - if err := r.storage.Close(); err != nil { - plog.Panicf("raft close storage error: %v", err) - } - close(r.done) -} - -// for testing -func (r *raftNode) pauseSending() { - p := r.transport.(rafthttp.Pausable) - p.Pause() -} - -func (r *raftNode) resumeSending() { - p := r.transport.(rafthttp.Pausable) - p.Resume() -} - -// advanceTicksForElection advances ticks to the node for fast election. -// This reduces the time to wait for first leader election if bootstrapping the whole -// cluster, while leaving at least 1 heartbeat for possible existing leader -// to contact it. -func advanceTicksForElection(n raft.Node, electionTicks int) { - for i := 0; i < electionTicks-1; i++ { - n.Tick() - } -} - -func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) { - var err error - member := cl.MemberByName(cfg.Name) - metadata := pbutil.MustMarshal( - &pb.Metadata{ - NodeID: uint64(member.ID), - ClusterID: uint64(cl.ID()), - }, - ) - if w, err = wal.Create(cfg.WALDir(), metadata); err != nil { - plog.Fatalf("create wal error: %v", err) - } - peers := make([]raft.Peer, len(ids)) - for i, id := range ids { - ctx, err := json.Marshal((*cl).Member(id)) - if err != nil { - plog.Panicf("marshal member should never fail: %v", err) - } - peers[i] = raft.Peer{ID: uint64(id), Context: ctx} - } - id = member.ID - plog.Infof("starting member %s in cluster %s", id, cl.ID()) - s = raft.NewMemoryStorage() - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - } - - n = raft.StartNode(c, peers) - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - advanceTicksForElection(n, c.ElectionTick) - return -} - -func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { - var walsnap walpb.Snapshot - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) - - plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit) - cl := membership.NewCluster("") - cl.SetID(cid) - s := raft.NewMemoryStorage() - if snapshot != nil { - s.ApplySnapshot(*snapshot) - } - s.SetHardState(st) - s.Append(ents) - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - } - - n := raft.RestartNode(c) - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - advanceTicksForElection(n, c.ElectionTick) - return id, cl, n, s, w -} - -func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) 
(types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { - var walsnap walpb.Snapshot - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) - - // discard the previously uncommitted entries - for i, ent := range ents { - if ent.Index > st.Commit { - plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i) - ents = ents[:i] - break - } - } - - // force append the configuration change entries - toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit) - ents = append(ents, toAppEnts...) - - // force commit newly appended entries - err := w.Save(raftpb.HardState{}, toAppEnts) - if err != nil { - plog.Fatalf("%v", err) - } - if len(ents) != 0 { - st.Commit = ents[len(ents)-1].Index - } - - plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit) - cl := membership.NewCluster("") - cl.SetID(cid) - s := raft.NewMemoryStorage() - if snapshot != nil { - s.ApplySnapshot(*snapshot) - } - s.SetHardState(st) - s.Append(ents) - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - } - n := raft.RestartNode(c) - raftStatus = n.Status - return id, cl, n, s, w -} - -// getIDs returns an ordered set of IDs included in the given snapshot and -// the entries. The given snapshot/entries can contain two kinds of -// ID-related entry: -// - ConfChangeAddNode, in which case the contained ID will be added into the set. -// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set. -func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { - ids := make(map[uint64]bool) - if snap != nil { - for _, id := range snap.Metadata.ConfState.Nodes { - ids[id] = true - } - } - for _, e := range ents { - if e.Type != raftpb.EntryConfChange { - continue - } - var cc raftpb.ConfChange - pbutil.MustUnmarshal(&cc, e.Data) - switch cc.Type { - case raftpb.ConfChangeAddNode: - ids[cc.NodeID] = true - case raftpb.ConfChangeRemoveNode: - delete(ids, cc.NodeID) - case raftpb.ConfChangeUpdateNode: - // do nothing - default: - plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!") - } - } - sids := make(types.Uint64Slice, 0, len(ids)) - for id := range ids { - sids = append(sids, id) - } - sort.Sort(sids) - return []uint64(sids) -} - -// createConfigChangeEnts creates a series of Raft entries (i.e. -// EntryConfChange) to remove the set of given IDs from the cluster. The ID -// `self` is _not_ removed, even if present in the set. -// If `self` is not inside the given ids, it creates a Raft entry to add a -// default member with the given `self`. 
-func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry { - ents := make([]raftpb.Entry, 0) - next := index + 1 - found := false - for _, id := range ids { - if id == self { - found = true - continue - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - next++ - } - if !found { - m := membership.Member{ - ID: types.ID(self), - RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, - } - ctx, err := json.Marshal(m) - if err != nil { - plog.Panicf("marshal member should never fail: %v", err) - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: self, - Context: ctx, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - } - return ents -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go deleted file mode 100644 index 98eb2cc7b29..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/server.go +++ /dev/null @@ -1,1648 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "encoding/json" - "expvar" - "fmt" - "math" - "math/rand" - "net/http" - "os" - "path" - "path/filepath" - "regexp" - "sync" - "sync/atomic" - "time" - - "github.com/coreos/etcd/alarm" - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/compactor" - "github.com/coreos/etcd/discovery" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/contention" - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/pkg/idutil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/runtime" - "github.com/coreos/etcd/pkg/schedule" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/pkg/wait" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/rafthttp" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - "github.com/coreos/etcd/wal" - "github.com/coreos/go-semver/semver" - "github.com/coreos/pkg/capnslog" - "golang.org/x/net/context" -) - -const ( - DefaultSnapCount = 10000 - - StoreClusterPrefix = "/0" - StoreKeysPrefix = "/1" - - // HealthInterval is the minimum time the cluster should be healthy - // before accepting add member requests. 
- HealthInterval = 5 * time.Second - - purgeFileInterval = 30 * time.Second - // monitorVersionInterval should be smaller than the timeout - // on the connection. Or we will not be able to reuse the connection - // (since it will timeout). - monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second - - databaseFilename = "db" - // max number of in-flight snapshot messages etcdserver allows to have - // This number is more than enough for most clusters with 5 machines. - maxInFlightMsgSnap = 16 - - releaseDelayAfterSnapshot = 30 * time.Second - - // maxPendingRevokes is the maximum number of outstanding expired lease revocations. - maxPendingRevokes = 16 -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver") - - storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes")) -) - -func init() { - rand.Seed(time.Now().UnixNano()) - - expvar.Publish( - "file_descriptor_limit", - expvar.Func( - func() interface{} { - n, _ := runtime.FDLimit() - return n - }, - ), - ) -} - -type Response struct { - Event *store.Event - Watcher store.Watcher - err error -} - -type Server interface { - // Start performs any initialization of the Server necessary for it to - // begin serving requests. It must be called before Do or Process. - // Start must be non-blocking; any long-running server functionality - // should be implemented in goroutines. - Start() - // Stop terminates the Server and performs any necessary finalization. - // Do and Process cannot be called after Stop has been invoked. - Stop() - // ID returns the ID of the Server. - ID() types.ID - // Leader returns the ID of the leader Server. - Leader() types.ID - // Do takes a request and attempts to fulfill it, returning a Response. - Do(ctx context.Context, r pb.Request) (Response, error) - // Process takes a raft message and applies it to the server's raft state - // machine, respecting any timeout of the given context. - Process(ctx context.Context, m raftpb.Message) error - // AddMember attempts to add a member into the cluster. It will return - // ErrIDRemoved if member ID is removed from the cluster, or return - // ErrIDExists if member ID exists in the cluster. - AddMember(ctx context.Context, memb membership.Member) error - // RemoveMember attempts to remove a member from the cluster. It will - // return ErrIDRemoved if member ID is removed from the cluster, or return - // ErrIDNotFound if member ID is not in the cluster. - RemoveMember(ctx context.Context, id uint64) error - - // UpdateMember attempts to update an existing member in the cluster. It will - // return ErrIDNotFound if the member ID does not exist. - UpdateMember(ctx context.Context, updateMemb membership.Member) error - - // ClusterVersion is the cluster-wide minimum major.minor version. - // Cluster version is set to the min version that an etcd member is - // compatible with when first bootstrap. - // - // ClusterVersion is nil until the cluster is bootstrapped (has a quorum). - // - // During a rolling upgrades, the ClusterVersion will be updated - // automatically after a sync. (5 second by default) - // - // The API/raft component can utilize ClusterVersion to determine if - // it can accept a client request or a raft RPC. - // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and - // the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since - // this feature is introduced post 2.0. 
- ClusterVersion() *semver.Version -} - -// EtcdServer is the production implementation of the Server interface -type EtcdServer struct { - // inflightSnapshots holds count the number of snapshots currently inflight. - inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned. - appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. - committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. - // consistIndex used to hold the offset of current executing entry - // It is initialized to 0 before executing any entry. - consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned. - Cfg *ServerConfig - - readych chan struct{} - r raftNode - - snapCount uint64 - - w wait.Wait - - readMu sync.RWMutex - // read routine notifies etcd server that it waits for reading by sending an empty struct to - // readwaitC - readwaitc chan struct{} - // readNotifier is used to notify the read routine that it can process the request - // when there is no error - readNotifier *notifier - - // stop signals the run goroutine should shutdown. - stop chan struct{} - // stopping is closed by run goroutine on shutdown. - stopping chan struct{} - // done is closed when all goroutines from start() complete. - done chan struct{} - - errorc chan error - id types.ID - attributes membership.Attributes - - cluster *membership.RaftCluster - - store store.Store - - applyV2 ApplierV2 - - // applyV3 is the applier with auth and quotas - applyV3 applierV3 - // applyV3Base is the core applier without auth or quotas - applyV3Base applierV3 - applyWait wait.WaitTime - - kv mvcc.ConsistentWatchableKV - lessor lease.Lessor - bemu sync.Mutex - be backend.Backend - authStore auth.AuthStore - alarmStore *alarm.AlarmStore - - stats *stats.ServerStats - lstats *stats.LeaderStats - - SyncTicker <-chan time.Time - // compactor is used to auto-compact the KV. - compactor *compactor.Periodic - - // peerRt used to send requests (version, lease) to peers. - peerRt http.RoundTripper - reqIDGen *idutil.Generator - - // forceVersionC is used to force the version monitor loop - // to detect the cluster version immediately. - forceVersionC chan struct{} - - // wgMu blocks concurrent waitgroup mutation while server stopping - wgMu sync.RWMutex - // wg is used to wait for the go routines that depends on the server state - // to exit when stopping the server. - wg sync.WaitGroup -} - -// NewServer creates a new EtcdServer from the supplied configuration. The -// configuration is considered static for the lifetime of the EtcdServer. 
-func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { - st := store.New(StoreClusterPrefix, StoreKeysPrefix) - - var ( - w *wal.WAL - n raft.Node - s *raft.MemoryStorage - id types.ID - cl *membership.RaftCluster - ) - - if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { - return nil, fmt.Errorf("cannot access data directory: %v", terr) - } - - haveWAL := wal.Exist(cfg.WALDir()) - - if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil { - plog.Fatalf("create snapshot directory error: %v", err) - } - ss := snap.New(cfg.SnapDir()) - - bepath := filepath.Join(cfg.SnapDir(), databaseFilename) - beExist := fileutil.Exist(bepath) - - var be backend.Backend - beOpened := make(chan struct{}) - go func() { - be = backend.NewDefaultBackend(bepath) - beOpened <- struct{}{} - }() - - select { - case <-beOpened: - case <-time.After(time.Second): - plog.Warningf("another etcd process is running with the same data dir and holding the file lock.") - plog.Warningf("waiting for it to exit before starting...") - <-beOpened - } - - defer func() { - if err != nil { - be.Close() - } - }() - - prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout()) - if err != nil { - return nil, err - } - var ( - remotes []*membership.Member - snapshot *raftpb.Snapshot - ) - - switch { - case !haveWAL && !cfg.NewCluster: - if err = cfg.VerifyJoinExisting(); err != nil { - return nil, err - } - cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) - if err != nil { - return nil, err - } - existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt) - if gerr != nil { - return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) - } - if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil { - return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) - } - if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) { - return nil, fmt.Errorf("incompatible with current running cluster") - } - - remotes = existingCluster.Members() - cl.SetID(existingCluster.ID()) - cl.SetStore(st) - cl.SetBackend(be) - cfg.Print() - id, n, s, w = startNode(cfg, cl, nil) - case !haveWAL && cfg.NewCluster: - if err = cfg.VerifyBootstrap(); err != nil { - return nil, err - } - cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) - if err != nil { - return nil, err - } - m := cl.MemberByName(cfg.Name) - if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) { - return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) - } - if cfg.ShouldDiscover() { - var str string - str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) - if err != nil { - return nil, &DiscoveryError{Op: "join", Err: err} - } - var urlsmap types.URLsMap - urlsmap, err = types.NewURLsMap(str) - if err != nil { - return nil, err - } - if checkDuplicateURL(urlsmap) { - return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) - } - if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil { - return nil, err - } - } - cl.SetStore(st) - cl.SetBackend(be) - cfg.PrintWithInitial() - id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) - case haveWAL: - if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { - return nil, fmt.Errorf("cannot write to member directory: %v", err) - } - - if err = fileutil.IsDirWriteable(cfg.WALDir()); err != 
nil { - return nil, fmt.Errorf("cannot write to WAL directory: %v", err) - } - - if cfg.ShouldDiscover() { - plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir()) - } - snapshot, err = ss.Load() - if err != nil && err != snap.ErrNoSnapshot { - return nil, err - } - if snapshot != nil { - if err = st.Recovery(snapshot.Data); err != nil { - plog.Panicf("recovered store from snapshot error: %v", err) - } - plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) - } - cfg.Print() - if !cfg.ForceNewCluster { - id, cl, n, s, w = restartNode(cfg, snapshot) - } else { - id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot) - } - cl.SetStore(st) - cl.SetBackend(be) - cl.Recover(api.UpdateCapability) - if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist { - os.RemoveAll(bepath) - return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath) - } - default: - return nil, fmt.Errorf("unsupported bootstrap config") - } - - if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil { - return nil, fmt.Errorf("cannot access member directory: %v", terr) - } - - sstats := &stats.ServerStats{ - Name: cfg.Name, - ID: id.String(), - } - sstats.Initialize() - lstats := stats.NewLeaderStats(id.String()) - - heartbeat := time.Duration(cfg.TickMs) * time.Millisecond - srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: cfg, - snapCount: cfg.SnapCount, - errorc: make(chan error, 1), - store: st, - r: raftNode{ - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - ticker: time.Tick(heartbeat), - // set up contention detectors for raft heartbeat message. - // expect to send a heartbeat within 2 heartbeat intervals. - td: contention.NewTimeoutDetector(2 * heartbeat), - heartbeat: heartbeat, - raftStorage: s, - storage: NewStorage(w, ss), - msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - readStateC: make(chan raft.ReadState, 1), - }, - id: id, - attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, - cluster: cl, - stats: sstats, - lstats: lstats, - SyncTicker: time.Tick(500 * time.Millisecond), - peerRt: prt, - reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), - forceVersionC: make(chan struct{}), - } - - srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster} - - srv.be = be - minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat - - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. - // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. - srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds()))) - srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex) - if beExist { - kvindex := srv.kv.ConsistentIndex() - // TODO: remove kvindex != 0 checking when we do not expect users to upgrade - // etcd from pre-3.0 release. 
- if snapshot != nil && kvindex < snapshot.Metadata.Index { - if kvindex != 0 { - return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index) - } - plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) - } - } - srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) - - srv.authStore = auth.NewAuthStore(srv.be, - func(index uint64) <-chan struct{} { - return srv.applyWait.Wait(index) - }) - if h := cfg.AutoCompactionRetention; h != 0 { - srv.compactor = compactor.NewPeriodic(h, srv.kv, srv) - srv.compactor.Run() - } - - srv.applyV3Base = &applierV3backend{srv} - if err = srv.restoreAlarms(); err != nil { - return nil, err - } - - // TODO: move transport initialization near the definition of remote - tr := &rafthttp.Transport{ - TLSInfo: cfg.PeerTLSInfo, - DialTimeout: cfg.peerDialTimeout(), - ID: id, - URLs: cfg.PeerURLs, - ClusterID: cl.ID(), - Raft: srv, - Snapshotter: ss, - ServerStats: sstats, - LeaderStats: lstats, - ErrorC: srv.errorc, - } - if err = tr.Start(); err != nil { - return nil, err - } - // add all remotes into transport - for _, m := range remotes { - if m.ID != id { - tr.AddRemote(m.ID, m.PeerURLs) - } - } - for _, m := range cl.Members() { - if m.ID != id { - tr.AddPeer(m.ID, m.PeerURLs) - } - } - srv.r.transport = tr - - return srv, nil -} - -// Start prepares and starts server in a new goroutine. It is no longer safe to -// modify a server's fields after it has been sent to Start. -// It also starts a goroutine to publish its server information. -func (s *EtcdServer) Start() { - s.start() - s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) - s.goAttach(s.purgeFile) - s.goAttach(func() { monitorFileDescriptor(s.stopping) }) - s.goAttach(s.monitorVersions) - s.goAttach(s.linearizableReadLoop) -} - -// start prepares and starts server in a new goroutine. It is no longer safe to -// modify a server's fields after it has been sent to Start. -// This function is just used for testing. -func (s *EtcdServer) start() { - if s.snapCount == 0 { - plog.Infof("set snapshot count to default %d", DefaultSnapCount) - s.snapCount = DefaultSnapCount - } - s.w = wait.New() - s.applyWait = wait.NewTimeList() - s.done = make(chan struct{}) - s.stop = make(chan struct{}) - s.stopping = make(chan struct{}) - s.readwaitc = make(chan struct{}, 1) - s.readNotifier = newNotifier() - if s.ClusterVersion() != nil { - plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String())) - } else { - plog.Infof("starting server... 
[version: %v, cluster version: to_be_decided]", version.Version) - } - // TODO: if this is an empty log, writes all peer infos - // into the first entry - go s.run() -} - -func (s *EtcdServer) purgeFile() { - var serrc, werrc <-chan error - if s.Cfg.MaxSnapFiles > 0 { - serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) - } - if s.Cfg.MaxWALFiles > 0 { - werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done) - } - select { - case e := <-werrc: - plog.Fatalf("failed to purge wal file %v", e) - case e := <-serrc: - plog.Fatalf("failed to purge snap file %v", e) - case <-s.stopping: - return - } -} - -func (s *EtcdServer) ID() types.ID { return s.id } - -func (s *EtcdServer) Cluster() *membership.RaftCluster { return s.cluster } - -func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() } - -func (s *EtcdServer) Lessor() lease.Lessor { return s.lessor } - -func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) } - -func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { - if s.cluster.IsIDRemoved(types.ID(m.From)) { - plog.Warningf("reject message from removed member %s", types.ID(m.From).String()) - return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") - } - if m.Type == raftpb.MsgApp { - s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size()) - } - return s.r.Step(ctx, m) -} - -func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) } - -func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) } - -// ReportSnapshot reports snapshot sent status to the raft state machine, -// and clears the used snapshot from the snapshot store. -func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) { - s.r.ReportSnapshot(id, status) -} - -type etcdProgress struct { - confState raftpb.ConfState - snapi uint64 - appliedt uint64 - appliedi uint64 -} - -// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode, -// and helps decouple state machine logic from Raft algorithms. 
-// TODO: add a state machine interface to apply the commit entries and do snapshot/recover -type raftReadyHandler struct { - updateLeadership func() - updateCommittedIndex func(uint64) -} - -func (s *EtcdServer) run() { - snap, err := s.r.raftStorage.Snapshot() - if err != nil { - plog.Panicf("get snapshot from raft storage error: %v", err) - } - - var ( - smu sync.RWMutex - syncC <-chan time.Time - ) - setSyncC := func(ch <-chan time.Time) { - smu.Lock() - syncC = ch - smu.Unlock() - } - getSyncC := func() (ch <-chan time.Time) { - smu.RLock() - ch = syncC - smu.RUnlock() - return - } - rh := &raftReadyHandler{ - updateLeadership: func() { - if !s.isLeader() { - if s.lessor != nil { - s.lessor.Demote() - } - if s.compactor != nil { - s.compactor.Pause() - } - setSyncC(nil) - } else { - setSyncC(s.SyncTicker) - if s.compactor != nil { - s.compactor.Resume() - } - } - - // TODO: remove the nil checking - // current test utility does not provide the stats - if s.stats != nil { - s.stats.BecomeLeader() - } - if s.r.td != nil { - s.r.td.Reset() - } - }, - updateCommittedIndex: func(ci uint64) { - cci := s.getCommittedIndex() - if ci > cci { - s.setCommittedIndex(ci) - } - }, - } - s.r.start(rh) - - // asynchronously accept apply packets, dispatch progress in-order - sched := schedule.NewFIFOScheduler() - ep := etcdProgress{ - confState: snap.Metadata.ConfState, - snapi: snap.Metadata.Index, - appliedt: snap.Metadata.Term, - appliedi: snap.Metadata.Index, - } - - defer func() { - s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping - close(s.stopping) - s.wgMu.Unlock() - - sched.Stop() - - // wait for gouroutines before closing raft so wal stays open - s.wg.Wait() - - // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines - // by adding a peer after raft stops the transport - s.r.stop() - - // kv, lessor and backend can be nil if running without v3 enabled - // or running unit tests. 
- if s.lessor != nil { - s.lessor.Stop() - } - if s.kv != nil { - s.kv.Close() - } - if s.authStore != nil { - s.authStore.Close() - } - if s.be != nil { - s.be.Close() - } - if s.compactor != nil { - s.compactor.Stop() - } - close(s.done) - }() - - var expiredLeaseC <-chan []*lease.Lease - if s.lessor != nil { - expiredLeaseC = s.lessor.ExpiredLeasesC() - } - - for { - select { - case ap := <-s.r.apply(): - f := func(context.Context) { s.applyAll(&ep, &ap) } - sched.Schedule(f) - case leases := <-expiredLeaseC: - s.goAttach(func() { - // Increases throughput of expired leases deletion process through parallelization - c := make(chan struct{}, maxPendingRevokes) - for _, lease := range leases { - select { - case c <- struct{}{}: - case <-s.stopping: - return - } - lid := lease.ID - s.goAttach(func() { - s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(lid)}) - <-c - }) - } - }) - case err := <-s.errorc: - plog.Errorf("%s", err) - plog.Infof("the data-dir used by this member must be removed.") - return - case <-getSyncC(): - if s.store.HasTTLKeys() { - s.sync(s.Cfg.ReqTimeout()) - } - case <-s.stop: - return - } - } -} - -func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { - s.applySnapshot(ep, apply) - st := time.Now() - s.applyEntries(ep, apply) - d := time.Since(st) - entriesNum := len(apply.entries) - if entriesNum != 0 && d > time.Duration(entriesNum)*warnApplyDuration { - plog.Warningf("apply entries took too long [%v for %d entries]", d, len(apply.entries)) - plog.Warningf("avoid queries with large range/delete range!") - } - proposalsApplied.Set(float64(ep.appliedi)) - s.applyWait.Trigger(ep.appliedi) - // wait for the raft routine to finish the disk writes before triggering a - // snapshot. or applied index might be greater than the last index in raft - // storage, since the raft routine might be slower than apply routine. - <-apply.raftDone - - s.triggerSnapshot(ep) - select { - // snapshot requested via send() - case m := <-s.r.msgSnapC: - merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState) - s.sendMergedSnap(merged) - default: - } -} - -func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { - if raft.IsEmptySnap(apply.snapshot) { - return - } - - plog.Infof("applying snapshot at index %d...", ep.snapi) - defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi) - - if apply.snapshot.Metadata.Index <= ep.appliedi { - plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1", - apply.snapshot.Metadata.Index, ep.appliedi) - } - - snapfn, err := s.r.storage.DBFilePath(apply.snapshot.Metadata.Index) - if err != nil { - plog.Panicf("get database snapshot file path error: %v", err) - } - - fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename) - if err := os.Rename(snapfn, fn); err != nil { - plog.Panicf("rename snapshot file error: %v", err) - } - - newbe := backend.NewDefaultBackend(fn) - - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. - // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. 
- if s.lessor != nil { - plog.Info("recovering lessor...") - s.lessor.Recover(newbe, s.kv) - plog.Info("finished recovering lessor") - } - - plog.Info("restoring mvcc store...") - - if err := s.kv.Restore(newbe); err != nil { - plog.Panicf("restore KV error: %v", err) - } - s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex()) - - plog.Info("finished restoring mvcc store") - - // Closing old backend might block until all the txns - // on the backend are finished. - // We do not want to wait on closing the old backend. - s.bemu.Lock() - oldbe := s.be - go func() { - plog.Info("closing old backend...") - defer plog.Info("finished closing old backend") - - if err := oldbe.Close(); err != nil { - plog.Panicf("close backend error: %v", err) - } - }() - - s.be = newbe - s.bemu.Unlock() - - plog.Info("recovering alarms...") - if err := s.restoreAlarms(); err != nil { - plog.Panicf("restore alarms error: %v", err) - } - plog.Info("finished recovering alarms") - - if s.authStore != nil { - plog.Info("recovering auth store...") - s.authStore.Recover(newbe) - plog.Info("finished recovering auth store") - } - - plog.Info("recovering store v2...") - if err := s.store.Recovery(apply.snapshot.Data); err != nil { - plog.Panicf("recovery store error: %v", err) - } - plog.Info("finished recovering store v2") - - s.cluster.SetBackend(s.be) - plog.Info("recovering cluster configuration...") - s.cluster.Recover(api.UpdateCapability) - plog.Info("finished recovering cluster configuration") - - plog.Info("removing old peers from network...") - // recover raft transport - s.r.transport.RemoveAllPeers() - plog.Info("finished removing old peers from network") - - plog.Info("adding peers from new cluster configuration into network...") - for _, m := range s.cluster.Members() { - if m.ID == s.ID() { - continue - } - s.r.transport.AddPeer(m.ID, m.PeerURLs) - } - plog.Info("finished adding peers from new cluster configuration into network...") - - ep.appliedt = apply.snapshot.Metadata.Term - ep.appliedi = apply.snapshot.Metadata.Index - ep.snapi = ep.appliedi - ep.confState = apply.snapshot.Metadata.ConfState -} - -func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { - if len(apply.entries) == 0 { - return - } - firsti := apply.entries[0].Index - if firsti > ep.appliedi+1 { - plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi) - } - var ents []raftpb.Entry - if ep.appliedi+1-firsti < uint64(len(apply.entries)) { - ents = apply.entries[ep.appliedi+1-firsti:] - } - if len(ents) == 0 { - return - } - var shouldstop bool - if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop { - go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster")) - } -} - -func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { - if ep.appliedi-ep.snapi <= s.snapCount { - return - } - - plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi) - s.snapshot(ep.appliedi, ep.confState) - ep.snapi = ep.appliedi -} - -func (s *EtcdServer) isMultiNode() bool { - return s.cluster != nil && len(s.cluster.MemberIDs()) > 1 -} - -func (s *EtcdServer) isLeader() bool { - return uint64(s.ID()) == s.Lead() -} - -// transferLeadership transfers the leader to the given transferee. -// TODO: maybe expose to client? 
-func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error { - now := time.Now() - interval := time.Duration(s.Cfg.TickMs) * time.Millisecond - - plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee)) - s.r.TransferLeadership(ctx, lead, transferee) - for s.Lead() != transferee { - select { - case <-ctx.Done(): // time out - return ErrTimeoutLeaderTransfer - case <-time.After(interval): - } - } - - // TODO: drain all requests, or drop all messages to the old leader - - plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now)) - return nil -} - -// TransferLeadership transfers the leader to the chosen transferee. -func (s *EtcdServer) TransferLeadership() error { - if !s.isLeader() { - plog.Printf("skipped leadership transfer for stopping non-leader member") - return nil - } - - if !s.isMultiNode() { - plog.Printf("skipped leadership transfer for single member cluster") - return nil - } - - transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs()) - if !ok { - return ErrUnhealthy - } - - tm := s.Cfg.ReqTimeout() - ctx, cancel := context.WithTimeout(context.TODO(), tm) - err := s.transferLeadership(ctx, s.Lead(), uint64(transferee)) - cancel() - return err -} - -// HardStop stops the server without coordination with other members in the cluster. -func (s *EtcdServer) HardStop() { - select { - case s.stop <- struct{}{}: - case <-s.done: - return - } - <-s.done -} - -// Stop stops the server gracefully, and shuts down the running goroutine. -// Stop should be called after a Start(s), otherwise it will block forever. -// When stopping leader, Stop transfers its leadership to one of its peers -// before stopping the server. -func (s *EtcdServer) Stop() { - if err := s.TransferLeadership(); err != nil { - plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err) - } - s.HardStop() -} - -// ReadyNotify returns a channel that will be closed when the server -// is ready to serve client requests -func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych } - -func (s *EtcdServer) stopWithDelay(d time.Duration, err error) { - select { - case <-time.After(d): - case <-s.done: - } - select { - case s.errorc <- err: - default: - } -} - -// StopNotify returns a channel that receives a empty struct -// when the server is stopped. -func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } - -func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() } - -func (s *EtcdServer) LeaderStats() []byte { - lead := atomic.LoadUint64(&s.r.lead) - if lead != uint64(s.id) { - return nil - } - return s.lstats.JSON() -} - -func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } - -func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { - if s.authStore == nil { - // In the context of ordinal etcd process, s.authStore will never be nil. - // This branch is for handling cases in server_test.go - return nil - } - - // Note that this permission check is done in the API layer, - // so TOCTOU problem can be caused potentially in a schedule like this: - // update membership with user A -> revoke root role of A -> apply membership change - // in the state machine layer - // However, both of membership change and role management requires the root privilege. - // So careful operation by admins can prevent the problem. 
- authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) - if err != nil { - return err - } - - return s.AuthStore().IsAdminPermitted(authInfo) -} - -func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) error { - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return err - } - - if s.Cfg.StrictReconfigCheck { - // by default StrictReconfigCheck is enabled; reject new members if unhealthy - if !s.cluster.IsReadyToAddNewMember() { - plog.Warningf("not enough started members, rejecting member add %+v", memb) - return ErrNotEnoughStartedMembers - } - if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { - plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) - return ErrUnhealthy - } - } - - // TODO: move Member to protobuf type - b, err := json.Marshal(memb) - if err != nil { - return err - } - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: uint64(memb.ID), - Context: b, - } - return s.configure(ctx, cc) -} - -func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error { - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return err - } - - // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss - if err := s.mayRemoveMember(types.ID(id)); err != nil { - return err - } - - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - return s.configure(ctx, cc) -} - -func (s *EtcdServer) mayRemoveMember(id types.ID) error { - if !s.Cfg.StrictReconfigCheck { - return nil - } - - if !s.cluster.IsReadyToRemoveMember(uint64(id)) { - plog.Warningf("not enough started members, rejecting remove member %s", id) - return ErrNotEnoughStartedMembers - } - - // downed member is safe to remove since it's not part of the active quorum - if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() { - return nil - } - - // protect quorum if some members are down - m := s.cluster.Members() - active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m) - if (active - 1) < 1+((len(m)-1)/2) { - plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id) - return ErrUnhealthy - } - - return nil -} - -func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) error { - b, merr := json.Marshal(memb) - if merr != nil { - return merr - } - - if err := s.checkMembershipOperationPermission(ctx); err != nil { - return err - } - cc := raftpb.ConfChange{ - Type: raftpb.ConfChangeUpdateNode, - NodeID: uint64(memb.ID), - Context: b, - } - return s.configure(ctx, cc) -} - -// Implement the RaftTimer interface - -func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) } - -func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) } - -// Lead is only for testing purposes. -// TODO: add Raft server interface to expose raft related info: -// Index, Term, Lead, Committed, Applied, LastIndex, etc. -func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } - -func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } - -// configure sends a configuration change through consensus and -// then waits for it to be applied to the server. It -// will block until the change is performed or there is an error. 
-func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error { - cc.ID = s.reqIDGen.Next() - ch := s.w.Register(cc.ID) - start := time.Now() - if err := s.r.ProposeConfChange(ctx, cc); err != nil { - s.w.Trigger(cc.ID, nil) - return err - } - select { - case x := <-ch: - if err, ok := x.(error); ok { - return err - } - if x != nil { - plog.Panicf("return type should always be error") - } - return nil - case <-ctx.Done(): - s.w.Trigger(cc.ID, nil) // GC wait - return s.parseProposeCtxErr(ctx.Err(), start) - case <-s.stopping: - return ErrStopped - } -} - -// sync proposes a SYNC request and is non-blocking. -// This makes no guarantee that the request will be proposed or performed. -// The request will be canceled after the given timeout. -func (s *EtcdServer) sync(timeout time.Duration) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - req := pb.Request{ - Method: "SYNC", - ID: s.reqIDGen.Next(), - Time: time.Now().UnixNano(), - } - data := pbutil.MustMarshal(&req) - // There is no promise that node has leader when do SYNC request, - // so it uses goroutine to propose. - s.goAttach(func() { - s.r.Propose(ctx, data) - cancel() - }) -} - -// publish registers server information into the cluster. The information -// is the JSON representation of this server's member struct, updated with the -// static clientURLs of the server. -// The function keeps attempting to register until it succeeds, -// or its server is stopped. -func (s *EtcdServer) publish(timeout time.Duration) { - b, err := json.Marshal(s.attributes) - if err != nil { - plog.Panicf("json marshal error: %v", err) - return - } - req := pb.Request{ - Method: "PUT", - Path: membership.MemberAttributesStorePath(s.id), - Val: string(b), - } - - for { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - _, err := s.Do(ctx, req) - cancel() - switch err { - case nil: - close(s.readych) - plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID()) - return - case ErrStopped: - plog.Infof("aborting publish because server is stopped") - return - default: - plog.Errorf("publish error: %v", err) - } - } -} - -func (s *EtcdServer) sendMergedSnap(merged snap.Message) { - atomic.AddInt64(&s.inflightSnapshots, 1) - - s.r.transport.SendSnapshot(merged) - s.goAttach(func() { - select { - case ok := <-merged.CloseNotify(): - // delay releasing inflight snapshot for another 30 seconds to - // block log compaction. - // If the follower still fails to catch up, it is probably just too slow - // to catch up. We cannot avoid the snapshot cycle anyway. - if ok { - select { - case <-time.After(releaseDelayAfterSnapshot): - case <-s.stopping: - } - } - atomic.AddInt64(&s.inflightSnapshots, -1) - case <-s.stopping: - return - } - }) -} - -// apply takes entries received from Raft (after it has been committed) and -// applies them to the current state of the EtcdServer. -// The given entries should not be empty. 
-func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) { - for i := range es { - e := es[i] - switch e.Type { - case raftpb.EntryNormal: - s.applyEntryNormal(&e) - case raftpb.EntryConfChange: - // set the consistent index of current executing entry - if e.Index > s.consistIndex.ConsistentIndex() { - s.consistIndex.setConsistentIndex(e.Index) - } - var cc raftpb.ConfChange - pbutil.MustUnmarshal(&cc, e.Data) - removedSelf, err := s.applyConfChange(cc, confState) - s.setAppliedIndex(e.Index) - shouldStop = shouldStop || removedSelf - s.w.Trigger(cc.ID, err) - default: - plog.Panicf("entry type should be either EntryNormal or EntryConfChange") - } - atomic.StoreUint64(&s.r.index, e.Index) - atomic.StoreUint64(&s.r.term, e.Term) - appliedt = e.Term - appliedi = e.Index - } - return appliedt, appliedi, shouldStop -} - -// applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer -func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { - shouldApplyV3 := false - if e.Index > s.consistIndex.ConsistentIndex() { - // set the consistent index of current executing entry - s.consistIndex.setConsistentIndex(e.Index) - shouldApplyV3 = true - } - defer s.setAppliedIndex(e.Index) - - // raft state machine may generate noop entry when leader confirmation. - // skip it in advance to avoid some potential bug in the future - if len(e.Data) == 0 { - select { - case s.forceVersionC <- struct{}{}: - default: - } - // promote lessor when the local member is leader and finished - // applying all entries from the last term. - if s.isLeader() { - s.lessor.Promote(s.Cfg.electionTimeout()) - } - return - } - - var raftReq pb.InternalRaftRequest - if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible - var r pb.Request - pbutil.MustUnmarshal(&r, e.Data) - s.w.Trigger(r.ID, s.applyV2Request(&r)) - return - } - if raftReq.V2 != nil { - req := raftReq.V2 - s.w.Trigger(req.ID, s.applyV2Request(req)) - return - } - - // do not re-apply applied entries. - if !shouldApplyV3 { - return - } - - id := raftReq.ID - if id == 0 { - id = raftReq.Header.ID - } - - var ar *applyResult - needResult := s.w.IsRegistered(id) - if needResult || !noSideEffect(&raftReq) { - if !needResult && raftReq.Txn != nil { - removeNeedlessRangeReqs(raftReq.Txn) - } - ar = s.applyV3.Apply(&raftReq) - } - - if ar == nil { - return - } - - if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { - s.w.Trigger(id, ar) - return - } - - plog.Errorf("applying raft message exceeded backend quota") - s.goAttach(func() { - a := &pb.AlarmRequest{ - MemberID: uint64(s.ID()), - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_NOSPACE, - } - r := pb.InternalRaftRequest{Alarm: a} - s.processInternalRaftRequest(context.TODO(), r) - s.w.Trigger(id, ar) - }) -} - -// applyConfChange applies a ConfChange to the server. 
It is only -// invoked with a ConfChange that has already passed through Raft -func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) { - if err := s.cluster.ValidateConfigurationChange(cc); err != nil { - cc.NodeID = raft.None - s.r.ApplyConfChange(cc) - return false, err - } - *confState = *s.r.ApplyConfChange(cc) - switch cc.Type { - case raftpb.ConfChangeAddNode: - m := new(membership.Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) - } - if cc.NodeID != uint64(m.ID) { - plog.Panicf("nodeID should always be equal to member ID") - } - s.cluster.AddMember(m) - if m.ID != s.id { - s.r.transport.AddPeer(m.ID, m.PeerURLs) - } - case raftpb.ConfChangeRemoveNode: - id := types.ID(cc.NodeID) - s.cluster.RemoveMember(id) - if id == s.id { - return true, nil - } - s.r.transport.RemovePeer(id) - case raftpb.ConfChangeUpdateNode: - m := new(membership.Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) - } - if cc.NodeID != uint64(m.ID) { - plog.Panicf("nodeID should always be equal to member ID") - } - s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes) - if m.ID != s.id { - s.r.transport.UpdatePeer(m.ID, m.PeerURLs) - } - } - return false, nil -} - -// TODO: non-blocking snapshot -func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { - clone := s.store.Clone() - // commit kv to write metadata (for example: consistent index) to disk. - // KV().commit() updates the consistent index in backend. - // All operations that update consistent index must be called sequentially - // from applyAll function. - // So KV().Commit() cannot run in parallel with apply. It has to be called outside - // the go routine created below. - s.KV().Commit() - - s.goAttach(func() { - d, err := clone.SaveNoCopy() - // TODO: current store will never fail to do a snapshot - // what should we do if the store might fail? - if err != nil { - plog.Panicf("store save should never fail: %v", err) - } - snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) - if err != nil { - // the snapshot was done asynchronously with the progress of raft. - // raft might have already got a newer snapshot. - if err == raft.ErrSnapOutOfDate { - return - } - plog.Panicf("unexpected create snapshot error %v", err) - } - // SaveSnap saves the snapshot and releases the locked wal files - // to the snapshot index. - if err = s.r.storage.SaveSnap(snap); err != nil { - plog.Fatalf("save snapshot error: %v", err) - } - plog.Infof("saved snapshot at index %d", snap.Metadata.Index) - - // When sending a snapshot, etcd will pause compaction. - // After receives a snapshot, the slow follower needs to get all the entries right after - // the snapshot sent to catch up. If we do not pause compaction, the log entries right after - // the snapshot sent might already be compacted. It happens when the snapshot takes long time - // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. - if atomic.LoadInt64(&s.inflightSnapshots) != 0 { - plog.Infof("skip compaction since there is an inflight snapshot") - return - } - - // keep some in memory log entries for slow followers. - compacti := uint64(1) - if snapi > numberOfCatchUpEntries { - compacti = snapi - numberOfCatchUpEntries - } - err = s.r.raftStorage.Compact(compacti) - if err != nil { - // the compaction was done asynchronously with the progress of raft. 
- // raft log might already been compact. - if err == raft.ErrCompacted { - return - } - plog.Panicf("unexpected compaction error %v", err) - } - plog.Infof("compacted raft log at %d", compacti) - }) -} - -// CutPeer drops messages to the specified peer. -func (s *EtcdServer) CutPeer(id types.ID) { - tr, ok := s.r.transport.(*rafthttp.Transport) - if ok { - tr.CutPeer(id) - } -} - -// MendPeer recovers the message dropping behavior of the given peer. -func (s *EtcdServer) MendPeer(id types.ID) { - tr, ok := s.r.transport.(*rafthttp.Transport) - if ok { - tr.MendPeer(id) - } -} - -func (s *EtcdServer) PauseSending() { s.r.pauseSending() } - -func (s *EtcdServer) ResumeSending() { s.r.resumeSending() } - -func (s *EtcdServer) ClusterVersion() *semver.Version { - if s.cluster == nil { - return nil - } - return s.cluster.Version() -} - -// monitorVersions checks the member's version every monitorVersionInterval. -// It updates the cluster version if all members agrees on a higher one. -// It prints out log if there is a member with a higher version than the -// local version. -func (s *EtcdServer) monitorVersions() { - for { - select { - case <-s.forceVersionC: - case <-time.After(monitorVersionInterval): - case <-s.stopping: - return - } - - if s.Leader() != s.ID() { - continue - } - - v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt)) - if v != nil { - // only keep major.minor version for comparison - v = &semver.Version{ - Major: v.Major, - Minor: v.Minor, - } - } - - // if the current version is nil: - // 1. use the decided version if possible - // 2. or use the min cluster version - if s.cluster.Version() == nil { - verStr := version.MinClusterVersion - if v != nil { - verStr = v.String() - } - s.goAttach(func() { s.updateClusterVersion(verStr) }) - continue - } - - // update cluster version only if the decided version is greater than - // the current cluster version - if v != nil && s.cluster.Version().LessThan(*v) { - s.goAttach(func() { s.updateClusterVersion(v.String()) }) - } - } -} - -func (s *EtcdServer) updateClusterVersion(ver string) { - if s.cluster.Version() == nil { - plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver)) - } else { - plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver)) - } - req := pb.Request{ - Method: "PUT", - Path: membership.StoreClusterVersionKey(), - Val: ver, - } - ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - _, err := s.Do(ctx, req) - cancel() - switch err { - case nil: - return - case ErrStopped: - plog.Infof("aborting update cluster version because server is stopped") - return - default: - plog.Errorf("error updating cluster version (%v)", err) - } -} - -func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { - switch err { - case context.Canceled: - return ErrCanceled - case context.DeadlineExceeded: - curLeadElected := s.r.leadElectedTime() - prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) - if start.After(prevLeadLost) && start.Before(curLeadElected) { - return ErrTimeoutDueToLeaderFail - } - - lead := types.ID(atomic.LoadUint64(&s.r.lead)) - switch lead { - case types.ID(raft.None): - // TODO: return error to specify it happens because the cluster does not have leader now - case s.ID(): - if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) { - return 
ErrTimeoutDueToConnectionLost - } - default: - if !isConnectedSince(s.r.transport, start, lead) { - return ErrTimeoutDueToConnectionLost - } - } - - return ErrTimeout - default: - return err - } -} - -func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv } -func (s *EtcdServer) Backend() backend.Backend { - s.bemu.Lock() - defer s.bemu.Unlock() - return s.be -} - -func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } - -func (s *EtcdServer) restoreAlarms() error { - s.applyV3 = s.newApplierV3() - as, err := alarm.NewAlarmStore(s) - if err != nil { - return err - } - s.alarmStore = as - if len(as.Get(pb.AlarmType_NOSPACE)) > 0 { - s.applyV3 = newApplierV3Capped(s.applyV3) - } - return nil -} - -func (s *EtcdServer) getAppliedIndex() uint64 { - return atomic.LoadUint64(&s.appliedIndex) -} - -func (s *EtcdServer) setAppliedIndex(v uint64) { - atomic.StoreUint64(&s.appliedIndex, v) -} - -func (s *EtcdServer) getCommittedIndex() uint64 { - return atomic.LoadUint64(&s.committedIndex) -} - -func (s *EtcdServer) setCommittedIndex(v uint64) { - atomic.StoreUint64(&s.committedIndex, v) -} - -// goAttach creates a goroutine on a given function and tracks it using -// the etcdserver waitgroup. -func (s *EtcdServer) goAttach(f func()) { - s.wgMu.RLock() // this blocks with ongoing close(s.stopping) - defer s.wgMu.RUnlock() - select { - case <-s.stopping: - plog.Warning("server has stopped (skipping goAttach)") - return - default: - } - - // now safe to add since waitgroup wait has not started yet - s.wg.Add(1) - go func() { - defer s.wg.Done() - f() - }() -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go deleted file mode 100644 index 9cfc852168b..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "io" - - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" -) - -// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf), -// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message -// as ReadCloser. -func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message { - // get a snapshot of v2 store as []byte - clone := s.store.Clone() - d, err := clone.SaveNoCopy() - if err != nil { - plog.Panicf("store save should never fail: %v", err) - } - - // commit kv to write metadata(for example: consistent index). - s.KV().Commit() - dbsnap := s.be.Snapshot() - // get a snapshot of v3 KV as readCloser - rc := newSnapshotReaderCloser(dbsnap) - - // put the []byte snapshot of store into raft snapshot and return the merged snapshot with - // KV readCloser snapshot. 
- snapshot := raftpb.Snapshot{ - Metadata: raftpb.SnapshotMetadata{ - Index: snapi, - Term: snapt, - ConfState: confState, - }, - Data: d, - } - m.Snapshot = snapshot - - return *snap.NewMessage(m, rc, dbsnap.Size()) -} - -func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { - pr, pw := io.Pipe() - go func() { - n, err := snapshot.WriteTo(pw) - if err == nil { - plog.Infof("wrote database snapshot out [total bytes: %d]", n) - } - pw.CloseWithError(err) - snapshot.Close() - }() - return pr -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go deleted file mode 100644 index 1bed85474e3..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stats - -import ( - "encoding/json" - "math" - "sync" - "time" -) - -// LeaderStats is used by the leader in an etcd cluster, and encapsulates -// statistics about communication with its followers -type LeaderStats struct { - // Leader is the ID of the leader in the etcd cluster. - // TODO(jonboulle): clarify that these are IDs, not names - Leader string `json:"leader"` - Followers map[string]*FollowerStats `json:"followers"` - - sync.Mutex -} - -// NewLeaderStats generates a new LeaderStats with the given id as leader -func NewLeaderStats(id string) *LeaderStats { - return &LeaderStats{ - Leader: id, - Followers: make(map[string]*FollowerStats), - } -} - -func (ls *LeaderStats) JSON() []byte { - ls.Lock() - stats := *ls - ls.Unlock() - b, err := json.Marshal(stats) - // TODO(jonboulle): appropriate error handling? - if err != nil { - plog.Errorf("error marshalling leader stats (%v)", err) - } - return b -} - -func (ls *LeaderStats) Follower(name string) *FollowerStats { - ls.Lock() - defer ls.Unlock() - fs, ok := ls.Followers[name] - if !ok { - fs = &FollowerStats{} - fs.Latency.Minimum = 1 << 63 - ls.Followers[name] = fs - } - return fs -} - -// FollowerStats encapsulates various statistics about a follower in an etcd cluster -type FollowerStats struct { - Latency LatencyStats `json:"latency"` - Counts CountsStats `json:"counts"` - - sync.Mutex -} - -// LatencyStats encapsulates latency statistics. -type LatencyStats struct { - Current float64 `json:"current"` - Average float64 `json:"average"` - averageSquare float64 - StandardDeviation float64 `json:"standardDeviation"` - Minimum float64 `json:"minimum"` - Maximum float64 `json:"maximum"` -} - -// CountsStats encapsulates raft statistics. 
-type CountsStats struct { - Fail uint64 `json:"fail"` - Success uint64 `json:"success"` -} - -// Succ updates the FollowerStats with a successful send -func (fs *FollowerStats) Succ(d time.Duration) { - fs.Lock() - defer fs.Unlock() - - total := float64(fs.Counts.Success) * fs.Latency.Average - totalSquare := float64(fs.Counts.Success) * fs.Latency.averageSquare - - fs.Counts.Success++ - - fs.Latency.Current = float64(d) / (1000000.0) - - if fs.Latency.Current > fs.Latency.Maximum { - fs.Latency.Maximum = fs.Latency.Current - } - - if fs.Latency.Current < fs.Latency.Minimum { - fs.Latency.Minimum = fs.Latency.Current - } - - fs.Latency.Average = (total + fs.Latency.Current) / float64(fs.Counts.Success) - fs.Latency.averageSquare = (totalSquare + fs.Latency.Current*fs.Latency.Current) / float64(fs.Counts.Success) - - // sdv = sqrt(avg(x^2) - avg(x)^2) - fs.Latency.StandardDeviation = math.Sqrt(fs.Latency.averageSquare - fs.Latency.Average*fs.Latency.Average) -} - -// Fail updates the FollowerStats with an unsuccessful send -func (fs *FollowerStats) Fail() { - fs.Lock() - defer fs.Unlock() - fs.Counts.Fail++ -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go b/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go deleted file mode 100644 index 635074c4898..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stats - -import ( - "sync" - "time" -) - -const ( - queueCapacity = 200 -) - -// RequestStats represent the stats for a request. -// It encapsulates the sending time and the size of the request. 
-type RequestStats struct { - SendingTime time.Time - Size int -} - -type statsQueue struct { - items [queueCapacity]*RequestStats - size int - front int - back int - totalReqSize int - rwl sync.RWMutex -} - -func (q *statsQueue) Len() int { - return q.size -} - -func (q *statsQueue) ReqSize() int { - return q.totalReqSize -} - -// FrontAndBack gets the front and back elements in the queue -// We must grab front and back together with the protection of the lock -func (q *statsQueue) frontAndBack() (*RequestStats, *RequestStats) { - q.rwl.RLock() - defer q.rwl.RUnlock() - if q.size != 0 { - return q.items[q.front], q.items[q.back] - } - return nil, nil -} - -// Insert function insert a RequestStats into the queue and update the records -func (q *statsQueue) Insert(p *RequestStats) { - q.rwl.Lock() - defer q.rwl.Unlock() - - q.back = (q.back + 1) % queueCapacity - - if q.size == queueCapacity { //dequeue - q.totalReqSize -= q.items[q.front].Size - q.front = (q.back + 1) % queueCapacity - } else { - q.size++ - } - - q.items[q.back] = p - q.totalReqSize += q.items[q.back].Size - -} - -// Rate function returns the package rate and byte rate -func (q *statsQueue) Rate() (float64, float64) { - front, back := q.frontAndBack() - - if front == nil || back == nil { - return 0, 0 - } - - if time.Since(back.SendingTime) > time.Second { - q.Clear() - return 0, 0 - } - - sampleDuration := back.SendingTime.Sub(front.SendingTime) - - pr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second) - - br := float64(q.ReqSize()) / float64(sampleDuration) * float64(time.Second) - - return pr, br -} - -// Clear function clear up the statsQueue -func (q *statsQueue) Clear() { - q.rwl.Lock() - defer q.rwl.Unlock() - q.back = -1 - q.front = 0 - q.size = 0 - q.totalReqSize = 0 -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go deleted file mode 100644 index cd450e2d199..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stats - -import ( - "encoding/json" - "log" - "sync" - "time" - - "github.com/coreos/etcd/raft" -) - -// ServerStats encapsulates various statistics about an EtcdServer and its -// communication with other members of the cluster -type ServerStats struct { - Name string `json:"name"` - // ID is the raft ID of the node. - // TODO(jonboulle): use ID instead of name? 
- ID string `json:"id"` - State raft.StateType `json:"state"` - StartTime time.Time `json:"startTime"` - - LeaderInfo struct { - Name string `json:"leader"` - Uptime string `json:"uptime"` - StartTime time.Time `json:"startTime"` - } `json:"leaderInfo"` - - RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"` - RecvingPkgRate float64 `json:"recvPkgRate,omitempty"` - RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"` - - SendAppendRequestCnt uint64 `json:"sendAppendRequestCnt"` - SendingPkgRate float64 `json:"sendPkgRate,omitempty"` - SendingBandwidthRate float64 `json:"sendBandwidthRate,omitempty"` - - sendRateQueue *statsQueue - recvRateQueue *statsQueue - - sync.Mutex -} - -func (ss *ServerStats) JSON() []byte { - ss.Lock() - stats := *ss - ss.Unlock() - stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String() - stats.SendingPkgRate, stats.SendingBandwidthRate = stats.SendRates() - stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.RecvRates() - b, err := json.Marshal(stats) - // TODO(jonboulle): appropriate error handling? - if err != nil { - log.Printf("stats: error marshalling server stats: %v", err) - } - return b -} - -// Initialize clears the statistics of ServerStats and resets its start time -func (ss *ServerStats) Initialize() { - if ss == nil { - return - } - now := time.Now() - ss.StartTime = now - ss.LeaderInfo.StartTime = now - ss.sendRateQueue = &statsQueue{ - back: -1, - } - ss.recvRateQueue = &statsQueue{ - back: -1, - } -} - -// RecvRates calculates and returns the rate of received append requests -func (ss *ServerStats) RecvRates() (float64, float64) { - return ss.recvRateQueue.Rate() -} - -// SendRates calculates and returns the rate of sent append requests -func (ss *ServerStats) SendRates() (float64, float64) { - return ss.sendRateQueue.Rate() -} - -// RecvAppendReq updates the ServerStats in response to an AppendRequest -// from the given leader being received -func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) { - ss.Lock() - defer ss.Unlock() - - now := time.Now() - - ss.State = raft.StateFollower - if leader != ss.LeaderInfo.Name { - ss.LeaderInfo.Name = leader - ss.LeaderInfo.StartTime = now - } - - ss.recvRateQueue.Insert( - &RequestStats{ - SendingTime: now, - Size: reqSize, - }, - ) - ss.RecvAppendRequestCnt++ -} - -// SendAppendReq updates the ServerStats in response to an AppendRequest -// being sent by this server -func (ss *ServerStats) SendAppendReq(reqSize int) { - ss.Lock() - defer ss.Unlock() - - ss.becomeLeader() - - ss.sendRateQueue.Insert( - &RequestStats{ - SendingTime: time.Now(), - Size: reqSize, - }, - ) - - ss.SendAppendRequestCnt++ -} - -func (ss *ServerStats) BecomeLeader() { - ss.Lock() - defer ss.Unlock() - ss.becomeLeader() -} - -func (ss *ServerStats) becomeLeader() { - if ss.State != raft.StateLeader { - ss.State = raft.StateLeader - ss.LeaderInfo.Name = ss.ID - ss.LeaderInfo.StartTime = time.Now() - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go b/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go deleted file mode 100644 index 2b5f7071aa7..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package stats defines a standard interface for etcd cluster statistics. -package stats - -import "github.com/coreos/pkg/capnslog" - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/stats") -) - -type Stats interface { - // SelfStats returns the struct representing statistics of this server - SelfStats() []byte - // LeaderStats returns the statistics of all followers in the cluster - // if this server is leader. Otherwise, nil is returned. - LeaderStats() []byte - // StoreStats returns statistics of the store backing this EtcdServer - StoreStats() []byte -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go deleted file mode 100644 index 693618fbd51..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/storage.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "io" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/wal" - "github.com/coreos/etcd/wal/walpb" -) - -type Storage interface { - // Save function saves ents and state to the underlying stable storage. - // Save MUST block until st and ents are on stable storage. - Save(st raftpb.HardState, ents []raftpb.Entry) error - // SaveSnap function saves snapshot to the underlying stable storage. - SaveSnap(snap raftpb.Snapshot) error - // DBFilePath returns the file path of database snapshot saved with given - // id. - DBFilePath(id uint64) (string, error) - // Close closes the Storage and performs finalization. - Close() error -} - -type storage struct { - *wal.WAL - *snap.Snapshotter -} - -func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage { - return &storage{w, s} -} - -// SaveSnap saves the snapshot to disk and release the locked -// wal files since they will not be used. 
-func (st *storage) SaveSnap(snap raftpb.Snapshot) error { - walsnap := walpb.Snapshot{ - Index: snap.Metadata.Index, - Term: snap.Metadata.Term, - } - err := st.WAL.SaveSnapshot(walsnap) - if err != nil { - return err - } - err = st.Snapshotter.SaveSnap(snap) - if err != nil { - return err - } - return st.WAL.ReleaseLockTo(snap.Metadata.Index) -} - -func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { - var ( - err error - wmetadata []byte - ) - - repaired := false - for { - if w, err = wal.Open(waldir, snap); err != nil { - plog.Fatalf("open wal error: %v", err) - } - if wmetadata, st, ents, err = w.ReadAll(); err != nil { - w.Close() - // we can only repair ErrUnexpectedEOF and we never repair twice. - if repaired || err != io.ErrUnexpectedEOF { - plog.Fatalf("read wal error (%v) and cannot be repaired", err) - } - if !wal.Repair(waldir) { - plog.Fatalf("WAL error (%v) cannot be repaired", err) - } else { - plog.Infof("repaired WAL error (%v)", err) - repaired = true - } - continue - } - break - } - var metadata pb.Metadata - pbutil.MustUnmarshal(&metadata, wmetadata) - id = types.ID(metadata.NodeID) - cid = types.ID(metadata.ClusterID) - return -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go deleted file mode 100644 index 66084ae1244..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/util.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "time" - - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/rafthttp" -) - -// isConnectedToQuorumSince checks whether the local member is connected to the -// quorum of the cluster since the given time. -func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { - return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1 -} - -// isConnectedSince checks whether the local member is connected to the -// remote member since the given time. -func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool { - t := transport.ActiveSince(remote) - return !t.IsZero() && t.Before(since) -} - -// isConnectedFullySince checks whether the local member is connected to all -// members in the cluster since the given time. -func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool { - return numConnectedSince(transport, since, self, members) == len(members) -} - -// numConnectedSince counts how many members are connected to the local member -// since the given time. 
-func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int { - connectedNum := 0 - for _, m := range members { - if m.ID == self || isConnectedSince(transport, since, m.ID) { - connectedNum++ - } - } - return connectedNum -} - -// longestConnected chooses the member with longest active-since-time. -// It returns false, if nothing is active. -func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) { - var longest types.ID - var oldest time.Time - for _, id := range membs { - tm := tp.ActiveSince(id) - if tm.IsZero() { // inactive - continue - } - - if oldest.IsZero() { // first longest candidate - oldest = tm - longest = id - } - - if tm.Before(oldest) { - oldest = tm - longest = id - } - } - if uint64(longest) == 0 { - return longest, false - } - return longest, true -} - -type notifier struct { - c chan struct{} - err error -} - -func newNotifier() *notifier { - return ¬ifier{ - c: make(chan struct{}, 0), - } -} - -func (nc *notifier) notify(err error) { - nc.err = err - close(nc.c) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go deleted file mode 100644 index 72c4eb7c5cc..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdserver - -import ( - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" -) - -type v2API interface { - Post(ctx context.Context, r *pb.Request) (Response, error) - Put(ctx context.Context, r *pb.Request) (Response, error) - Delete(ctx context.Context, r *pb.Request) (Response, error) - QGet(ctx context.Context, r *pb.Request) (Response, error) - Get(ctx context.Context, r *pb.Request) (Response, error) - Head(ctx context.Context, r *pb.Request) (Response, error) -} - -type v2apiStore struct{ s *EtcdServer } - -func (a *v2apiStore) Post(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) Put(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) Delete(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) QGet(ctx context.Context, r *pb.Request) (Response, error) { - return a.processRaftRequest(ctx, r) -} - -func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Response, error) { - data, err := r.Marshal() - if err != nil { - return Response{}, err - } - ch := a.s.w.Register(r.ID) - - start := time.Now() - a.s.r.Propose(ctx, data) - proposalsPending.Inc() - defer proposalsPending.Dec() - - select { - case x := <-ch: - resp := x.(Response) - return resp, resp.err - case <-ctx.Done(): - proposalsFailed.Inc() - a.s.w.Trigger(r.ID, nil) // GC wait - return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start) - case <-a.s.stopping: - } - return Response{}, ErrStopped -} - -func (a *v2apiStore) Get(ctx context.Context, r *pb.Request) (Response, error) { - if r.Wait { - wc, err := a.s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since) - if err != nil { - return Response{}, err - } - return Response{Watcher: wc}, nil - } - ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted) - if err != nil { - return Response{}, err - } - return Response{Event: ev}, nil -} - -func (a *v2apiStore) Head(ctx context.Context, r *pb.Request) (Response, error) { - ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted) - if err != nil { - return Response{}, err - } - return Response{Event: ev}, nil -} - -// Do interprets r and performs an operation on s.store according to r.Method -// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with -// Quorum == true, r will be sent through consensus before performing its -// respective operation. Do will block until an action is performed or there is -// an error. 
-func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { - r.ID = s.reqIDGen.Next() - if r.Method == "GET" && r.Quorum { - r.Method = "QGET" - } - v2api := (v2API)(&v2apiStore{s}) - switch r.Method { - case "POST": - return v2api.Post(ctx, &r) - case "PUT": - return v2api.Put(ctx, &r) - case "DELETE": - return v2api.Delete(ctx, &r) - case "QGET": - return v2api.QGet(ctx, &r) - case "GET": - return v2api.Get(ctx, &r) - case "HEAD": - return v2api.Head(ctx, &r) - } - return Response{}, ErrUnknownMethod -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go deleted file mode 100644 index 60653cb6dff..00000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go +++ /dev/null @@ -1,804 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "bytes" - "encoding/binary" - "time" - - "github.com/coreos/etcd/auth" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/lease/leasehttp" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/raft" - - "github.com/coreos/go-semver/semver" - "golang.org/x/net/context" -) - -const ( - // the max request size that raft accepts. - // TODO: make this a flag? But we probably do not want to - // accept large request which might block raft stream. User - // specify a large value might end up with shooting in the foot. - maxRequestBytes = 1.5 * 1024 * 1024 - - // In the health case, there might be a small gap (10s of entries) between - // the applied index and committed index. - // However, if the committed entries are very heavy to apply, the gap might grow. - // We should stop accepting new proposals if the gap growing to a certain point. - maxGapBetweenApplyAndCommitIndex = 5000 -) - -var ( - newRangeClusterVersion = *semver.Must(semver.NewVersion("3.1.0")) -) - -type RaftKV interface { - Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) - Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) - DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) - Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) - Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) -} - -type Lessor interface { - // LeaseGrant sends LeaseGrant request to raft and apply it after committed. - LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - // LeaseRevoke sends LeaseRevoke request to raft and apply it after committed. - LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) - - // LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error - // is returned. - LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) - - // LeaseTimeToLive retrieves lease information. 
- LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) -} - -type Authenticator interface { - AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) - AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) - Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) - UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) - UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) - UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) - UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) - UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) - UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) - RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) - RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) - RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) - RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) - RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) - UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) - RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) -} - -func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - // TODO: remove this checking when we release etcd 3.2 - if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) { - return s.legacyRange(ctx, r) - } - - if !r.Serializable { - err := s.linearizableReadNotify(ctx) - if err != nil { - return nil, err - } - } - var resp *pb.RangeResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) - } - get := func() { resp, err = s.applyV3Base.Range(noTxn, r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err -} - -// TODO: remove this func when we release etcd 3.2 -func (s *EtcdServer) legacyRange(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if r.Serializable { - var resp *pb.RangeResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) - } - get := func() { resp, err = s.applyV3Base.Range(noTxn, r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - } - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.RangeResponse), nil -} - -func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Put: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.PutResponse), nil -} - -func (s *EtcdServer) DeleteRange(ctx 
context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.DeleteRangeResponse), nil -} - -func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - // TODO: remove this checking when we release etcd 3.2 - if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) { - return s.legacyTxn(ctx, r) - } - - if isTxnReadonly(r) { - if !isTxnSerializable(r) { - err := s.linearizableReadNotify(ctx) - if err != nil { - return nil, err - } - } - var resp *pb.TxnResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return checkTxnAuth(s.authStore, ai, r) - } - get := func() { resp, err = s.applyV3Base.Txn(r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - } - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.TxnResponse), nil -} - -// TODO: remove this func when we release etcd 3.2 -func (s *EtcdServer) legacyTxn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if isTxnSerializable(r) { - var resp *pb.TxnResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return checkTxnAuth(s.authStore, ai, r) - } - get := func() { resp, err = s.applyV3Base.Txn(r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - } - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.TxnResponse), nil -} - -func isTxnSerializable(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - return true -} - -func isTxnReadonly(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil { - return false - } - } - return true -} - -func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) - if r.Physical && result != nil && result.physc != nil { - <-result.physc - // The compaction is done deleting keys; the hash is now settled - // but the data is not necessarily committed. If there's a crash, - // the hash may revert to a hash prior to compaction completing - // if the compaction resumes. Force the finished compaction to - // commit so it won't resume following a crash. 
- s.be.ForceCommit() - } - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - resp := result.resp.(*pb.CompactionResponse) - if resp == nil { - resp = &pb.CompactionResponse{} - } - if resp.Header == nil { - resp.Header = &pb.ResponseHeader{} - } - resp.Header.Revision = s.kv.Rev() - return resp, nil -} - -func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - // no id given? choose one - for r.ID == int64(lease.NoLease) { - // only use positive int64 id's - r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) - } - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.LeaseGrantResponse), nil -} - -func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.LeaseRevokeResponse), nil -} - -func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { - ttl, err := s.lessor.Renew(id) - if err == nil { // already requested to primary lessor(leader) - return ttl, nil - } - if err != lease.ErrNotPrimary { - return -1, err - } - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - // renewals don't go through raft; forward to leader manually - for cctx.Err() == nil && err != nil { - leader, lerr := s.waitLeader(cctx) - if lerr != nil { - return -1, lerr - } - for _, url := range leader.PeerURLs { - lurl := url + leasehttp.LeasePrefix - ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt) - if err == nil || err == lease.ErrLeaseNotFound { - return ttl, err - } - } - } - return -1, ErrTimeout -} - -func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - if s.Leader() == s.ID() { - // primary; timetolive directly from leader - le := s.lessor.Lookup(lease.LeaseID(r.ID)) - if le == nil { - return nil, lease.ErrLeaseNotFound - } - // TODO: fill out ResponseHeader - resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()} - if r.Keys { - ks := le.Keys() - kbs := make([][]byte, len(ks)) - for i := range ks { - kbs[i] = []byte(ks[i]) - } - resp.Keys = kbs - } - return resp, nil - } - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - // forward to leader - for cctx.Err() == nil { - leader, err := s.waitLeader(cctx) - if err != nil { - return nil, err - } - for _, url := range leader.PeerURLs { - lurl := url + leasehttp.LeaseInternalPrefix - resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt) - if err == nil { - return resp.LeaseTimeToLiveResponse, nil - } - if err == lease.ErrLeaseNotFound { - return nil, err - } - } - } - return nil, ErrTimeout -} - -func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) { - leader := s.cluster.Member(s.Leader()) - for leader == nil { - // wait an election - dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond - select { - case <-time.After(dur): - leader = s.cluster.Member(s.Leader()) - case <-s.stopping: - 
return nil, ErrStopped - case <-ctx.Done(): - return nil, ErrNoLeader - } - } - if leader == nil || len(leader.PeerURLs) == 0 { - return nil, ErrNoLeader - } - return leader, nil -} - -func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AlarmResponse), nil -} - -func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthEnableResponse), nil -} - -func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthDisableResponse), nil -} - -func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - var result *applyResult - - err := s.linearizableReadNotify(ctx) - if err != nil { - return nil, err - } - - for { - checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) - if err != nil { - plog.Errorf("invalid authentication request to user %s was issued", r.Name) - return nil, err - } - - st, err := s.AuthStore().GenSimpleToken() - if err != nil { - return nil, err - } - - internalReq := &pb.InternalAuthenticateRequest{ - Name: r.Name, - Password: r.Password, - SimpleToken: st, - } - - result, err = s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - - if checkedRevision != s.AuthStore().Revision() { - plog.Infof("revision when password checked is obsolete, retrying") - continue - } - - break - } - - return result.resp.(*pb.AuthenticateResponse), nil -} - -func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserAddResponse), nil -} - -func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserDeleteResponse), nil -} - -func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserChangePasswordResponse), nil -} - -func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - result, err := 
s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserGrantRoleResponse), nil -} - -func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserGetResponse), nil -} - -func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserListResponse), nil -} - -func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserRevokeRoleResponse), nil -} - -func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleAddResponse), nil -} - -func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleGrantPermissionResponse), nil -} - -func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleGetResponse), nil -} - -func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleListResponse), nil -} - -func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleRevokePermissionResponse), nil -} - -func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - 
return result.resp.(*pb.AuthRoleDeleteResponse), nil -} - -// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. -func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { - for { - ai, err := s.AuthStore().AuthInfoFromCtx(ctx) - if err != nil { - return err - } - if ai == nil { - // chk expects non-nil AuthInfo; use empty credentials - ai = &auth.AuthInfo{} - } - if err = chk(ai); err != nil { - if err == auth.ErrAuthOldRevision { - continue - } - return err - } - // fetch response for serialized request - get() - // empty credentials or current auth info means no need to retry - if ai.Revision == 0 || ai.Revision == s.authStore.Revision() { - return nil - } - // avoid TOCTOU error, retry of the request is required. - } -} - -func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { - ai := s.getAppliedIndex() - ci := s.getCommittedIndex() - if ci > ai+maxGapBetweenApplyAndCommitIndex { - return nil, ErrTooManyRequests - } - - r.Header = &pb.RequestHeader{ - ID: s.reqIDGen.Next(), - } - - authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) - if err != nil { - return nil, err - } - if authInfo != nil { - r.Header.Username = authInfo.Username - r.Header.AuthRevision = authInfo.Revision - } - - data, err := r.Marshal() - if err != nil { - return nil, err - } - - if len(data) > maxRequestBytes { - return nil, ErrRequestTooLarge - } - - id := r.ID - if id == 0 { - id = r.Header.ID - } - ch := s.w.Register(id) - - cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) - defer cancel() - - start := time.Now() - s.r.Propose(cctx, data) - proposalsPending.Inc() - defer proposalsPending.Dec() - - select { - case x := <-ch: - return x.(*applyResult), nil - case <-cctx.Done(): - proposalsFailed.Inc() - s.w.Trigger(id, nil) // GC wait - return nil, s.parseProposeCtxErr(cctx.Err(), start) - case <-s.done: - return nil, ErrStopped - } -} - -func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { - var result *applyResult - var err error - for { - result, err = s.processInternalRaftRequestOnce(ctx, r) - if err != auth.ErrAuthOldRevision { - break - } - } - - return result, err -} - -// Watchable returns a watchable interface attached to the etcdserver. -func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } - -func (s *EtcdServer) linearizableReadLoop() { - var rs raft.ReadState - - for { - ctx := make([]byte, 8) - binary.BigEndian.PutUint64(ctx, s.reqIDGen.Next()) - - select { - case <-s.readwaitc: - case <-s.stopping: - return - } - - nextnr := newNotifier() - - s.readMu.Lock() - nr := s.readNotifier - s.readNotifier = nextnr - s.readMu.Unlock() - - cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - if err := s.r.ReadIndex(cctx, ctx); err != nil { - cancel() - if err == raft.ErrStopped { - return - } - plog.Errorf("failed to get read index from raft: %v", err) - nr.notify(err) - continue - } - cancel() - - var ( - timeout bool - done bool - ) - for !timeout && !done { - select { - case rs = <-s.r.readStateC: - done = bytes.Equal(rs.RequestCtx, ctx) - if !done { - // a previous request might time out. now we should ignore the response of it and - // continue waiting for the response of the current requests. 
- plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx) - } - case <-time.After(s.Cfg.ReqTimeout()): - plog.Warningf("timed out waiting for read index response") - nr.notify(ErrTimeout) - timeout = true - case <-s.stopping: - return - } - } - if !done { - continue - } - - if ai := s.getAppliedIndex(); ai < rs.Index { - select { - case <-s.applyWait.Wait(rs.Index): - case <-s.stopping: - return - } - } - // unblock all l-reads requested at indices before rs.Index - nr.notify(nil) - } -} - -func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { - s.readMu.RLock() - nc := s.readNotifier - s.readMu.RUnlock() - - // signal linearizable loop for current notify if it hasn't been already - select { - case s.readwaitc <- struct{}{}: - default: - } - - // wait for read state notification - select { - case <-nc.c: - return nc.err - case <-ctx.Done(): - return ctx.Err() - case <-s.done: - return ErrStopped - } -} diff --git a/vendor/github.com/coreos/etcd/integration/bridge.go b/vendor/github.com/coreos/etcd/integration/bridge.go deleted file mode 100644 index b9e67318e52..00000000000 --- a/vendor/github.com/coreos/etcd/integration/bridge.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package integration - -import ( - "fmt" - "io" - "net" - "sync" - - "github.com/coreos/etcd/pkg/transport" -) - -// bridge creates a unix socket bridge to another unix socket, making it possible -// to disconnect grpc network connections without closing the logical grpc connection. 
-type bridge struct { - inaddr string - outaddr string - l net.Listener - conns map[*bridgeConn]struct{} - - stopc chan struct{} - pausec chan struct{} - wg sync.WaitGroup - - mu sync.Mutex -} - -func newBridge(addr string) (*bridge, error) { - b := &bridge{ - // bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number - inaddr: addr + "0", - outaddr: addr, - conns: make(map[*bridgeConn]struct{}), - stopc: make(chan struct{}), - pausec: make(chan struct{}), - } - close(b.pausec) - - l, err := transport.NewUnixListener(b.inaddr) - if err != nil { - return nil, fmt.Errorf("listen failed on socket %s (%v)", addr, err) - } - b.l = l - b.wg.Add(1) - go b.serveListen() - return b, nil -} - -func (b *bridge) URL() string { return "unix://" + b.inaddr } - -func (b *bridge) Close() { - b.l.Close() - b.mu.Lock() - select { - case <-b.stopc: - default: - close(b.stopc) - } - b.mu.Unlock() - b.wg.Wait() -} - -func (b *bridge) Reset() { - b.mu.Lock() - defer b.mu.Unlock() - for bc := range b.conns { - bc.Close() - } - b.conns = make(map[*bridgeConn]struct{}) -} - -func (b *bridge) Pause() { - b.mu.Lock() - b.pausec = make(chan struct{}) - b.mu.Unlock() -} - -func (b *bridge) Unpause() { - b.mu.Lock() - select { - case <-b.pausec: - default: - close(b.pausec) - } - b.mu.Unlock() -} - -func (b *bridge) serveListen() { - defer func() { - b.l.Close() - b.mu.Lock() - for bc := range b.conns { - bc.Close() - } - b.mu.Unlock() - b.wg.Done() - }() - - for { - inc, ierr := b.l.Accept() - if ierr != nil { - return - } - b.mu.Lock() - pausec := b.pausec - b.mu.Unlock() - select { - case <-b.stopc: - inc.Close() - return - case <-pausec: - } - - outc, oerr := net.Dial("unix", b.outaddr) - if oerr != nil { - inc.Close() - return - } - - bc := &bridgeConn{inc, outc, make(chan struct{})} - b.wg.Add(1) - b.mu.Lock() - b.conns[bc] = struct{}{} - go b.serveConn(bc) - b.mu.Unlock() - } -} - -func (b *bridge) serveConn(bc *bridgeConn) { - defer func() { - close(bc.donec) - bc.Close() - b.mu.Lock() - delete(b.conns, bc) - b.mu.Unlock() - b.wg.Done() - }() - - var wg sync.WaitGroup - wg.Add(2) - go func() { - io.Copy(bc.out, bc.in) - bc.close() - wg.Done() - }() - go func() { - io.Copy(bc.in, bc.out) - bc.close() - wg.Done() - }() - wg.Wait() -} - -type bridgeConn struct { - in net.Conn - out net.Conn - donec chan struct{} -} - -func (bc *bridgeConn) Close() { - bc.close() - <-bc.donec -} - -func (bc *bridgeConn) close() { - bc.in.Close() - bc.out.Close() -} diff --git a/vendor/github.com/coreos/etcd/integration/cluster.go b/vendor/github.com/coreos/etcd/integration/cluster.go deleted file mode 100644 index 4989e1f62fa..00000000000 --- a/vendor/github.com/coreos/etcd/integration/cluster.go +++ /dev/null @@ -1,900 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package integration - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/http/httptest" - "os" - "reflect" - "sort" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc" - - "github.com/coreos/etcd/client" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http" - "github.com/coreos/etcd/etcdserver/api/v3rpc" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/testutil" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/rafthttp" - "github.com/coreos/pkg/capnslog" -) - -const ( - tickDuration = 10 * time.Millisecond - clusterName = "etcd" - requestTimeout = 20 * time.Second - - basePort = 21000 - UrlScheme = "unix" - UrlSchemeTLS = "unixs" -) - -var ( - electionTicks = 10 - - // integration test uses unique ports, counting up, to listen for each - // member, ensuring restarted members can listen on the same port again. - localListenCount int64 = 0 - - testTLSInfo = transport.TLSInfo{ - KeyFile: "./fixtures/server.key.insecure", - CertFile: "./fixtures/server.crt", - TrustedCAFile: "./fixtures/ca.crt", - ClientCertAuth: true, - } - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "integration") -) - -type ClusterConfig struct { - Size int - PeerTLS *transport.TLSInfo - ClientTLS *transport.TLSInfo - DiscoveryURL string - UseGRPC bool - QuotaBackendBytes int64 -} - -type cluster struct { - cfg *ClusterConfig - Members []*member -} - -func init() { - // manually enable v3 capability since we know the cluster members all support v3. - api.EnableCapability(api.V3rpcCapability) -} - -func schemeFromTLSInfo(tls *transport.TLSInfo) string { - if tls == nil { - return UrlScheme - } - return UrlSchemeTLS -} - -func (c *cluster) fillClusterForMembers() error { - if c.cfg.DiscoveryURL != "" { - // cluster will be discovered - return nil - } - - addrs := make([]string, 0) - for _, m := range c.Members { - scheme := schemeFromTLSInfo(m.PeerTLSInfo) - for _, l := range m.PeerListeners { - addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String())) - } - } - clusterStr := strings.Join(addrs, ",") - var err error - for _, m := range c.Members { - m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) - if err != nil { - return err - } - } - return nil -} - -func newCluster(t *testing.T, cfg *ClusterConfig) *cluster { - c := &cluster{cfg: cfg} - ms := make([]*member, cfg.Size) - for i := 0; i < cfg.Size; i++ { - ms[i] = c.mustNewMember(t) - } - c.Members = ms - if err := c.fillClusterForMembers(); err != nil { - t.Fatal(err) - } - - return c -} - -// NewCluster returns an unlaunched cluster of the given size which has been -// set to use static bootstrap. -func NewCluster(t *testing.T, size int) *cluster { - return newCluster(t, &ClusterConfig{Size: size}) -} - -// NewClusterByConfig returns an unlaunched cluster defined by a cluster configuration -func NewClusterByConfig(t *testing.T, cfg *ClusterConfig) *cluster { - return newCluster(t, cfg) -} - -func (c *cluster) Launch(t *testing.T) { - errc := make(chan error) - for _, m := range c.Members { - // Members are launched in separate goroutines because if they boot - // using discovery url, they have to wait for others to register to continue. 
- go func(m *member) { - errc <- m.Launch() - }(m) - } - for range c.Members { - if err := <-errc; err != nil { - t.Fatalf("error setting up member: %v", err) - } - } - // wait cluster to be stable to receive future client requests - c.waitMembersMatch(t, c.HTTPMembers()) - c.waitVersion() -} - -func (c *cluster) URL(i int) string { - return c.Members[i].ClientURLs[0].String() -} - -// URLs returns a list of all active client URLs in the cluster -func (c *cluster) URLs() []string { - urls := make([]string, 0) - for _, m := range c.Members { - select { - case <-m.s.StopNotify(): - continue - default: - } - for _, u := range m.ClientURLs { - urls = append(urls, u.String()) - } - } - return urls -} - -// HTTPMembers returns a list of all active members as client.Members -func (c *cluster) HTTPMembers() []client.Member { - ms := []client.Member{} - for _, m := range c.Members { - pScheme := schemeFromTLSInfo(m.PeerTLSInfo) - cScheme := schemeFromTLSInfo(m.ClientTLSInfo) - cm := client.Member{Name: m.Name} - for _, ln := range m.PeerListeners { - cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String()) - } - for _, ln := range m.ClientListeners { - cm.ClientURLs = append(cm.ClientURLs, cScheme+"://"+ln.Addr().String()) - } - ms = append(ms, cm) - } - return ms -} - -func (c *cluster) mustNewMember(t *testing.T) *member { - m := mustNewMember(t, - memberConfig{ - name: c.name(rand.Int()), - peerTLS: c.cfg.PeerTLS, - clientTLS: c.cfg.ClientTLS, - quotaBackendBytes: c.cfg.QuotaBackendBytes, - }) - m.DiscoveryURL = c.cfg.DiscoveryURL - if c.cfg.UseGRPC { - if err := m.listenGRPC(); err != nil { - t.Fatal(err) - } - } - return m -} - -func (c *cluster) addMember(t *testing.T) { - m := c.mustNewMember(t) - - scheme := schemeFromTLSInfo(c.cfg.PeerTLS) - - // send add request to the cluster - var err error - for i := 0; i < len(c.Members); i++ { - clientURL := c.URL(i) - peerURL := scheme + "://" + m.PeerListeners[0].Addr().String() - if err = c.addMemberByURL(t, clientURL, peerURL); err == nil { - break - } - } - if err != nil { - t.Fatalf("add member failed on all members error: %v", err) - } - - m.InitialPeerURLsMap = types.URLsMap{} - for _, mm := range c.Members { - m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs - } - m.InitialPeerURLsMap[m.Name] = m.PeerURLs - m.NewCluster = false - if err := m.Launch(); err != nil { - t.Fatal(err) - } - c.Members = append(c.Members, m) - // wait cluster to be stable to receive future client requests - c.waitMembersMatch(t, c.HTTPMembers()) -} - -func (c *cluster) addMemberByURL(t *testing.T, clientURL, peerURL string) error { - cc := MustNewHTTPClient(t, []string{clientURL}, c.cfg.ClientTLS) - ma := client.NewMembersAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - if _, err := ma.Add(ctx, peerURL); err != nil { - return err - } - cancel() - - // wait for the add node entry applied in the cluster - members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}}) - c.waitMembersMatch(t, members) - return nil -} - -func (c *cluster) AddMember(t *testing.T) { - c.addMember(t) -} - -func (c *cluster) RemoveMember(t *testing.T, id uint64) { - if err := c.removeMember(t, id); err != nil { - t.Fatal(err) - } -} - -func (c *cluster) removeMember(t *testing.T, id uint64) error { - // send remove request to the cluster - cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS) - ma := client.NewMembersAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - if err := 
ma.Remove(ctx, types.ID(id).String()); err != nil { - return err - } - cancel() - newMembers := make([]*member, 0) - for _, m := range c.Members { - if uint64(m.s.ID()) != id { - newMembers = append(newMembers, m) - } else { - select { - case <-m.s.StopNotify(): - m.Terminate(t) - // 1s stop delay + election timeout + 1s disk and network delay + connection write timeout - // TODO: remove connection write timeout by selecting on http response closeNotifier - // blocking on https://github.com/golang/go/issues/9524 - case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout): - t.Fatalf("failed to remove member %s in time", m.s.ID()) - } - } - } - c.Members = newMembers - c.waitMembersMatch(t, c.HTTPMembers()) - return nil -} - -func (c *cluster) Terminate(t *testing.T) { - for _, m := range c.Members { - m.Terminate(t) - } -} - -func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) { - for _, u := range c.URLs() { - cc := MustNewHTTPClient(t, []string{u}, c.cfg.ClientTLS) - ma := client.NewMembersAPI(cc) - for { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - ms, err := ma.List(ctx) - cancel() - if err == nil && isMembersEqual(ms, membs) { - break - } - time.Sleep(tickDuration) - } - } - return -} - -func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) } - -// waitLeader waits until given members agree on the same leader. -func (c *cluster) waitLeader(t *testing.T, membs []*member) int { - possibleLead := make(map[uint64]bool) - var lead uint64 - for _, m := range membs { - possibleLead[uint64(m.s.ID())] = true - } - - for lead == 0 || !possibleLead[lead] { - lead = 0 - for _, m := range membs { - select { - case <-m.s.StopNotify(): - continue - default: - } - if lead != 0 && lead != m.s.Lead() { - lead = 0 - time.Sleep(10 * tickDuration) - break - } - lead = m.s.Lead() - } - } - - for i, m := range membs { - if uint64(m.s.ID()) == lead { - return i - } - } - - return -1 -} - -func (c *cluster) WaitNoLeader(t *testing.T) { c.waitNoLeader(t, c.Members) } - -// waitNoLeader waits until given members lose leader. -func (c *cluster) waitNoLeader(t *testing.T, membs []*member) { - noLeader := false - for !noLeader { - noLeader = true - for _, m := range membs { - select { - case <-m.s.StopNotify(): - continue - default: - } - if m.s.Lead() != 0 { - noLeader = false - time.Sleep(10 * tickDuration) - break - } - } - } -} - -func (c *cluster) waitVersion() { - for _, m := range c.Members { - for { - if m.s.ClusterVersion() != nil { - break - } - time.Sleep(tickDuration) - } - } -} - -func (c *cluster) name(i int) string { - return fmt.Sprint(i) -} - -// isMembersEqual checks whether two members equal except ID field. -// The given wmembs should always set ID field to empty string. 
-func isMembersEqual(membs []client.Member, wmembs []client.Member) bool { - sort.Sort(SortableMemberSliceByPeerURLs(membs)) - sort.Sort(SortableMemberSliceByPeerURLs(wmembs)) - for i := range membs { - membs[i].ID = "" - } - return reflect.DeepEqual(membs, wmembs) -} - -func newLocalListener(t *testing.T) net.Listener { - c := atomic.AddInt64(&localListenCount, 1) - // Go 1.8+ allows only numbers in port - addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+basePort, os.Getpid()) - return NewListenerWithAddr(t, addr) -} - -func NewListenerWithAddr(t *testing.T, addr string) net.Listener { - l, err := transport.NewUnixListener(addr) - if err != nil { - t.Fatal(err) - } - return l -} - -type member struct { - etcdserver.ServerConfig - PeerListeners, ClientListeners []net.Listener - grpcListener net.Listener - // PeerTLSInfo enables peer TLS when set - PeerTLSInfo *transport.TLSInfo - // ClientTLSInfo enables client TLS when set - ClientTLSInfo *transport.TLSInfo - - raftHandler *testutil.PauseableHandler - s *etcdserver.EtcdServer - hss []*httptest.Server - - grpcServer *grpc.Server - grpcAddr string - grpcBridge *bridge - - keepDataDirTerminate bool -} - -func (m *member) GRPCAddr() string { return m.grpcAddr } - -type memberConfig struct { - name string - peerTLS *transport.TLSInfo - clientTLS *transport.TLSInfo - quotaBackendBytes int64 -} - -// mustNewMember return an inited member with the given name. If peerTLS is -// set, it will use https scheme to communicate between peers. -func mustNewMember(t *testing.T, mcfg memberConfig) *member { - var err error - m := &member{} - - peerScheme := schemeFromTLSInfo(mcfg.peerTLS) - clientScheme := schemeFromTLSInfo(mcfg.clientTLS) - - pln := newLocalListener(t) - m.PeerListeners = []net.Listener{pln} - m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()}) - if err != nil { - t.Fatal(err) - } - m.PeerTLSInfo = mcfg.peerTLS - - cln := newLocalListener(t) - m.ClientListeners = []net.Listener{cln} - m.ClientURLs, err = types.NewURLs([]string{clientScheme + "://" + cln.Addr().String()}) - if err != nil { - t.Fatal(err) - } - m.ClientTLSInfo = mcfg.clientTLS - - m.Name = mcfg.name - - m.DataDir, err = ioutil.TempDir(os.TempDir(), "etcd") - if err != nil { - t.Fatal(err) - } - clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.name, peerScheme, pln.Addr().String()) - m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) - if err != nil { - t.Fatal(err) - } - m.InitialClusterToken = clusterName - m.NewCluster = true - m.BootstrapTimeout = 10 * time.Millisecond - if m.PeerTLSInfo != nil { - m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo - } - m.ElectionTicks = electionTicks - m.TickMs = uint(tickDuration / time.Millisecond) - m.QuotaBackendBytes = mcfg.quotaBackendBytes - return m -} - -// listenGRPC starts a grpc server over a unix domain socket on the member -func (m *member) listenGRPC() error { - // prefix with localhost so cert has right domain - m.grpcAddr = "localhost:" + m.Name - l, err := transport.NewUnixListener(m.grpcAddr) - if err != nil { - return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err) - } - m.grpcBridge, err = newBridge(m.grpcAddr) - if err != nil { - l.Close() - return err - } - m.grpcAddr = m.grpcBridge.URL() - m.grpcListener = l - return nil -} - -func (m *member) electionTimeout() time.Duration { - return time.Duration(m.s.Cfg.ElectionTicks) * time.Millisecond -} - -func (m *member) DropConnections() { m.grpcBridge.Reset() } -func (m *member) PauseConnections() { m.grpcBridge.Pause() } 
-func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() } - -// NewClientV3 creates a new grpc client connection to the member -func NewClientV3(m *member) (*clientv3.Client, error) { - if m.grpcAddr == "" { - return nil, fmt.Errorf("member not configured for grpc") - } - - cfg := clientv3.Config{ - Endpoints: []string{m.grpcAddr}, - DialTimeout: 5 * time.Second, - } - - if m.ClientTLSInfo != nil { - tls, err := m.ClientTLSInfo.ClientConfig() - if err != nil { - return nil, err - } - cfg.TLS = tls - } - return newClientV3(cfg) -} - -// Clone returns a member with the same server configuration. The returned -// member will not set PeerListeners and ClientListeners. -func (m *member) Clone(t *testing.T) *member { - mm := &member{} - mm.ServerConfig = m.ServerConfig - - var err error - clientURLStrs := m.ClientURLs.StringSlice() - mm.ClientURLs, err = types.NewURLs(clientURLStrs) - if err != nil { - // this should never fail - panic(err) - } - peerURLStrs := m.PeerURLs.StringSlice() - mm.PeerURLs, err = types.NewURLs(peerURLStrs) - if err != nil { - // this should never fail - panic(err) - } - clusterStr := m.InitialPeerURLsMap.String() - mm.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) - if err != nil { - // this should never fail - panic(err) - } - mm.InitialClusterToken = m.InitialClusterToken - mm.ElectionTicks = m.ElectionTicks - mm.PeerTLSInfo = m.PeerTLSInfo - mm.ClientTLSInfo = m.ClientTLSInfo - return mm -} - -// Launch starts a member based on ServerConfig, PeerListeners -// and ClientListeners. -func (m *member) Launch() error { - plog.Printf("launching %s (%s)", m.Name, m.grpcAddr) - var err error - if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { - return fmt.Errorf("failed to initialize the etcd server: %v", err) - } - m.s.SyncTicker = time.Tick(500 * time.Millisecond) - m.s.Start() - - m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)} - - for _, ln := range m.PeerListeners { - hs := &httptest.Server{ - Listener: ln, - Config: &http.Server{Handler: m.raftHandler}, - } - if m.PeerTLSInfo == nil { - hs.Start() - } else { - hs.TLS, err = m.PeerTLSInfo.ServerConfig() - if err != nil { - return err - } - hs.StartTLS() - } - m.hss = append(m.hss, hs) - } - for _, ln := range m.ClientListeners { - hs := &httptest.Server{ - Listener: ln, - Config: &http.Server{Handler: v2http.NewClientHandler(m.s, m.ServerConfig.ReqTimeout())}, - } - if m.ClientTLSInfo == nil { - hs.Start() - } else { - hs.TLS, err = m.ClientTLSInfo.ServerConfig() - if err != nil { - return err - } - hs.StartTLS() - } - m.hss = append(m.hss, hs) - } - if m.grpcListener != nil { - var ( - tlscfg *tls.Config - ) - if m.ClientTLSInfo != nil && !m.ClientTLSInfo.Empty() { - tlscfg, err = m.ClientTLSInfo.ServerConfig() - if err != nil { - return err - } - } - m.grpcServer = v3rpc.Server(m.s, tlscfg) - go m.grpcServer.Serve(m.grpcListener) - } - - plog.Printf("launched %s (%s)", m.Name, m.grpcAddr) - return nil -} - -func (m *member) WaitOK(t *testing.T) { - cc := MustNewHTTPClient(t, []string{m.URL()}, m.ClientTLSInfo) - kapi := client.NewKeysAPI(cc) - for { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - _, err := kapi.Get(ctx, "/", nil) - if err != nil { - time.Sleep(tickDuration) - continue - } - cancel() - break - } - for m.s.Leader() == 0 { - time.Sleep(tickDuration) - } -} - -func (m *member) URL() string { return m.ClientURLs[0].String() } - -func (m *member) Pause() { - m.raftHandler.Pause() - m.s.PauseSending() -} - -func (m 
*member) Resume() { - m.raftHandler.Resume() - m.s.ResumeSending() -} - -// Close stops the member's etcdserver and closes its connections -func (m *member) Close() { - if m.grpcBridge != nil { - m.grpcBridge.Close() - m.grpcBridge = nil - } - if m.grpcServer != nil { - m.grpcServer.Stop() - m.grpcServer = nil - } - m.s.HardStop() - for _, hs := range m.hss { - hs.CloseClientConnections() - hs.Close() - } -} - -// Stop stops the member, but the data dir of the member is preserved. -func (m *member) Stop(t *testing.T) { - plog.Printf("stopping %s (%s)", m.Name, m.grpcAddr) - m.Close() - m.hss = nil - plog.Printf("stopped %s (%s)", m.Name, m.grpcAddr) -} - -// checkLeaderTransition waits for leader transition, returning the new leader ID. -func checkLeaderTransition(t *testing.T, m *member, oldLead uint64) uint64 { - interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond - for m.s.Lead() == 0 || (m.s.Lead() == oldLead) { - time.Sleep(interval) - } - return m.s.Lead() -} - -// StopNotify unblocks when a member stop completes -func (m *member) StopNotify() <-chan struct{} { - return m.s.StopNotify() -} - -// Restart starts the member using the preserved data dir. -func (m *member) Restart(t *testing.T) error { - plog.Printf("restarting %s (%s)", m.Name, m.grpcAddr) - newPeerListeners := make([]net.Listener, 0) - for _, ln := range m.PeerListeners { - newPeerListeners = append(newPeerListeners, NewListenerWithAddr(t, ln.Addr().String())) - } - m.PeerListeners = newPeerListeners - newClientListeners := make([]net.Listener, 0) - for _, ln := range m.ClientListeners { - newClientListeners = append(newClientListeners, NewListenerWithAddr(t, ln.Addr().String())) - } - m.ClientListeners = newClientListeners - - if m.grpcListener != nil { - if err := m.listenGRPC(); err != nil { - t.Fatal(err) - } - } - - err := m.Launch() - plog.Printf("restarted %s (%s)", m.Name, m.grpcAddr) - return err -} - -// Terminate stops the member and removes the data dir. -func (m *member) Terminate(t *testing.T) { - plog.Printf("terminating %s (%s)", m.Name, m.grpcAddr) - m.Close() - if !m.keepDataDirTerminate { - if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil { - t.Fatal(err) - } - } - plog.Printf("terminated %s (%s)", m.Name, m.grpcAddr) -} - -// Metric gets the metric value for a member -func (m *member) Metric(metricName string) (string, error) { - cfgtls := transport.TLSInfo{} - tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second) - if err != nil { - return "", err - } - cli := &http.Client{Transport: tr} - resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics") - if err != nil { - return "", err - } - defer resp.Body.Close() - b, rerr := ioutil.ReadAll(resp.Body) - if rerr != nil { - return "", rerr - } - lines := strings.Split(string(b), "\n") - for _, l := range lines { - if strings.HasPrefix(l, metricName) { - return strings.Split(l, " ")[1], nil - } - } - return "", nil -} - -// InjectPartition drops connections from m to others, vice versa. -func (m *member) InjectPartition(t *testing.T, others []*member) { - for _, other := range others { - m.s.CutPeer(other.s.ID()) - other.s.CutPeer(m.s.ID()) - } -} - -// RecoverPartition recovers connections from m to others, vice versa. 
-func (m *member) RecoverPartition(t *testing.T, others []*member) { - for _, other := range others { - m.s.MendPeer(other.s.ID()) - other.s.MendPeer(m.s.ID()) - } -} - -func MustNewHTTPClient(t *testing.T, eps []string, tls *transport.TLSInfo) client.Client { - cfgtls := transport.TLSInfo{} - if tls != nil { - cfgtls = *tls - } - cfg := client.Config{Transport: mustNewTransport(t, cfgtls), Endpoints: eps} - c, err := client.New(cfg) - if err != nil { - t.Fatal(err) - } - return c -} - -func mustNewTransport(t *testing.T, tlsInfo transport.TLSInfo) *http.Transport { - // tick in integration test is short, so 1s dial timeout could play well. - tr, err := transport.NewTimeoutTransport(tlsInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout) - if err != nil { - t.Fatal(err) - } - return tr -} - -type SortableMemberSliceByPeerURLs []client.Member - -func (p SortableMemberSliceByPeerURLs) Len() int { return len(p) } -func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool { - return p[i].PeerURLs[0] < p[j].PeerURLs[0] -} -func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type ClusterV3 struct { - *cluster - - mu sync.Mutex - clients []*clientv3.Client -} - -// NewClusterV3 returns a launched cluster with a grpc client connection -// for each cluster member. -func NewClusterV3(t *testing.T, cfg *ClusterConfig) *ClusterV3 { - cfg.UseGRPC = true - clus := &ClusterV3{ - cluster: NewClusterByConfig(t, cfg), - } - clus.Launch(t) - for _, m := range clus.Members { - client, err := NewClientV3(m) - if err != nil { - t.Fatalf("cannot create client: %v", err) - } - clus.clients = append(clus.clients, client) - } - - return clus -} - -func (c *ClusterV3) TakeClient(idx int) { - c.mu.Lock() - c.clients[idx] = nil - c.mu.Unlock() -} - -func (c *ClusterV3) Terminate(t *testing.T) { - c.mu.Lock() - for _, client := range c.clients { - if client == nil { - continue - } - if err := client.Close(); err != nil { - t.Error(err) - } - } - c.mu.Unlock() - c.cluster.Terminate(t) -} - -func (c *ClusterV3) RandClient() *clientv3.Client { - return c.clients[rand.Intn(len(c.clients))] -} - -func (c *ClusterV3) Client(i int) *clientv3.Client { - return c.clients[i] -} - -type grpcAPI struct { - // Cluster is the cluster API for the client's connection. - Cluster pb.ClusterClient - // KV is the keyvalue API for the client's connection. - KV pb.KVClient - // Lease is the lease API for the client's connection. - Lease pb.LeaseClient - // Watch is the watch API for the client's connection. - Watch pb.WatchClient - // Maintenance is the maintenance API for the client's connection. - Maintenance pb.MaintenanceClient - // Auth is the authentication API for the client's connection. - Auth pb.AuthClient -} diff --git a/vendor/github.com/coreos/etcd/integration/cluster_direct.go b/vendor/github.com/coreos/etcd/integration/cluster_direct.go deleted file mode 100644 index 84b2a796cc0..00000000000 --- a/vendor/github.com/coreos/etcd/integration/cluster_direct.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !cluster_proxy - -package integration - -import ( - "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -func toGRPC(c *clientv3.Client) grpcAPI { - return grpcAPI{ - pb.NewClusterClient(c.ActiveConnection()), - pb.NewKVClient(c.ActiveConnection()), - pb.NewLeaseClient(c.ActiveConnection()), - pb.NewWatchClient(c.ActiveConnection()), - pb.NewMaintenanceClient(c.ActiveConnection()), - pb.NewAuthClient(c.ActiveConnection()), - } -} - -func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { - return clientv3.New(cfg) -} diff --git a/vendor/github.com/coreos/etcd/integration/cluster_proxy.go b/vendor/github.com/coreos/etcd/integration/cluster_proxy.go deleted file mode 100644 index 75319218ec6..00000000000 --- a/vendor/github.com/coreos/etcd/integration/cluster_proxy.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build cluster_proxy - -package integration - -import ( - "sync" - - "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/proxy/grpcproxy" -) - -var ( - pmu sync.Mutex - proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy) -) - -type grpcClientProxy struct { - grpc grpcAPI - wdonec <-chan struct{} - kvdonec <-chan struct{} -} - -func toGRPC(c *clientv3.Client) grpcAPI { - pmu.Lock() - defer pmu.Unlock() - - if v, ok := proxies[c]; ok { - return v.grpc - } - - wp, wpch := grpcproxy.NewWatchProxy(c) - kvp, kvpch := grpcproxy.NewKvProxy(c) - grpc := grpcAPI{ - pb.NewClusterClient(c.ActiveConnection()), - grpcproxy.KvServerToKvClient(kvp), - pb.NewLeaseClient(c.ActiveConnection()), - grpcproxy.WatchServerToWatchClient(wp), - pb.NewMaintenanceClient(c.ActiveConnection()), - pb.NewAuthClient(c.ActiveConnection()), - } - proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch} - return grpc -} - -type proxyCloser struct { - clientv3.Watcher - wdonec <-chan struct{} - kvdonec <-chan struct{} -} - -func (pc *proxyCloser) Close() error { - // client ctx is canceled before calling close, so kv will close out - <-pc.kvdonec - err := pc.Watcher.Close() - <-pc.wdonec - return err -} - -func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { - c, err := clientv3.New(cfg) - if err != nil { - return nil, err - } - rpc := toGRPC(c) - c.KV = clientv3.NewKVFromKVClient(rpc.KV) - pmu.Lock() - c.Watcher = &proxyCloser{ - Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch), - wdonec: proxies[c].wdonec, - kvdonec: proxies[c].kvdonec, - } - pmu.Unlock() - return c, nil -} diff --git a/vendor/github.com/coreos/etcd/integration/doc.go b/vendor/github.com/coreos/etcd/integration/doc.go deleted file mode 100644 index fbf19d54368..00000000000 --- a/vendor/github.com/coreos/etcd/integration/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 
2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package integration implements tests built upon embedded etcd, and focus on -etcd correctness. - -Features/goals of the integration tests: -1. test the whole code base except command-line parsing. -2. check internal data, including raft, store and etc. -3. based on goroutines, which is faster than process. -4. mainly tests user behavior and user-facing API. -*/ -package integration diff --git a/vendor/github.com/coreos/etcd/lease/doc.go b/vendor/github.com/coreos/etcd/lease/doc.go deleted file mode 100644 index 73e7d0ec51a..00000000000 --- a/vendor/github.com/coreos/etcd/lease/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package lease provides an interface and implemetation for time-limited leases over arbitrary resources. -package lease diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go b/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go deleted file mode 100644 index 8177a37b663..00000000000 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package leasehttp serves lease renewals made through HTTP requests. -package leasehttp diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go deleted file mode 100644 index 256051efc8d..00000000000 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package leasehttp - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "net/http" - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/lease/leasepb" - "github.com/coreos/etcd/pkg/httputil" - "golang.org/x/net/context" -) - -var ( - LeasePrefix = "/leases" - LeaseInternalPrefix = "/leases/internal" - applyTimeout = time.Second - ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out") -) - -// NewHandler returns an http Handler for lease renewals -func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler { - return &leaseHandler{l, waitch} -} - -type leaseHandler struct { - l lease.Lessor - waitch func() <-chan struct{} -} - -func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil { - http.Error(w, "error reading body", http.StatusBadRequest) - return - } - - var v []byte - switch r.URL.Path { - case LeasePrefix: - lreq := pb.LeaseKeepAliveRequest{} - if err := lreq.Unmarshal(b); err != nil { - http.Error(w, "error unmarshalling request", http.StatusBadRequest) - return - } - select { - case <-h.waitch(): - case <-time.After(applyTimeout): - http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout) - return - } - ttl, err := h.l.Renew(lease.LeaseID(lreq.ID)) - if err != nil { - if err == lease.ErrLeaseNotFound { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - // TODO: fill out ResponseHeader - resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl} - v, err = resp.Marshal() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - case LeaseInternalPrefix: - lreq := leasepb.LeaseInternalRequest{} - if err := lreq.Unmarshal(b); err != nil { - http.Error(w, "error unmarshalling request", http.StatusBadRequest) - return - } - select { - case <-h.waitch(): - case <-time.After(applyTimeout): - http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout) - return - } - l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID)) - if l == nil { - http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound) - return - } - // TODO: fill out ResponseHeader - resp := &leasepb.LeaseInternalResponse{ - LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{ - Header: &pb.ResponseHeader{}, - ID: lreq.LeaseTimeToLiveRequest.ID, - TTL: int64(l.Remaining().Seconds()), - GrantedTTL: l.TTL(), - }, - } - if lreq.LeaseTimeToLiveRequest.Keys { - ks := l.Keys() - kbs := make([][]byte, len(ks)) - for i := range ks { - kbs[i] = []byte(ks[i]) - } - resp.LeaseTimeToLiveResponse.Keys = kbs - } - - v, err = resp.Marshal() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - default: - http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest) - return - } - - 
w.Header().Set("Content-Type", "application/protobuf") - w.Write(v) -} - -// RenewHTTP renews a lease at a given primary server. -// TODO: Batch request in future? -func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundTripper) (int64, error) { - // will post lreq protobuf to leader - lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal() - if err != nil { - return -1, err - } - - cc := &http.Client{Transport: rt} - req, err := http.NewRequest("POST", url, bytes.NewReader(lreq)) - if err != nil { - return -1, err - } - req.Header.Set("Content-Type", "application/protobuf") - req.Cancel = ctx.Done() - - resp, err := cc.Do(req) - if err != nil { - return -1, err - } - b, err := readResponse(resp) - if err != nil { - return -1, err - } - - if resp.StatusCode == http.StatusRequestTimeout { - return -1, ErrLeaseHTTPTimeout - } - - if resp.StatusCode == http.StatusNotFound { - return -1, lease.ErrLeaseNotFound - } - - if resp.StatusCode != http.StatusOK { - return -1, fmt.Errorf("lease: unknown error(%s)", string(b)) - } - - lresp := &pb.LeaseKeepAliveResponse{} - if err := lresp.Unmarshal(b); err != nil { - return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b)) - } - if lresp.ID != int64(id) { - return -1, fmt.Errorf("lease: renew id mismatch") - } - return lresp.TTL, nil -} - -// TimeToLiveHTTP retrieves lease information of the given lease ID. -func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) { - // will post lreq protobuf to leader - lreq, err := (&leasepb.LeaseInternalRequest{&pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: keys}}).Marshal() - if err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", url, bytes.NewReader(lreq)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/protobuf") - - cancel := httputil.RequestCanceler(req) - - cc := &http.Client{Transport: rt} - var b []byte - // buffer errc channel so that errc don't block inside the go routinue - errc := make(chan error, 2) - go func() { - resp, err := cc.Do(req) - if err != nil { - errc <- err - return - } - b, err = readResponse(resp) - if err != nil { - errc <- err - return - } - if resp.StatusCode == http.StatusRequestTimeout { - errc <- ErrLeaseHTTPTimeout - return - } - if resp.StatusCode == http.StatusNotFound { - errc <- lease.ErrLeaseNotFound - return - } - if resp.StatusCode != http.StatusOK { - errc <- fmt.Errorf("lease: unknown error(%s)", string(b)) - return - } - errc <- nil - }() - select { - case derr := <-errc: - if derr != nil { - return nil, derr - } - case <-ctx.Done(): - cancel() - return nil, ctx.Err() - } - - lresp := &leasepb.LeaseInternalResponse{} - if err := lresp.Unmarshal(b); err != nil { - return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b)) - } - if lresp.LeaseTimeToLiveResponse.ID != int64(id) { - return nil, fmt.Errorf("lease: renew id mismatch") - } - return lresp, nil -} - -func readResponse(resp *http.Response) (b []byte, err error) { - b, err = ioutil.ReadAll(resp.Body) - httputil.GracefulClose(resp) - return -} diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go deleted file mode 100644 index fb3a9bab0c3..00000000000 --- a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go +++ /dev/null @@ -1,608 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: lease.proto -// DO NOT EDIT! 
- -/* - Package leasepb is a generated protocol buffer package. - - It is generated from these files: - lease.proto - - It has these top-level messages: - Lease - LeaseInternalRequest - LeaseInternalResponse -*/ -package leasepb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Lease struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"` -} - -func (m *Lease) Reset() { *m = Lease{} } -func (m *Lease) String() string { return proto.CompactTextString(m) } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{0} } - -type LeaseInternalRequest struct { - LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest" json:"LeaseTimeToLiveRequest,omitempty"` -} - -func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} } -func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseInternalRequest) ProtoMessage() {} -func (*LeaseInternalRequest) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{1} } - -type LeaseInternalResponse struct { - LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse" json:"LeaseTimeToLiveResponse,omitempty"` -} - -func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} } -func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseInternalResponse) ProtoMessage() {} -func (*LeaseInternalResponse) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{2} } - -func init() { - proto.RegisterType((*Lease)(nil), "leasepb.Lease") - proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest") - proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse") -} -func (m *Lease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ID != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintLease(dAtA, i, uint64(m.ID)) - } - if m.TTL != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintLease(dAtA, i, uint64(m.TTL)) - } - return i, nil -} - -func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseInternalRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.LeaseTimeToLiveRequest != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveRequest.Size())) - n1, err := m.LeaseTimeToLiveRequest.MarshalTo(dAtA[i:]) - if err != nil { - 
return 0, err - } - i += n1 - } - return i, nil -} - -func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseInternalResponse) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.LeaseTimeToLiveResponse != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveResponse.Size())) - n2, err := m.LeaseTimeToLiveResponse.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func encodeFixed64Lease(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Lease(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintLease(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Lease) Size() (n int) { - var l int - _ = l - if m.ID != 0 { - n += 1 + sovLease(uint64(m.ID)) - } - if m.TTL != 0 { - n += 1 + sovLease(uint64(m.TTL)) - } - return n -} - -func (m *LeaseInternalRequest) Size() (n int) { - var l int - _ = l - if m.LeaseTimeToLiveRequest != nil { - l = m.LeaseTimeToLiveRequest.Size() - n += 1 + l + sovLease(uint64(l)) - } - return n -} - -func (m *LeaseInternalResponse) Size() (n int) { - var l int - _ = l - if m.LeaseTimeToLiveResponse != nil { - l = m.LeaseTimeToLiveResponse.Size() - n += 1 + l + sovLease(uint64(l)) - } - return n -} - -func sovLease(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozLease(x uint64) (n int) { - return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= (int64(b) & 0x7F) << shift - if 
b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseInternalRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseTimeToLiveRequest == nil { - m.LeaseTimeToLiveRequest = &etcdserverpb.LeaseTimeToLiveRequest{} - } - if err := m.LeaseTimeToLiveRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseInternalResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveResponse", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LeaseTimeToLiveResponse == nil { - m.LeaseTimeToLiveResponse = &etcdserverpb.LeaseTimeToLiveResponse{} - } - if err := m.LeaseTimeToLiveResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err 
- } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLease(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthLease - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipLease(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) } - -var fileDescriptorLease = []byte{ - // 233 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, - 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, - 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45, - 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92, - 0x21, 0xea, 0x94, 0x34, 0xb9, 0x58, 0x7d, 0x40, 0x06, 0x09, 0xf1, 0x71, 0x31, 0x79, 0xba, 0x48, - 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x31, 0x79, 0xba, 0x08, 0x09, 0x70, 0x31, 0x87, 0x84, 0xf8, - 0x48, 0x30, 0x81, 0x05, 0x40, 0x4c, 0xa5, 0x12, 0x2e, 0x11, 0xb0, 0x52, 0xcf, 0xbc, 0x92, 0xd4, - 0xa2, 0xbc, 0xc4, 0x9c, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0xa1, 0x18, 0x2e, 0x31, 0xb0, - 0x78, 0x48, 0x66, 0x6e, 0x6a, 0x48, 0xbe, 0x4f, 0x66, 0x59, 0x2a, 0x54, 0x06, 0x6c, 0x1a, 0xb7, - 0x91, 0x8a, 0x1e, 0xb2, 0xdd, 0x7a, 0xd8, 0xd5, 0x06, 0xe1, 0x30, 0x43, 0xa9, 0x82, 0x4b, 0x14, - 0xcd, 0xd6, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa1, 0x78, 0x2e, 0x71, 0x0c, 0x2d, 0x10, 0x29, - 0xa8, 0xbd, 0xaa, 0x04, 0xec, 0x85, 0x28, 0x0e, 0xc2, 0x65, 
0x8a, 0x93, 0xc4, 0x89, 0x87, 0x72, - 0x0c, 0x17, 0x1e, 0xca, 0x31, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, - 0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xc3, 0xce, 0x18, 0x10, 0x00, 0x00, 0xff, - 0xff, 0x9f, 0xf2, 0x42, 0xe0, 0x91, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/lease/lessor.go b/vendor/github.com/coreos/etcd/lease/lessor.go deleted file mode 100644 index 385bd76d73c..00000000000 --- a/vendor/github.com/coreos/etcd/lease/lessor.go +++ /dev/null @@ -1,602 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "encoding/binary" - "errors" - "math" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/coreos/etcd/lease/leasepb" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/monotime" -) - -const ( - // NoLease is a special LeaseID representing the absence of a lease. - NoLease = LeaseID(0) -) - -var ( - leaseBucketName = []byte("lease") - - forever = monotime.Time(math.MaxInt64) - - ErrNotPrimary = errors.New("not a primary lessor") - ErrLeaseNotFound = errors.New("lease not found") - ErrLeaseExists = errors.New("lease already exists") -) - -type LeaseID int64 - -// RangeDeleter defines an interface with Txn and DeleteRange method. -// We define this interface only for lessor to limit the number -// of methods of mvcc.KV to what lessor actually needs. -// -// Having a minimum interface makes testing easy. -type RangeDeleter interface { - // TxnBegin see comments on mvcc.KV - TxnBegin() int64 - // TxnEnd see comments on mvcc.KV - TxnEnd(txnID int64) error - // TxnDeleteRange see comments on mvcc.KV - TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) -} - -// Lessor owns leases. It can grant, revoke, renew and modify leases for lessee. -type Lessor interface { - // SetRangeDeleter sets the RangeDeleter to the Lessor. - // Lessor deletes the items in the revoked or expired lease from the - // the set RangeDeleter. - SetRangeDeleter(dr RangeDeleter) - - // Grant grants a lease that expires at least after TTL seconds. - Grant(id LeaseID, ttl int64) (*Lease, error) - // Revoke revokes a lease with given ID. The item attached to the - // given lease will be removed. If the ID does not exist, an error - // will be returned. - Revoke(id LeaseID) error - - // Attach attaches given leaseItem to the lease with given LeaseID. - // If the lease does not exist, an error will be returned. - Attach(id LeaseID, items []LeaseItem) error - - // GetLease returns LeaseID for given item. - // If no lease found, NoLease value will be returned. - GetLease(item LeaseItem) LeaseID - - // Detach detaches given leaseItem from the lease with given LeaseID. - // If the lease does not exist, an error will be returned. - Detach(id LeaseID, items []LeaseItem) error - - // Promote promotes the lessor to be the primary lessor. Primary lessor manages - // the expiration and renew of leases. 
- // Newly promoted lessor renew the TTL of all lease to extend + previous TTL. - Promote(extend time.Duration) - - // Demote demotes the lessor from being the primary lessor. - Demote() - - // Renew renews a lease with given ID. It returns the renewed TTL. If the ID does not exist, - // an error will be returned. - Renew(id LeaseID) (int64, error) - - // Lookup gives the lease at a given lease id, if any - Lookup(id LeaseID) *Lease - - // ExpiredLeasesC returns a chan that is used to receive expired leases. - ExpiredLeasesC() <-chan []*Lease - - // Recover recovers the lessor state from the given backend and RangeDeleter. - Recover(b backend.Backend, rd RangeDeleter) - - // Stop stops the lessor for managing leases. The behavior of calling Stop multiple - // times is undefined. - Stop() -} - -// lessor implements Lessor interface. -// TODO: use clockwork for testability. -type lessor struct { - mu sync.Mutex - - // demotec is set when the lessor is the primary. - // demotec will be closed if the lessor is demoted. - demotec chan struct{} - - // TODO: probably this should be a heap with a secondary - // id index. - // Now it is O(N) to loop over the leases to find expired ones. - // We want to make Grant, Revoke, and findExpiredLeases all O(logN) and - // Renew O(1). - // findExpiredLeases and Renew should be the most frequent operations. - leaseMap map[LeaseID]*Lease - - itemMap map[LeaseItem]LeaseID - - // When a lease expires, the lessor will delete the - // leased range (or key) by the RangeDeleter. - rd RangeDeleter - - // backend to persist leases. We only persist lease ID and expiry for now. - // The leased items can be recovered by iterating all the keys in kv. - b backend.Backend - - // minLeaseTTL is the minimum lease TTL that can be granted for a lease. Any - // requests for shorter TTLs are extended to the minimum TTL. - minLeaseTTL int64 - - expiredC chan []*Lease - // stopC is a channel whose closure indicates that the lessor should be stopped. - stopC chan struct{} - // doneC is a channel whose closure indicates that the lessor is stopped. - doneC chan struct{} -} - -func NewLessor(b backend.Backend, minLeaseTTL int64) Lessor { - return newLessor(b, minLeaseTTL) -} - -func newLessor(b backend.Backend, minLeaseTTL int64) *lessor { - l := &lessor{ - leaseMap: make(map[LeaseID]*Lease), - itemMap: make(map[LeaseItem]LeaseID), - b: b, - minLeaseTTL: minLeaseTTL, - // expiredC is a small buffered chan to avoid unnecessary blocking. - expiredC: make(chan []*Lease, 16), - stopC: make(chan struct{}), - doneC: make(chan struct{}), - } - l.initAndRecover() - - go l.runLoop() - - return l -} - -// isPrimary indicates if this lessor is the primary lessor. The primary -// lessor manages lease expiration and renew. -// -// in etcd, raft leader is the primary. Thus there might be two primary -// leaders at the same time (raft allows concurrent leader but with different term) -// for at most a leader election timeout. -// The old primary leader cannot affect the correctness since its proposal has a -// smaller term and will not be committed. -// -// TODO: raft follower do not forward lease management proposals. There might be a -// very small window (within second normally which depends on go scheduling) that -// a raft follow is the primary between the raft leader demotion and lessor demotion. -// Usually this should not be a problem. Lease should not be that sensitive to timing. 
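For readers skimming this hunk, a minimal usage sketch of the Lessor API being deleted may help; it is illustrative only and not part of the patch. It assumes the vendored import paths removed in this series (github.com/coreos/etcd/lease and github.com/coreos/etcd/mvcc/backend) and uses only functions declared in this file plus the NewTmpBackend test helper from mvcc/backend further below:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc/backend"
)

func main() {
	// Temporary bolt-backed backend, as used by the package's own tests.
	be, tmpPath := backend.NewTmpBackend(100*time.Millisecond, 10000)
	defer os.Remove(tmpPath)
	defer be.Close()

	le := lease.NewLessor(be, 5) // minLeaseTTL of 5 seconds
	defer le.Stop()

	// Only the primary lessor expires leases; a standalone user promotes itself.
	le.Promote(0)

	// Grant a 10-second lease and attach a key to it.
	l, err := le.Grant(lease.LeaseID(1), 10)
	if err != nil {
		panic(err)
	}
	if err := le.Attach(l.ID, []lease.LeaseItem{{Key: "foo"}}); err != nil {
		panic(err)
	}

	// Renew resets the expiry and reports the TTL actually in effect.
	if ttl, err := le.Renew(l.ID); err == nil {
		fmt.Println("renewed, ttl:", ttl)
	}

	// Expired leases arrive on ExpiredLeasesC; the consumer revokes them,
	// which deletes attached keys through the RangeDeleter (if one is set).
	go func() {
		for expired := range le.ExpiredLeasesC() {
			for _, exp := range expired {
				le.Revoke(exp.ID)
			}
		}
	}()

	fmt.Println("remaining:", l.Remaining())
}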
-func (le *lessor) isPrimary() bool { - return le.demotec != nil -} - -func (le *lessor) SetRangeDeleter(rd RangeDeleter) { - le.mu.Lock() - defer le.mu.Unlock() - - le.rd = rd -} - -func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) { - if id == NoLease { - return nil, ErrLeaseNotFound - } - - // TODO: when lessor is under high load, it should give out lease - // with longer TTL to reduce renew load. - l := &Lease{ - ID: id, - ttl: ttl, - itemSet: make(map[LeaseItem]struct{}), - revokec: make(chan struct{}), - } - - le.mu.Lock() - defer le.mu.Unlock() - - if _, ok := le.leaseMap[id]; ok { - return nil, ErrLeaseExists - } - - if l.ttl < le.minLeaseTTL { - l.ttl = le.minLeaseTTL - } - - if le.isPrimary() { - l.refresh(0) - } else { - l.forever() - } - - le.leaseMap[id] = l - l.persistTo(le.b) - - return l, nil -} - -func (le *lessor) Revoke(id LeaseID) error { - le.mu.Lock() - - l := le.leaseMap[id] - if l == nil { - le.mu.Unlock() - return ErrLeaseNotFound - } - defer close(l.revokec) - // unlock before doing external work - le.mu.Unlock() - - if le.rd == nil { - return nil - } - - tid := le.rd.TxnBegin() - - // sort keys so deletes are in same order among all members, - // otherwise the backened hashes will be different - keys := l.Keys() - sort.StringSlice(keys).Sort() - for _, key := range keys { - _, _, err := le.rd.TxnDeleteRange(tid, []byte(key), nil) - if err != nil { - panic(err) - } - } - - le.mu.Lock() - defer le.mu.Unlock() - delete(le.leaseMap, l.ID) - // lease deletion needs to be in the same backend transaction with the - // kv deletion. Or we might end up with not executing the revoke or not - // deleting the keys if etcdserver fails in between. - le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID))) - - err := le.rd.TxnEnd(tid) - if err != nil { - panic(err) - } - - return nil -} - -// Renew renews an existing lease. If the given lease does not exist or -// has expired, an error will be returned. -func (le *lessor) Renew(id LeaseID) (int64, error) { - le.mu.Lock() - - unlock := func() { le.mu.Unlock() } - defer func() { unlock() }() - - if !le.isPrimary() { - // forward renew request to primary instead of returning error. - return -1, ErrNotPrimary - } - - demotec := le.demotec - - l := le.leaseMap[id] - if l == nil { - return -1, ErrLeaseNotFound - } - - if l.expired() { - le.mu.Unlock() - unlock = func() {} - select { - // A expired lease might be pending for revoking or going through - // quorum to be revoked. To be accurate, renew request must wait for the - // deletion to complete. - case <-l.revokec: - return -1, ErrLeaseNotFound - // The expired lease might fail to be revoked if the primary changes. - // The caller will retry on ErrNotPrimary. - case <-demotec: - return -1, ErrNotPrimary - case <-le.stopC: - return -1, ErrNotPrimary - } - } - - l.refresh(0) - return l.ttl, nil -} - -func (le *lessor) Lookup(id LeaseID) *Lease { - le.mu.Lock() - defer le.mu.Unlock() - return le.leaseMap[id] -} - -func (le *lessor) Promote(extend time.Duration) { - le.mu.Lock() - defer le.mu.Unlock() - - le.demotec = make(chan struct{}) - - // refresh the expiries of all leases. - for _, l := range le.leaseMap { - l.refresh(extend) - } -} - -func (le *lessor) Demote() { - le.mu.Lock() - defer le.mu.Unlock() - - // set the expiries of all leases to forever - for _, l := range le.leaseMap { - l.forever() - } - - if le.demotec != nil { - close(le.demotec) - le.demotec = nil - } -} - -// Attach attaches items to the lease with given ID. 
When the lease -// expires, the attached items will be automatically removed. -// If the given lease does not exist, an error will be returned. -func (le *lessor) Attach(id LeaseID, items []LeaseItem) error { - le.mu.Lock() - defer le.mu.Unlock() - - l := le.leaseMap[id] - if l == nil { - return ErrLeaseNotFound - } - - l.mu.Lock() - for _, it := range items { - l.itemSet[it] = struct{}{} - le.itemMap[it] = id - } - l.mu.Unlock() - return nil -} - -func (le *lessor) GetLease(item LeaseItem) LeaseID { - le.mu.Lock() - id := le.itemMap[item] - le.mu.Unlock() - return id -} - -// Detach detaches items from the lease with given ID. -// If the given lease does not exist, an error will be returned. -func (le *lessor) Detach(id LeaseID, items []LeaseItem) error { - le.mu.Lock() - defer le.mu.Unlock() - - l := le.leaseMap[id] - if l == nil { - return ErrLeaseNotFound - } - - l.mu.Lock() - for _, it := range items { - delete(l.itemSet, it) - delete(le.itemMap, it) - } - l.mu.Unlock() - return nil -} - -func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) { - le.mu.Lock() - defer le.mu.Unlock() - - le.b = b - le.rd = rd - le.leaseMap = make(map[LeaseID]*Lease) - le.itemMap = make(map[LeaseItem]LeaseID) - le.initAndRecover() -} - -func (le *lessor) ExpiredLeasesC() <-chan []*Lease { - return le.expiredC -} - -func (le *lessor) Stop() { - close(le.stopC) - <-le.doneC -} - -func (le *lessor) runLoop() { - defer close(le.doneC) - - for { - var ls []*Lease - - le.mu.Lock() - if le.isPrimary() { - ls = le.findExpiredLeases() - } - le.mu.Unlock() - - if len(ls) != 0 { - select { - case <-le.stopC: - return - case le.expiredC <- ls: - default: - // the receiver of expiredC is probably busy handling - // other stuff - // let's try this next time after 500ms - } - } - - select { - case <-time.After(500 * time.Millisecond): - case <-le.stopC: - return - } - } -} - -// findExpiredLeases loops all the leases in the leaseMap and returns the expired -// leases that needed to be revoked. -func (le *lessor) findExpiredLeases() []*Lease { - leases := make([]*Lease, 0, 16) - - for _, l := range le.leaseMap { - // TODO: probably should change to <= 100-500 millisecond to - // make up committing latency. - if l.expired() { - leases = append(leases, l) - } - } - - return leases -} - -func (le *lessor) initAndRecover() { - tx := le.b.BatchTx() - tx.Lock() - - tx.UnsafeCreateBucket(leaseBucketName) - _, vs := tx.UnsafeRange(leaseBucketName, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0) - // TODO: copy vs and do decoding outside tx lock if lock contention becomes an issue. - for i := range vs { - var lpb leasepb.Lease - err := lpb.Unmarshal(vs[i]) - if err != nil { - tx.Unlock() - panic("failed to unmarshal lease proto item") - } - ID := LeaseID(lpb.ID) - if lpb.TTL < le.minLeaseTTL { - lpb.TTL = le.minLeaseTTL - } - le.leaseMap[ID] = &Lease{ - ID: ID, - ttl: lpb.TTL, - // itemSet will be filled in when recover key-value pairs - // set expiry to forever, refresh when promoted - itemSet: make(map[LeaseItem]struct{}), - expiry: forever, - revokec: make(chan struct{}), - } - } - tx.Unlock() - - le.b.ForceCommit() -} - -type Lease struct { - ID LeaseID - ttl int64 // time to live in seconds - // expiry is time when lease should expire; must be 64-bit aligned. 
- expiry monotime.Time - - // mu protects concurrent accesses to itemSet - mu sync.RWMutex - itemSet map[LeaseItem]struct{} - revokec chan struct{} -} - -func (l *Lease) expired() bool { - return l.Remaining() <= 0 -} - -func (l *Lease) persistTo(b backend.Backend) { - key := int64ToBytes(int64(l.ID)) - - lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.ttl)} - val, err := lpb.Marshal() - if err != nil { - panic("failed to marshal lease proto item") - } - - b.BatchTx().Lock() - b.BatchTx().UnsafePut(leaseBucketName, key, val) - b.BatchTx().Unlock() -} - -// TTL returns the TTL of the Lease. -func (l *Lease) TTL() int64 { - return l.ttl -} - -// refresh refreshes the expiry of the lease. -func (l *Lease) refresh(extend time.Duration) { - t := monotime.Now().Add(extend + time.Duration(l.ttl)*time.Second) - atomic.StoreUint64((*uint64)(&l.expiry), uint64(t)) -} - -// forever sets the expiry of lease to be forever. -func (l *Lease) forever() { atomic.StoreUint64((*uint64)(&l.expiry), uint64(forever)) } - -// Keys returns all the keys attached to the lease. -func (l *Lease) Keys() []string { - l.mu.RLock() - keys := make([]string, 0, len(l.itemSet)) - for k := range l.itemSet { - keys = append(keys, k.Key) - } - l.mu.RUnlock() - return keys -} - -// Remaining returns the remaining time of the lease. -func (l *Lease) Remaining() time.Duration { - t := monotime.Time(atomic.LoadUint64((*uint64)(&l.expiry))) - return time.Duration(t - monotime.Now()) -} - -type LeaseItem struct { - Key string -} - -func int64ToBytes(n int64) []byte { - bytes := make([]byte, 8) - binary.BigEndian.PutUint64(bytes, uint64(n)) - return bytes -} - -// FakeLessor is a fake implementation of Lessor interface. -// Used for testing only. -type FakeLessor struct{} - -func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {} - -func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil } - -func (fl *FakeLessor) Revoke(id LeaseID) error { return nil } - -func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil } - -func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 } -func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil } - -func (fl *FakeLessor) Promote(extend time.Duration) {} - -func (fl *FakeLessor) Demote() {} - -func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil } - -func (le *FakeLessor) Lookup(id LeaseID) *Lease { return nil } - -func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil } - -func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {} - -func (fl *FakeLessor) Stop() {} diff --git a/vendor/github.com/coreos/etcd/main.go b/vendor/github.com/coreos/etcd/main.go deleted file mode 100644 index 0b7357376e6..00000000000 --- a/vendor/github.com/coreos/etcd/main.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
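The "must be 64-bit aligned" comment on the expiry field above (and on the backend struct's size/commits fields later in this patch) refers to the sync/atomic rule that 64-bit atomic operations on 32-bit platforms require 64-bit-aligned words, which Go guarantees only for the first word of a struct or allocated block. A generic sketch of that convention, not taken from etcd:

package main

import (
	"fmt"
	"sync/atomic"
)

// holder keeps its atomically accessed 64-bit counter first so the field is
// 64-bit aligned even on 32-bit architectures (ARM, x86-32), where an
// unaligned 64-bit atomic access would panic at runtime.
type holder struct {
	hits int64 // accessed with sync/atomic; keep first for alignment
	name string
}

func main() {
	h := holder{name: "example"}
	atomic.AddInt64(&h.hits, 1)
	fmt.Println(h.name, atomic.LoadInt64(&h.hits))
}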
- -// Package main is a simple wrapper of the real etcd entrypoint package -// (located at github.com/coreos/etcd/etcdmain) to ensure that etcd is still -// "go getable"; e.g. `go get github.com/coreos/etcd` works as expected and -// builds a binary in $GOBIN/etcd -// -// This package should NOT be extended or modified in any way; to modify the -// etcd binary, work in the `github.com/coreos/etcd/etcdmain` package. -// -package main - -import "github.com/coreos/etcd/etcdmain" - -func main() { - etcdmain.Main() -} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go deleted file mode 100644 index e5e0028f94b..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend - -import ( - "fmt" - "hash/crc32" - "io" - "io/ioutil" - "os" - "path/filepath" - "sync" - "sync/atomic" - "time" - - "github.com/boltdb/bolt" - "github.com/coreos/pkg/capnslog" -) - -var ( - defaultBatchLimit = 10000 - defaultBatchInterval = 100 * time.Millisecond - - defragLimit = 10000 - - // InitialMmapSize is the initial size of the mmapped region. Setting this larger than - // the potential max db size can prevent writer from blocking reader. - // This only works for linux. - InitialMmapSize = int64(10 * 1024 * 1024 * 1024) - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend") -) - -const ( - // DefaultQuotaBytes is the number of bytes the backend Size may - // consume before exceeding the space quota. - DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB - // MaxQuotaBytes is the maximum number of bytes suggested for a backend - // quota. A larger quota may lead to degraded performance. - MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB -) - -type Backend interface { - BatchTx() BatchTx - Snapshot() Snapshot - Hash(ignores map[IgnoreKey]struct{}) (uint32, error) - // Size returns the current size of the backend. - Size() int64 - Defrag() error - ForceCommit() - Close() error -} - -type Snapshot interface { - // Size gets the size of the snapshot. - Size() int64 - // WriteTo writes the snapshot into the given writer. - WriteTo(w io.Writer) (n int64, err error) - // Close closes the snapshot. 
- Close() error -} - -type backend struct { - // size and commits are used with atomic operations so they must be - // 64-bit aligned, otherwise 32-bit tests will crash - - // size is the number of bytes in the backend - size int64 - // commits counts number of commits since start - commits int64 - - mu sync.RWMutex - db *bolt.DB - - batchInterval time.Duration - batchLimit int - batchTx *batchTx - - stopc chan struct{} - donec chan struct{} -} - -func New(path string, d time.Duration, limit int) Backend { - return newBackend(path, d, limit) -} - -func NewDefaultBackend(path string) Backend { - return newBackend(path, defaultBatchInterval, defaultBatchLimit) -} - -func newBackend(path string, d time.Duration, limit int) *backend { - db, err := bolt.Open(path, 0600, boltOpenOptions) - if err != nil { - plog.Panicf("cannot open database at %s (%v)", path, err) - } - - b := &backend{ - db: db, - - batchInterval: d, - batchLimit: limit, - - stopc: make(chan struct{}), - donec: make(chan struct{}), - } - b.batchTx = newBatchTx(b) - go b.run() - return b -} - -// BatchTx returns the current batch tx in coalescer. The tx can be used for read and -// write operations. The write result can be retrieved within the same tx immediately. -// The write result is isolated with other txs until the current one get committed. -func (b *backend) BatchTx() BatchTx { - return b.batchTx -} - -// ForceCommit forces the current batching tx to commit. -func (b *backend) ForceCommit() { - b.batchTx.Commit() -} - -func (b *backend) Snapshot() Snapshot { - b.batchTx.Commit() - - b.mu.RLock() - defer b.mu.RUnlock() - tx, err := b.db.Begin(false) - if err != nil { - plog.Fatalf("cannot begin tx (%s)", err) - } - return &snapshot{tx} -} - -type IgnoreKey struct { - Bucket string - Key string -} - -func (b *backend) Hash(ignores map[IgnoreKey]struct{}) (uint32, error) { - h := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - - b.mu.RLock() - defer b.mu.RUnlock() - err := b.db.View(func(tx *bolt.Tx) error { - c := tx.Cursor() - for next, _ := c.First(); next != nil; next, _ = c.Next() { - b := tx.Bucket(next) - if b == nil { - return fmt.Errorf("cannot get hash of bucket %s", string(next)) - } - h.Write(next) - b.ForEach(func(k, v []byte) error { - bk := IgnoreKey{Bucket: string(next), Key: string(k)} - if _, ok := ignores[bk]; !ok { - h.Write(k) - h.Write(v) - } - return nil - }) - } - return nil - }) - - if err != nil { - return 0, err - } - - return h.Sum32(), nil -} - -func (b *backend) Size() int64 { - return atomic.LoadInt64(&b.size) -} - -func (b *backend) run() { - defer close(b.donec) - t := time.NewTimer(b.batchInterval) - defer t.Stop() - for { - select { - case <-t.C: - case <-b.stopc: - b.batchTx.CommitAndStop() - return - } - b.batchTx.Commit() - t.Reset(b.batchInterval) - } -} - -func (b *backend) Close() error { - close(b.stopc) - <-b.donec - return b.db.Close() -} - -// Commits returns total number of commits since start -func (b *backend) Commits() int64 { - return atomic.LoadInt64(&b.commits) -} - -func (b *backend) Defrag() error { - err := b.defrag() - if err != nil { - return err - } - - // commit to update metadata like db.size - b.batchTx.Commit() - - return nil -} - -func (b *backend) defrag() error { - // TODO: make this non-blocking? - // lock batchTx to ensure nobody is using previous tx, and then - // close previous ongoing tx. - b.batchTx.Lock() - defer b.batchTx.Unlock() - - // lock database after lock tx to avoid deadlock. 
- b.mu.Lock() - defer b.mu.Unlock() - - b.batchTx.commit(true) - b.batchTx.tx = nil - - tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions) - if err != nil { - return err - } - - err = defragdb(b.db, tmpdb, defragLimit) - - if err != nil { - tmpdb.Close() - os.RemoveAll(tmpdb.Path()) - return err - } - - dbp := b.db.Path() - tdbp := tmpdb.Path() - - err = b.db.Close() - if err != nil { - plog.Fatalf("cannot close database (%s)", err) - } - err = tmpdb.Close() - if err != nil { - plog.Fatalf("cannot close database (%s)", err) - } - err = os.Rename(tdbp, dbp) - if err != nil { - plog.Fatalf("cannot rename database (%s)", err) - } - - b.db, err = bolt.Open(dbp, 0600, boltOpenOptions) - if err != nil { - plog.Panicf("cannot open database at %s (%v)", dbp, err) - } - b.batchTx.tx, err = b.db.Begin(true) - if err != nil { - plog.Fatalf("cannot begin tx (%s)", err) - } - - return nil -} - -func defragdb(odb, tmpdb *bolt.DB, limit int) error { - // open a tx on tmpdb for writes - tmptx, err := tmpdb.Begin(true) - if err != nil { - return err - } - - // open a tx on old db for read - tx, err := odb.Begin(false) - if err != nil { - return err - } - defer tx.Rollback() - - c := tx.Cursor() - - count := 0 - for next, _ := c.First(); next != nil; next, _ = c.Next() { - b := tx.Bucket(next) - if b == nil { - return fmt.Errorf("backend: cannot defrag bucket %s", string(next)) - } - - tmpb, berr := tmptx.CreateBucketIfNotExists(next) - tmpb.FillPercent = 0.9 // for seq write in for each - if berr != nil { - return berr - } - - b.ForEach(func(k, v []byte) error { - count++ - if count > limit { - err = tmptx.Commit() - if err != nil { - return err - } - tmptx, err = tmpdb.Begin(true) - if err != nil { - return err - } - tmpb = tmptx.Bucket(next) - tmpb.FillPercent = 0.9 // for seq write in for each - - count = 0 - } - return tmpb.Put(k, v) - }) - } - - return tmptx.Commit() -} - -// NewTmpBackend creates a backend implementation for testing. -func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) { - dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test") - if err != nil { - plog.Fatal(err) - } - tmpPath := filepath.Join(dir, "database") - return newBackend(tmpPath, batchInterval, batchLimit), tmpPath -} - -func NewDefaultTmpBackend() (*backend, string) { - return NewTmpBackend(defaultBatchInterval, defaultBatchLimit) -} - -type snapshot struct { - *bolt.Tx -} - -func (s *snapshot) Close() error { return s.Tx.Rollback() } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go deleted file mode 100644 index 04fea1e9477..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
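A short usage sketch of the Backend/BatchTx pair deleted here, illustrative only and not part of the patch: it creates a temporary backend via the NewDefaultTmpBackend helper defined just above, writes and reads under the batch-tx lock, then forces a commit and streams a snapshot. The bucket name "demo" and the ".snap" file suffix are arbitrary choices for the example.

package main

import (
	"fmt"
	"os"

	"github.com/coreos/etcd/mvcc/backend"
)

func main() {
	be, tmpPath := backend.NewDefaultTmpBackend()
	defer os.Remove(tmpPath)
	defer be.Close()

	// All Unsafe* calls must be made while holding the batch tx lock.
	tx := be.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("demo"))
	tx.UnsafePut([]byte("demo"), []byte("a"), []byte("1"))
	tx.UnsafePut([]byte("demo"), []byte("b"), []byte("2"))
	// Empty end key: point lookup. Non-empty end key: range scan [key, end).
	keys, vals := tx.UnsafeRange([]byte("demo"), []byte("a"), []byte("c"), 0)
	tx.Unlock()

	for i := range keys {
		fmt.Printf("%s=%s\n", keys[i], vals[i])
	}

	// Flush the pending batch to bolt and stream a consistent snapshot.
	be.ForceCommit()
	snap := be.Snapshot()
	defer snap.Close()
	f, err := os.Create(tmpPath + ".snap")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err := snap.WriteTo(f); err != nil {
		panic(err)
	}
	fmt.Println("backend size:", be.Size())
}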
- -package backend - -import ( - "bytes" - "sync" - "sync/atomic" - "time" - - "github.com/boltdb/bolt" -) - -type BatchTx interface { - Lock() - Unlock() - UnsafeCreateBucket(name []byte) - UnsafePut(bucketName []byte, key []byte, value []byte) - UnsafeSeqPut(bucketName []byte, key []byte, value []byte) - UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) - UnsafeDelete(bucketName []byte, key []byte) - UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error - Commit() - CommitAndStop() -} - -type batchTx struct { - sync.Mutex - tx *bolt.Tx - backend *backend - pending int -} - -func newBatchTx(backend *backend) *batchTx { - tx := &batchTx{backend: backend} - tx.Commit() - return tx -} - -func (t *batchTx) UnsafeCreateBucket(name []byte) { - _, err := t.tx.CreateBucket(name) - if err != nil && err != bolt.ErrBucketExists { - plog.Fatalf("cannot create bucket %s (%v)", name, err) - } - t.pending++ -} - -// UnsafePut must be called holding the lock on the tx. -func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) { - t.unsafePut(bucketName, key, value, false) -} - -// UnsafeSeqPut must be called holding the lock on the tx. -func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) { - t.unsafePut(bucketName, key, value, true) -} - -func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) { - bucket := t.tx.Bucket(bucketName) - if bucket == nil { - plog.Fatalf("bucket %s does not exist", bucketName) - } - if seq { - // it is useful to increase fill percent when the workloads are mostly append-only. - // this can delay the page split and reduce space usage. - bucket.FillPercent = 0.9 - } - if err := bucket.Put(key, value); err != nil { - plog.Fatalf("cannot put key into bucket (%v)", err) - } - t.pending++ -} - -// UnsafeRange must be called holding the lock on the tx. -func (t *batchTx) UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) { - bucket := t.tx.Bucket(bucketName) - if bucket == nil { - plog.Fatalf("bucket %s does not exist", bucketName) - } - - if len(endKey) == 0 { - if v := bucket.Get(key); v == nil { - return keys, vs - } else { - return append(keys, key), append(vs, v) - } - } - - c := bucket.Cursor() - for ck, cv := c.Seek(key); ck != nil && bytes.Compare(ck, endKey) < 0; ck, cv = c.Next() { - vs = append(vs, cv) - keys = append(keys, ck) - if limit > 0 && limit == int64(len(keys)) { - break - } - } - - return keys, vs -} - -// UnsafeDelete must be called holding the lock on the tx. -func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) { - bucket := t.tx.Bucket(bucketName) - if bucket == nil { - plog.Fatalf("bucket %s does not exist", bucketName) - } - err := bucket.Delete(key) - if err != nil { - plog.Fatalf("cannot delete key from bucket (%v)", err) - } - t.pending++ -} - -// UnsafeForEach must be called holding the lock on the tx. -func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { - b := t.tx.Bucket(bucketName) - if b == nil { - // bucket does not exist - return nil - } - return b.ForEach(visitor) -} - -// Commit commits a previous tx and begins a new writable one. -func (t *batchTx) Commit() { - t.Lock() - defer t.Unlock() - t.commit(false) -} - -// CommitAndStop commits the previous tx and do not create a new one. 
-func (t *batchTx) CommitAndStop() { - t.Lock() - defer t.Unlock() - t.commit(true) -} - -func (t *batchTx) Unlock() { - if t.pending >= t.backend.batchLimit { - t.commit(false) - t.pending = 0 - } - t.Mutex.Unlock() -} - -func (t *batchTx) commit(stop bool) { - var err error - // commit the last tx - if t.tx != nil { - if t.pending == 0 && !stop { - t.backend.mu.RLock() - defer t.backend.mu.RUnlock() - - // batchTx.commit(true) calls *bolt.Tx.Commit, which - // initializes *bolt.Tx.db and *bolt.Tx.meta as nil, - // and subsequent *bolt.Tx.Size() call panics. - // - // This nil pointer reference panic happens when: - // 1. batchTx.commit(false) from newBatchTx - // 2. batchTx.commit(true) from stopping backend - // 3. batchTx.commit(false) from inflight mvcc Hash call - // - // Check if db is nil to prevent this panic - if t.tx.DB() != nil { - atomic.StoreInt64(&t.backend.size, t.tx.Size()) - } - return - } - start := time.Now() - // gofail: var beforeCommit struct{} - err = t.tx.Commit() - // gofail: var afterCommit struct{} - commitDurations.Observe(time.Since(start).Seconds()) - atomic.AddInt64(&t.backend.commits, 1) - - t.pending = 0 - if err != nil { - plog.Fatalf("cannot commit tx (%s)", err) - } - } - - if stop { - return - } - - t.backend.mu.RLock() - defer t.backend.mu.RUnlock() - // begin a new tx - t.tx, err = t.backend.db.Begin(true) - if err != nil { - plog.Fatalf("cannot begin tx (%s)", err) - } - atomic.StoreInt64(&t.backend.size, t.tx.Size()) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go deleted file mode 100644 index 92019c18415..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !linux - -package backend - -import "github.com/boltdb/bolt" - -var boltOpenOptions *bolt.Options = nil diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go deleted file mode 100644 index 4ee9b05a77c..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
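The commit path above flushes in three ways: Unlock commits once pending reaches batchLimit, the run loop commits every batchInterval, and ForceCommit/CommitAndStop commit explicitly. The following illustrative sketch (not part of the patch) makes the limit-triggered commit visible through the exported Commits counter, using a long interval so only the batch limit fires:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/coreos/etcd/mvcc/backend"
)

func main() {
	// A one-hour interval keeps the periodic committer quiet, so only the
	// batch limit of 3 pending changes triggers commits in this example.
	be, tmpPath := backend.NewTmpBackend(time.Hour, 3)
	defer os.Remove(tmpPath)
	defer be.Close()

	tx := be.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("demo")) // already counts as one pending change
	tx.Unlock()

	before := be.Commits()
	for i := 0; i < 3; i++ {
		tx.Lock()
		tx.UnsafeSeqPut([]byte("demo"), []byte{byte(i)}, []byte("v"))
		tx.Unlock() // commits automatically once pending >= batchLimit
	}
	fmt.Println("commits without ForceCommit:", be.Commits()-before)
}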
- -package backend - -import ( - "syscall" - - "github.com/boltdb/bolt" -) - -// syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead -// which can speed up entire-database read with boltdb. We want to -// enable MAP_POPULATE for faster key-value store recovery in storage -// package. If your kernel version is lower than 2.6.23 -// (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might -// silently ignore this flag. Please update your kernel to prevent this. -var boltOpenOptions = &bolt.Options{ - MmapFlags: syscall.MAP_POPULATE, - InitialMmapSize: int(InitialMmapSize), -} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/doc.go b/vendor/github.com/coreos/etcd/mvcc/backend/doc.go deleted file mode 100644 index 9cc42fa793c..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package backend defines a standard interface for etcd's backend MVCC storage. -package backend diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go deleted file mode 100644 index 34a56a91956..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend - -import "github.com/prometheus/client_golang/prometheus" - -var ( - commitDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "disk", - Name: "backend_commit_duration_seconds", - Help: "The latency distributions of commit called by backend.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) -) - -func init() { - prometheus.MustRegister(commitDurations) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/doc.go b/vendor/github.com/coreos/etcd/mvcc/doc.go deleted file mode 100644 index ad5be03086f..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package mvcc defines etcd's stable MVCC storage. -package mvcc diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go deleted file mode 100644 index 397098a7ba7..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/index.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "sort" - "sync" - - "github.com/google/btree" -) - -type index interface { - Get(key []byte, atRev int64) (rev, created revision, ver int64, err error) - Range(key, end []byte, atRev int64) ([][]byte, []revision) - Put(key []byte, rev revision) - Tombstone(key []byte, rev revision) error - RangeSince(key, end []byte, rev int64) []revision - Compact(rev int64) map[revision]struct{} - Equal(b index) bool - Insert(ki *keyIndex) -} - -type treeIndex struct { - sync.RWMutex - tree *btree.BTree -} - -func newTreeIndex() index { - return &treeIndex{ - tree: btree.New(32), - } -} - -func (ti *treeIndex) Put(key []byte, rev revision) { - keyi := &keyIndex{key: key} - - ti.Lock() - defer ti.Unlock() - item := ti.tree.Get(keyi) - if item == nil { - keyi.put(rev.main, rev.sub) - ti.tree.ReplaceOrInsert(keyi) - return - } - okeyi := item.(*keyIndex) - okeyi.put(rev.main, rev.sub) -} - -func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { - keyi := &keyIndex{key: key} - - ti.RLock() - defer ti.RUnlock() - item := ti.tree.Get(keyi) - if item == nil { - return revision{}, revision{}, 0, ErrRevisionNotFound - } - - keyi = item.(*keyIndex) - return keyi.get(atRev) -} - -func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) { - if end == nil { - rev, _, _, err := ti.Get(key, atRev) - if err != nil { - return nil, nil - } - return [][]byte{key}, []revision{rev} - } - - keyi := &keyIndex{key: key} - endi := &keyIndex{key: end} - - ti.RLock() - defer ti.RUnlock() - - ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { - if len(endi.key) > 0 && !item.Less(endi) { - return false - } - curKeyi := item.(*keyIndex) - rev, _, _, err := curKeyi.get(atRev) - if err != nil { - return true - } - revs = append(revs, rev) - keys = append(keys, curKeyi.key) - return true - }) - - return keys, revs -} - -func (ti *treeIndex) Tombstone(key []byte, rev revision) error { - keyi := &keyIndex{key: key} - - ti.Lock() - defer ti.Unlock() - item := ti.tree.Get(keyi) - if item == nil { - return ErrRevisionNotFound - } - - ki := item.(*keyIndex) - return ki.tombstone(rev.main, rev.sub) -} - -// 
RangeSince returns all revisions from key(including) to end(excluding) -// at or after the given rev. The returned slice is sorted in the order -// of revision. -func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { - ti.RLock() - defer ti.RUnlock() - - keyi := &keyIndex{key: key} - if end == nil { - item := ti.tree.Get(keyi) - if item == nil { - return nil - } - keyi = item.(*keyIndex) - return keyi.since(rev) - } - - endi := &keyIndex{key: end} - var revs []revision - ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool { - if len(endi.key) > 0 && !item.Less(endi) { - return false - } - curKeyi := item.(*keyIndex) - revs = append(revs, curKeyi.since(rev)...) - return true - }) - sort.Sort(revisions(revs)) - - return revs -} - -func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { - available := make(map[revision]struct{}) - var emptyki []*keyIndex - plog.Printf("store.index: compact %d", rev) - // TODO: do not hold the lock for long time? - // This is probably OK. Compacting 10M keys takes O(10ms). - ti.Lock() - defer ti.Unlock() - ti.tree.Ascend(compactIndex(rev, available, &emptyki)) - for _, ki := range emptyki { - item := ti.tree.Delete(ki) - if item == nil { - plog.Panic("store.index: unexpected delete failure during compaction") - } - } - return available -} - -func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool { - return func(i btree.Item) bool { - keyi := i.(*keyIndex) - keyi.compact(rev, available) - if keyi.isEmpty() { - *emptyki = append(*emptyki, keyi) - } - return true - } -} - -func (a *treeIndex) Equal(bi index) bool { - b := bi.(*treeIndex) - - if a.tree.Len() != b.tree.Len() { - return false - } - - equal := true - - a.tree.Ascend(func(item btree.Item) bool { - aki := item.(*keyIndex) - bki := b.tree.Get(item).(*keyIndex) - if !aki.equal(bki) { - equal = false - return false - } - return true - }) - - return equal -} - -func (ti *treeIndex) Insert(ki *keyIndex) { - ti.Lock() - defer ti.Unlock() - ti.tree.ReplaceOrInsert(ki) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go deleted file mode 100644 index 983c64e2f6b..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/key_index.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "bytes" - "errors" - "fmt" - - "github.com/google/btree" -) - -var ( - ErrRevisionNotFound = errors.New("mvcc: revision not found") -) - -// keyIndex stores the revisions of a key in the backend. -// Each keyIndex has at least one key generation. -// Each generation might have several key versions. -// Tombstone on a key appends an tombstone version at the end -// of the current generation and creates a new empty generation. -// Each version of a key has an index pointing to the backend. 
-// -// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo" -// generate a keyIndex: -// key: "foo" -// rev: 5 -// generations: -// {empty} -// {4.0, 5.0(t)} -// {1.0, 2.0, 3.0(t)} -// -// Compact a keyIndex removes the versions with smaller or equal to -// rev except the largest one. If the generation becomes empty -// during compaction, it will be removed. if all the generations get -// removed, the keyIndex should be removed. - -// For example: -// compact(2) on the previous example -// generations: -// {empty} -// {4.0, 5.0(t)} -// {2.0, 3.0(t)} -// -// compact(4) -// generations: -// {empty} -// {4.0, 5.0(t)} -// -// compact(5): -// generations: -// {empty} -> key SHOULD be removed. -// -// compact(6): -// generations: -// {empty} -> key SHOULD be removed. -type keyIndex struct { - key []byte - modified revision // the main rev of the last modification - generations []generation -} - -// put puts a revision to the keyIndex. -func (ki *keyIndex) put(main int64, sub int64) { - rev := revision{main: main, sub: sub} - - if !rev.GreaterThan(ki.modified) { - plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified) - } - if len(ki.generations) == 0 { - ki.generations = append(ki.generations, generation{}) - } - g := &ki.generations[len(ki.generations)-1] - if len(g.revs) == 0 { // create a new key - keysGauge.Inc() - g.created = rev - } - g.revs = append(g.revs, rev) - g.ver++ - ki.modified = rev -} - -func (ki *keyIndex) restore(created, modified revision, ver int64) { - if len(ki.generations) != 0 { - plog.Panicf("store.keyindex: cannot restore non-empty keyIndex") - } - - ki.modified = modified - g := generation{created: created, ver: ver, revs: []revision{modified}} - ki.generations = append(ki.generations, g) - keysGauge.Inc() -} - -// tombstone puts a revision, pointing to a tombstone, to the keyIndex. -// It also creates a new empty generation in the keyIndex. -// It returns ErrRevisionNotFound when tombstone on an empty generation. -func (ki *keyIndex) tombstone(main int64, sub int64) error { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key)) - } - if ki.generations[len(ki.generations)-1].isEmpty() { - return ErrRevisionNotFound - } - ki.put(main, sub) - ki.generations = append(ki.generations, generation{}) - keysGauge.Dec() - return nil -} - -// get gets the modified, created revision and version of the key that satisfies the given atRev. -// Rev must be higher than or equal to the given atRev. -func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) - } - g := ki.findGeneration(atRev) - if g.isEmpty() { - return revision{}, revision{}, 0, ErrRevisionNotFound - } - - n := g.walk(func(rev revision) bool { return rev.main > atRev }) - if n != -1 { - return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil - } - - return revision{}, revision{}, 0, ErrRevisionNotFound -} - -// since returns revisions since the given rev. Only the revision with the -// largest sub revision will be returned if multiple revisions have the same -// main revision. 
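The generation model documented above is easiest to verify with a concrete trace. This test-style sketch is illustrative only and assumes it lives inside package mvcc, since keyIndex and revision are unexported there; it replays the doc comment's put/tombstone sequence and checks what get reports at a few revisions:

package mvcc

import "testing"

func TestKeyIndexGenerationExample(t *testing.T) {
	// put(1.0); put(2.0); tombstone(3.0); put(4.0); tombstone(5.0) on "foo",
	// exactly as in the keyIndex doc comment above.
	ki := &keyIndex{key: []byte("foo")}
	ki.put(1, 0)
	ki.put(2, 0)
	if err := ki.tombstone(3, 0); err != nil {
		t.Fatal(err)
	}
	ki.put(4, 0)
	if err := ki.tombstone(5, 0); err != nil {
		t.Fatal(err)
	}

	// At rev 2 the first generation applies: created at rev 1, version 2.
	mod, created, ver, err := ki.get(2)
	if err != nil || mod.main != 2 || created.main != 1 || ver != 2 {
		t.Fatalf("get(2) = %v %v %v %v", mod, created, ver, err)
	}

	// At rev 3 the key has just been tombstoned, so it is not found.
	if _, _, _, err := ki.get(3); err != ErrRevisionNotFound {
		t.Fatalf("get(3) err = %v, want ErrRevisionNotFound", err)
	}

	// At rev 4 the second generation applies: created at rev 4, version 1.
	if _, created, ver, err := ki.get(4); err != nil || created.main != 4 || ver != 1 {
		t.Fatalf("get(4) = %v %v %v", created, ver, err)
	}
}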
-func (ki *keyIndex) since(rev int64) []revision { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) - } - since := revision{rev, 0} - var gi int - // find the generations to start checking - for gi = len(ki.generations) - 1; gi > 0; gi-- { - g := ki.generations[gi] - if g.isEmpty() { - continue - } - if since.GreaterThan(g.created) { - break - } - } - - var revs []revision - var last int64 - for ; gi < len(ki.generations); gi++ { - for _, r := range ki.generations[gi].revs { - if since.GreaterThan(r) { - continue - } - if r.main == last { - // replace the revision with a new one that has higher sub value, - // because the original one should not be seen by external - revs[len(revs)-1] = r - continue - } - revs = append(revs, r) - last = r.main - } - } - return revs -} - -// compact compacts a keyIndex by removing the versions with smaller or equal -// revision than the given atRev except the largest one (If the largest one is -// a tombstone, it will not be kept). -// If a generation becomes empty during compaction, it will be removed. -func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { - if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key)) - } - - // walk until reaching the first revision that has an revision smaller or equal to - // the atRev. - // add it to the available map - f := func(rev revision) bool { - if rev.main <= atRev { - available[rev] = struct{}{} - return false - } - return true - } - - i, g := 0, &ki.generations[0] - // find first generation includes atRev or created after atRev - for i < len(ki.generations)-1 { - if tomb := g.revs[len(g.revs)-1].main; tomb > atRev { - break - } - i++ - g = &ki.generations[i] - } - - if !g.isEmpty() { - n := g.walk(f) - // remove the previous contents. - if n != -1 { - g.revs = g.revs[n:] - } - // remove any tombstone - if len(g.revs) == 1 && i != len(ki.generations)-1 { - delete(available, g.revs[0]) - i++ - } - } - // remove the previous generations. - ki.generations = ki.generations[i:] - return -} - -func (ki *keyIndex) isEmpty() bool { - return len(ki.generations) == 1 && ki.generations[0].isEmpty() -} - -// findGeneration finds out the generation of the keyIndex that the -// given rev belongs to. If the given rev is at the gap of two generations, -// which means that the key does not exist at the given rev, it returns nil. -func (ki *keyIndex) findGeneration(rev int64) *generation { - lastg := len(ki.generations) - 1 - cg := lastg - - for cg >= 0 { - if len(ki.generations[cg].revs) == 0 { - cg-- - continue - } - g := ki.generations[cg] - if cg != lastg { - if tomb := g.revs[len(g.revs)-1].main; tomb <= rev { - return nil - } - } - if g.revs[0].main <= rev { - return &ki.generations[cg] - } - cg-- - } - return nil -} - -func (a *keyIndex) Less(b btree.Item) bool { - return bytes.Compare(a.key, b.(*keyIndex).key) == -1 -} - -func (a *keyIndex) equal(b *keyIndex) bool { - if !bytes.Equal(a.key, b.key) { - return false - } - if a.modified != b.modified { - return false - } - if len(a.generations) != len(b.generations) { - return false - } - for i := range a.generations { - ag, bg := a.generations[i], b.generations[i] - if !ag.equal(bg) { - return false - } - } - return true -} - -func (ki *keyIndex) String() string { - var s string - for _, g := range ki.generations { - s += g.String() - } - return s -} - -// generation contains multiple revisions of a key. 
-type generation struct { - ver int64 - created revision // when the generation is created (put in first revision). - revs []revision -} - -func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 } - -// walk walks through the revisions in the generation in descending order. -// It passes the revision to the given function. -// walk returns until: 1. it finishes walking all pairs 2. the function returns false. -// walk returns the position at where it stopped. If it stopped after -// finishing walking, -1 will be returned. -func (g *generation) walk(f func(rev revision) bool) int { - l := len(g.revs) - for i := range g.revs { - ok := f(g.revs[l-i-1]) - if !ok { - return l - i - 1 - } - } - return -1 -} - -func (g *generation) String() string { - return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs) -} - -func (a generation) equal(b generation) bool { - if a.ver != b.ver { - return false - } - if len(a.revs) != len(b.revs) { - return false - } - - for i := range a.revs { - ar, br := a.revs[i], b.revs[i] - if ar != br { - return false - } - } - return true -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go deleted file mode 100644 index c851c8725e8..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kv.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -type RangeOptions struct { - Limit int64 - Rev int64 - Count bool -} - -type RangeResult struct { - KVs []mvccpb.KeyValue - Rev int64 - Count int -} - -type KV interface { - // Rev returns the current revision of the KV. - Rev() int64 - - // FirstRev returns the first revision of the KV. - // After a compaction, the first revision increases to the compaction - // revision. - FirstRev() int64 - - // Range gets the keys in the range at rangeRev. - // The returned rev is the current revision of the KV when the operation is executed. - // If rangeRev <=0, range gets the keys at currentRev. - // If `end` is nil, the request returns the key. - // If `end` is not nil and not empty, it gets the keys in range [key, range_end). - // If `end` is not nil and empty, it gets the keys greater than or equal to key. - // Limit limits the number of keys returned. - // If the required rev is compacted, ErrCompacted will be returned. - Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) - - // Put puts the given key, value into the store. Put also takes additional argument lease to - // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease - // id. - // A put also increases the rev of the store, and generates one event in the event history. - // The returned rev is the current revision of the KV when the operation is executed. 
- Put(key, value []byte, lease lease.LeaseID) (rev int64) - - // DeleteRange deletes the given range from the store. - // A deleteRange increases the rev of the store if any key in the range exists. - // The number of key deleted will be returned. - // The returned rev is the current revision of the KV when the operation is executed. - // It also generates one event for each key delete in the event history. - // if the `end` is nil, deleteRange deletes the key. - // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end). - DeleteRange(key, end []byte) (n, rev int64) - - // TxnBegin begins a txn. Only Txn prefixed operation can be executed, others will be blocked - // until txn ends. Only one on-going txn is allowed. - // TxnBegin returns an int64 txn ID. - // All txn prefixed operations with same txn ID will be done with the same rev. - TxnBegin() int64 - // TxnEnd ends the on-going txn with txn ID. If the on-going txn ID is not matched, error is returned. - TxnEnd(txnID int64) error - // TxnRange returns the current revision of the KV when the operation is executed. - TxnRange(txnID int64, key, end []byte, ro RangeOptions) (r *RangeResult, err error) - TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) - TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) - - // Compact frees all superseded keys with revisions less than rev. - Compact(rev int64) (<-chan struct{}, error) - - // Hash retrieves the hash of KV state and revision. - // This method is designed for consistency checking purpose. - Hash() (hash uint32, revision int64, err error) - - // Commit commits txns into the underlying backend. - Commit() - - // Restore restores the KV store from a backend. - Restore(b backend.Backend) error - Close() error -} - -// WatchableKV is a KV that can be watched. -type WatchableKV interface { - KV - Watchable -} - -// Watchable is the interface that wraps the NewWatchStream function. -type Watchable interface { - // NewWatchStream returns a WatchStream that can be used to - // watch events happened or happening on the KV. - NewWatchStream() WatchStream -} - -// ConsistentWatchableKV is a WatchableKV that understands the consistency -// algorithm and consistent index. -// If the consistent index of executing entry is not larger than the -// consistent index of ConsistentWatchableKV, all operations in -// this entry are skipped and return empty response. -type ConsistentWatchableKV interface { - WatchableKV - // ConsistentIndex returns the current consistent index of the KV. - ConsistentIndex() uint64 -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go deleted file mode 100644 index 28a18a06597..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore.go +++ /dev/null @@ -1,710 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
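As a point of reference for the KV contract being removed above: Range takes a RangeOptions{Limit, Rev, Count} and reports the store revision alongside the results, returning ErrCompacted when the requested revision has already been compacted. Below is a minimal consumer-side sketch, assuming a concrete KV implementation is already wired up; the countKeysAt helper and the main wiring are illustrative, not part of the package.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/mvcc"
)

// countKeysAt asks any mvcc.KV implementation how many keys exist in
// [key, end) at revision rev, without fetching the key-value pairs
// themselves (Count: true, per the interface contract above).
func countKeysAt(kv mvcc.KV, key, end []byte, rev int64) (int, error) {
	r, err := kv.Range(key, end, mvcc.RangeOptions{Rev: rev, Count: true})
	if err != nil {
		// mvcc.ErrCompacted is returned when rev is older than the last compaction.
		return 0, err
	}
	return r.Count, nil
}

func main() {
	// Constructing a real store (backend, lessor, consistent-index getter) is
	// omitted; this only demonstrates the call shape of the interface.
	var kv mvcc.KV
	if kv != nil {
		n, _ := countKeysAt(kv, []byte("foo"), []byte("fop"), 0) // rev <= 0 means "current revision"
		fmt.Println(n)
	}
}
```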
- -package mvcc - -import ( - "encoding/binary" - "errors" - "math" - "math/rand" - "sync" - "time" - - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/schedule" - "github.com/coreos/pkg/capnslog" - "golang.org/x/net/context" -) - -var ( - keyBucketName = []byte("key") - metaBucketName = []byte("meta") - - // markedRevBytesLen is the byte length of marked revision. - // The first `revBytesLen` bytes represents a normal revision. The last - // one byte is the mark. - markedRevBytesLen = revBytesLen + 1 - markBytePosition = markedRevBytesLen - 1 - markTombstone byte = 't' - - consistentIndexKeyName = []byte("consistent_index") - scheduledCompactKeyName = []byte("scheduledCompactRev") - finishedCompactKeyName = []byte("finishedCompactRev") - - ErrTxnIDMismatch = errors.New("mvcc: txn id mismatch") - ErrCompacted = errors.New("mvcc: required revision has been compacted") - ErrFutureRev = errors.New("mvcc: required revision is a future revision") - ErrCanceled = errors.New("mvcc: watcher is canceled") - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") -) - -// ConsistentIndexGetter is an interface that wraps the Get method. -// Consistent index is the offset of an entry in a consistent replicated log. -type ConsistentIndexGetter interface { - // ConsistentIndex returns the consistent index of current executing entry. - ConsistentIndex() uint64 -} - -type store struct { - mu sync.Mutex // guards the following - - ig ConsistentIndexGetter - - b backend.Backend - kvindex index - - le lease.Lessor - - currentRev revision - // the main revision of the last compaction - compactMainRev int64 - - tx backend.BatchTx - txnID int64 // tracks the current txnID to verify txn operations - txnModify bool - - // bytesBuf8 is a byte slice of length 8 - // to avoid a repetitive allocation in saveIndex. - bytesBuf8 []byte - - changes []mvccpb.KeyValue - fifoSched schedule.Scheduler - - stopc chan struct{} -} - -// NewStore returns a new store. It is useful to create a store inside -// mvcc pkg. It should only be used for testing externally. -func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store { - s := &store{ - b: b, - ig: ig, - kvindex: newTreeIndex(), - - le: le, - - currentRev: revision{main: 1}, - compactMainRev: -1, - - bytesBuf8: make([]byte, 8, 8), - fifoSched: schedule.NewFIFOScheduler(), - - stopc: make(chan struct{}), - } - - if s.le != nil { - s.le.SetRangeDeleter(s) - } - - tx := s.b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(keyBucketName) - tx.UnsafeCreateBucket(metaBucketName) - tx.Unlock() - s.b.ForceCommit() - - if err := s.restore(); err != nil { - // TODO: return the error instead of panic here? 
- panic("failed to recover store from backend") - } - - return s -} - -func (s *store) Rev() int64 { - s.mu.Lock() - defer s.mu.Unlock() - - return s.currentRev.main -} - -func (s *store) FirstRev() int64 { - s.mu.Lock() - defer s.mu.Unlock() - - return s.compactMainRev -} - -func (s *store) Put(key, value []byte, lease lease.LeaseID) int64 { - id := s.TxnBegin() - s.put(key, value, lease) - s.txnEnd(id) - - putCounter.Inc() - - return int64(s.currentRev.main) -} - -func (s *store) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - id := s.TxnBegin() - kvs, count, rev, err := s.rangeKeys(key, end, ro.Limit, ro.Rev, ro.Count) - s.txnEnd(id) - - rangeCounter.Inc() - - r = &RangeResult{ - KVs: kvs, - Count: count, - Rev: rev, - } - - return r, err -} - -func (s *store) DeleteRange(key, end []byte) (n, rev int64) { - id := s.TxnBegin() - n = s.deleteRange(key, end) - s.txnEnd(id) - - deleteCounter.Inc() - - return n, int64(s.currentRev.main) -} - -func (s *store) TxnBegin() int64 { - s.mu.Lock() - s.currentRev.sub = 0 - s.tx = s.b.BatchTx() - s.tx.Lock() - - s.txnID = rand.Int63() - return s.txnID -} - -func (s *store) TxnEnd(txnID int64) error { - err := s.txnEnd(txnID) - if err != nil { - return err - } - - txnCounter.Inc() - return nil -} - -// txnEnd is used for unlocking an internal txn. It does -// not increase the txnCounter. -func (s *store) txnEnd(txnID int64) error { - if txnID != s.txnID { - return ErrTxnIDMismatch - } - - // only update index if the txn modifies the mvcc state. - // read only txn might execute with one write txn concurrently, - // it should not write its index to mvcc. - if s.txnModify { - s.saveIndex() - } - s.txnModify = false - - s.tx.Unlock() - if s.currentRev.sub != 0 { - s.currentRev.main += 1 - } - s.currentRev.sub = 0 - - dbTotalSize.Set(float64(s.b.Size())) - s.mu.Unlock() - return nil -} - -func (s *store) TxnRange(txnID int64, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - if txnID != s.txnID { - return nil, ErrTxnIDMismatch - } - - kvs, count, rev, err := s.rangeKeys(key, end, ro.Limit, ro.Rev, ro.Count) - - r = &RangeResult{ - KVs: kvs, - Count: count, - Rev: rev, - } - return r, err -} - -func (s *store) TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) { - if txnID != s.txnID { - return 0, ErrTxnIDMismatch - } - - s.put(key, value, lease) - return int64(s.currentRev.main + 1), nil -} - -func (s *store) TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) { - if txnID != s.txnID { - return 0, 0, ErrTxnIDMismatch - } - - n = s.deleteRange(key, end) - if n != 0 || s.currentRev.sub != 0 { - rev = int64(s.currentRev.main + 1) - } else { - rev = int64(s.currentRev.main) - } - return n, rev, nil -} - -func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { - if ctx == nil || ctx.Err() != nil { - s.mu.Lock() - select { - case <-s.stopc: - default: - f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } - s.fifoSched.Schedule(f) - } - s.mu.Unlock() - return - } - close(ch) -} - -func (s *store) Compact(rev int64) (<-chan struct{}, error) { - s.mu.Lock() - defer s.mu.Unlock() - if rev <= s.compactMainRev { - ch := make(chan struct{}) - f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } - s.fifoSched.Schedule(f) - return ch, ErrCompacted - } - if rev > s.currentRev.main { - return nil, ErrFutureRev - } - - start := time.Now() - - s.compactMainRev = rev - - rbytes := newRevBytes() - revToBytes(revision{main: rev}, rbytes) - - tx := 
s.b.BatchTx() - tx.Lock() - tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes) - tx.Unlock() - // ensure that desired compaction is persisted - s.b.ForceCommit() - - keep := s.kvindex.Compact(rev) - ch := make(chan struct{}) - var j = func(ctx context.Context) { - if ctx.Err() != nil { - s.compactBarrier(ctx, ch) - return - } - if !s.scheduleCompaction(rev, keep) { - s.compactBarrier(nil, ch) - return - } - close(ch) - } - - s.fifoSched.Schedule(j) - - indexCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) - return ch, nil -} - -// DefaultIgnores is a map of keys to ignore in hash checking. -var DefaultIgnores map[backend.IgnoreKey]struct{} - -func init() { - DefaultIgnores = map[backend.IgnoreKey]struct{}{ - // consistent index might be changed due to v2 internal sync, which - // is not controllable by the user. - {Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {}, - } -} - -func (s *store) Hash() (uint32, int64, error) { - s.mu.Lock() - defer s.mu.Unlock() - s.b.ForceCommit() - - h, err := s.b.Hash(DefaultIgnores) - rev := s.currentRev.main - return h, rev, err -} - -func (s *store) Commit() { - s.mu.Lock() - defer s.mu.Unlock() - - s.tx = s.b.BatchTx() - s.tx.Lock() - s.saveIndex() - s.tx.Unlock() - s.b.ForceCommit() -} - -func (s *store) Restore(b backend.Backend) error { - s.mu.Lock() - defer s.mu.Unlock() - - close(s.stopc) - s.fifoSched.Stop() - - s.b = b - s.kvindex = newTreeIndex() - s.currentRev = revision{main: 1} - s.compactMainRev = -1 - s.tx = b.BatchTx() - s.txnID = -1 - s.fifoSched = schedule.NewFIFOScheduler() - s.stopc = make(chan struct{}) - - return s.restore() -} - -func (s *store) restore() error { - min, max := newRevBytes(), newRevBytes() - revToBytes(revision{main: 1}, min) - revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) - - keyToLease := make(map[string]lease.LeaseID) - - // use an unordered map to hold the temp index data to speed up - // the initial key index recovery. - // we will convert this unordered map into the tree index later. - unordered := make(map[string]*keyIndex, 100000) - - // restore index - tx := s.b.BatchTx() - tx.Lock() - _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) - if len(finishedCompactBytes) != 0 { - s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main - plog.Printf("restore compact to %d", s.compactMainRev) - } - - // TODO: limit N to reduce max memory usage - keys, vals := tx.UnsafeRange(keyBucketName, min, max, 0) - for i, key := range keys { - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vals[i]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - - rev := bytesToRev(key[:revBytesLen]) - - // restore index - switch { - case isTombstone(key): - if ki, ok := unordered[string(kv.Key)]; ok { - ki.tombstone(rev.main, rev.sub) - } - delete(keyToLease, string(kv.Key)) - - default: - ki, ok := unordered[string(kv.Key)] - if ok { - ki.put(rev.main, rev.sub) - } else { - ki = &keyIndex{key: kv.Key} - ki.restore(revision{kv.CreateRevision, 0}, rev, kv.Version) - unordered[string(kv.Key)] = ki - } - - if lid := lease.LeaseID(kv.Lease); lid != lease.NoLease { - keyToLease[string(kv.Key)] = lid - } else { - delete(keyToLease, string(kv.Key)) - } - } - - // update revision - s.currentRev = rev - } - - // restore the tree index from the unordered index. 
- for _, v := range unordered { - s.kvindex.Insert(v) - } - - // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. - // the correct revision should be set to compaction revision in the case, not the largest revision - // we have seen. - if s.currentRev.main < s.compactMainRev { - s.currentRev.main = s.compactMainRev - } - - for key, lid := range keyToLease { - if s.le == nil { - panic("no lessor to attach lease") - } - err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}}) - if err != nil { - plog.Errorf("unexpected Attach error: %v", err) - } - } - - _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) - scheduledCompact := int64(0) - if len(scheduledCompactBytes) != 0 { - scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main - if scheduledCompact <= s.compactMainRev { - scheduledCompact = 0 - } - } - - tx.Unlock() - - if scheduledCompact != 0 { - s.Compact(scheduledCompact) - plog.Printf("resume scheduled compaction at %d", scheduledCompact) - } - - return nil -} - -func (s *store) Close() error { - close(s.stopc) - s.fifoSched.Stop() - return nil -} - -func (a *store) Equal(b *store) bool { - if a.currentRev != b.currentRev { - return false - } - if a.compactMainRev != b.compactMainRev { - return false - } - return a.kvindex.Equal(b.kvindex) -} - -// range is a keyword in Go, add Keys suffix. -func (s *store) rangeKeys(key, end []byte, limit, rangeRev int64, countOnly bool) (kvs []mvccpb.KeyValue, count int, curRev int64, err error) { - curRev = int64(s.currentRev.main) - if s.currentRev.sub > 0 { - curRev += 1 - } - - if rangeRev > curRev { - return nil, -1, s.currentRev.main, ErrFutureRev - } - var rev int64 - if rangeRev <= 0 { - rev = curRev - } else { - rev = rangeRev - } - if rev < s.compactMainRev { - return nil, -1, 0, ErrCompacted - } - - _, revpairs := s.kvindex.Range(key, end, int64(rev)) - if len(revpairs) == 0 { - return nil, 0, curRev, nil - } - if countOnly { - return nil, len(revpairs), curRev, nil - } - - for _, revpair := range revpairs { - start, end := revBytesRange(revpair) - - _, vs := s.tx.UnsafeRange(keyBucketName, start, end, 0) - if len(vs) != 1 { - plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) - } - - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vs[0]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - kvs = append(kvs, kv) - if limit > 0 && len(kvs) >= int(limit) { - break - } - } - return kvs, len(revpairs), curRev, nil -} - -func (s *store) put(key, value []byte, leaseID lease.LeaseID) { - s.txnModify = true - - rev := s.currentRev.main + 1 - c := rev - oldLease := lease.NoLease - - // if the key exists before, use its previous created and - // get its previous leaseID - _, created, ver, err := s.kvindex.Get(key, rev) - if err == nil { - c = created.main - oldLease = s.le.GetLease(lease.LeaseItem{Key: string(key)}) - } - - ibytes := newRevBytes() - revToBytes(revision{main: rev, sub: s.currentRev.sub}, ibytes) - - ver = ver + 1 - kv := mvccpb.KeyValue{ - Key: key, - Value: value, - CreateRevision: c, - ModRevision: rev, - Version: ver, - Lease: int64(leaseID), - } - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - s.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - s.kvindex.Put(key, revision{main: rev, sub: s.currentRev.sub}) - s.changes = append(s.changes, kv) - s.currentRev.sub += 1 - - if oldLease != lease.NoLease { - if s.le == nil { - panic("no lessor to detach lease") - } - 
- err = s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - plog.Errorf("unexpected error from lease detach: %v", err) - } - } - - if leaseID != lease.NoLease { - if s.le == nil { - panic("no lessor to attach lease") - } - - err = s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - panic("unexpected error from lease Attach") - } - } -} - -func (s *store) deleteRange(key, end []byte) int64 { - s.txnModify = true - - rrev := s.currentRev.main - if s.currentRev.sub > 0 { - rrev += 1 - } - keys, revs := s.kvindex.Range(key, end, rrev) - - if len(keys) == 0 { - return 0 - } - - for i, key := range keys { - s.delete(key, revs[i]) - } - return int64(len(keys)) -} - -func (s *store) delete(key []byte, rev revision) { - mainrev := s.currentRev.main + 1 - - ibytes := newRevBytes() - revToBytes(revision{main: mainrev, sub: s.currentRev.sub}, ibytes) - ibytes = appendMarkTombstone(ibytes) - - kv := mvccpb.KeyValue{ - Key: key, - } - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - s.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - err = s.kvindex.Tombstone(key, revision{main: mainrev, sub: s.currentRev.sub}) - if err != nil { - plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) - } - s.changes = append(s.changes, kv) - s.currentRev.sub += 1 - - item := lease.LeaseItem{Key: string(key)} - leaseID := s.le.GetLease(item) - - if leaseID != lease.NoLease { - err = s.le.Detach(leaseID, []lease.LeaseItem{item}) - if err != nil { - plog.Errorf("cannot detach %v", err) - } - } -} - -func (s *store) getChanges() []mvccpb.KeyValue { - changes := s.changes - s.changes = make([]mvccpb.KeyValue, 0, 4) - return changes -} - -func (s *store) saveIndex() { - if s.ig == nil { - return - } - tx := s.tx - bs := s.bytesBuf8 - binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex()) - // put the index into the underlying backend - // tx has been locked in TxnBegin, so there is no need to lock it again - tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs) -} - -func (s *store) ConsistentIndex() uint64 { - // TODO: cache index in a uint64 field? - tx := s.b.BatchTx() - tx.Lock() - defer tx.Unlock() - _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0) - if len(vs) == 0 { - return 0 - } - return binary.BigEndian.Uint64(vs[0]) -} - -// appendMarkTombstone appends tombstone mark to normal revision bytes. -func appendMarkTombstone(b []byte) []byte { - if len(b) != revBytesLen { - plog.Panicf("cannot append mark to non normal revision bytes") - } - return append(b, markTombstone) -} - -// isTombstone checks whether the revision bytes is a tombstone. -func isTombstone(b []byte) bool { - return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone -} - -// revBytesRange returns the range of revision bytes at -// the given revision. 
-func revBytesRange(rev revision) (start, end []byte) { - start = newRevBytes() - revToBytes(rev, start) - - end = newRevBytes() - endRev := revision{main: rev.main, sub: rev.sub + 1} - revToBytes(endRev, end) - - return start, end -} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go deleted file mode 100644 index bbd38f547f3..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "encoding/binary" - "time" -) - -func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool { - totalStart := time.Now() - defer dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond)) - - end := make([]byte, 8) - binary.BigEndian.PutUint64(end, uint64(compactMainRev+1)) - - batchsize := int64(10000) - last := make([]byte, 8+1+8) - for { - var rev revision - - start := time.Now() - tx := s.b.BatchTx() - tx.Lock() - - keys, _ := tx.UnsafeRange(keyBucketName, last, end, batchsize) - for _, key := range keys { - rev = bytesToRev(key) - if _, ok := keep[rev]; !ok { - tx.UnsafeDelete(keyBucketName, key) - } - } - - if len(keys) < int(batchsize) { - rbytes := make([]byte, 8+1+8) - revToBytes(revision{main: compactMainRev}, rbytes) - tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes) - tx.Unlock() - plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart)) - return true - } - - // update last - revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last) - tx.Unlock() - dbCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) - - select { - case <-time.After(100 * time.Millisecond): - case <-s.stopc: - return false - } - } -} diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go deleted file mode 100644 index aa8af6aa552..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/metrics.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
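scheduleCompaction above walks the key bucket in bounded batches, deleting every revision byte-key that is not in the keep set returned by the index, and pauses between batches so compaction does not starve normal traffic. Below is a simplified standalone sketch of that batching pattern, using plain int64 revisions and an in-memory slice in place of the backend transaction; the names and helpers are illustrative, not the store's actual API.

```go
package main

import (
	"fmt"
	"time"
)

// compactBatches deletes (here: counts) every revision not present in keep,
// batchSize at a time, pausing between batches, mirroring the loop structure
// of scheduleCompaction. revs stands in for the backend's UnsafeRange result
// and the deleted counter stands in for tx.UnsafeDelete.
func compactBatches(revs []int64, keep map[int64]struct{}, batchSize int, pause time.Duration) int {
	deleted := 0
	for start := 0; start < len(revs); start += batchSize {
		end := start + batchSize
		if end > len(revs) {
			end = len(revs)
		}
		for _, r := range revs[start:end] {
			if _, ok := keep[r]; !ok {
				deleted++ // a real store would delete the revision key here
			}
		}
		if end < len(revs) {
			time.Sleep(pause) // yield to ongoing reads/writes between batches
		}
	}
	return deleted
}

func main() {
	keep := map[int64]struct{}{4: {}, 5: {}}
	n := compactBatches([]int64{1, 2, 3, 4, 5}, keep, 2, 10*time.Millisecond)
	fmt.Println("deleted", n) // 3
}
```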
- -package mvcc - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -var ( - rangeCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "range_total", - Help: "Total number of ranges seen by this member.", - }) - - putCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "put_total", - Help: "Total number of puts seen by this member.", - }) - - deleteCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "delete_total", - Help: "Total number of deletes seen by this member.", - }) - - txnCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "txn_total", - Help: "Total number of txns seen by this member.", - }) - - keysGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "keys_total", - Help: "Total number of keys.", - }) - - watchStreamGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "watch_stream_total", - Help: "Total number of watch streams.", - }) - - watcherGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "watcher_total", - Help: "Total number of watchers.", - }) - - slowWatcherGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "slow_watcher_total", - Help: "Total number of unsynced slow watchers.", - }) - - totalEventsCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "events_total", - Help: "Total number of events sent by this member.", - }) - - pendingEventsGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "pending_events_total", - Help: "Total number of pending events to be sent.", - }) - - indexCompactionPauseDurations = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "index_compaction_pause_duration_milliseconds", - Help: "Bucketed histogram of index compaction pause duration.", - // 0.5ms -> 1second - Buckets: prometheus.ExponentialBuckets(0.5, 2, 12), - }) - - dbCompactionPauseDurations = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_compaction_pause_duration_milliseconds", - Help: "Bucketed histogram of db compaction pause duration.", - // 1ms -> 4second - Buckets: prometheus.ExponentialBuckets(1, 2, 13), - }) - - dbCompactionTotalDurations = prometheus.NewHistogram( - prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_compaction_total_duration_milliseconds", - Help: "Bucketed histogram of db compaction total duration.", - // 100ms -> 800second - Buckets: prometheus.ExponentialBuckets(100, 2, 14), - }) - - dbTotalSize = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "db_total_size_in_bytes", - Help: "Total size of the underlying database in bytes.", - }) -) - -func init() { - prometheus.MustRegister(rangeCounter) - prometheus.MustRegister(putCounter) - prometheus.MustRegister(deleteCounter) - prometheus.MustRegister(txnCounter) - prometheus.MustRegister(keysGauge) - prometheus.MustRegister(watchStreamGauge) 
- prometheus.MustRegister(watcherGauge) - prometheus.MustRegister(slowWatcherGauge) - prometheus.MustRegister(totalEventsCounter) - prometheus.MustRegister(pendingEventsGauge) - prometheus.MustRegister(indexCompactionPauseDurations) - prometheus.MustRegister(dbCompactionPauseDurations) - prometheus.MustRegister(dbCompactionTotalDurations) - prometheus.MustRegister(dbTotalSize) -} - -// ReportEventReceived reports that an event is received. -// This function should be called when the external systems received an -// event from mvcc.Watcher. -func ReportEventReceived(n int) { - pendingEventsGauge.Sub(float64(n)) - totalEventsCounter.Add(float64(n)) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go deleted file mode 100644 index aa053f4e66e..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ /dev/null @@ -1,735 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: kv.proto -// DO NOT EDIT! - -/* - Package mvccpb is a generated protocol buffer package. - - It is generated from these files: - kv.proto - - It has these top-level messages: - KeyValue - Event -*/ -package mvccpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Event_EventType int32 - -const ( - PUT Event_EventType = 0 - DELETE Event_EventType = 1 -) - -var Event_EventType_name = map[int32]string{ - 0: "PUT", - 1: "DELETE", -} -var Event_EventType_value = map[string]int32{ - "PUT": 0, - "DELETE": 1, -} - -func (x Event_EventType) String() string { - return proto.EnumName(Event_EventType_name, int32(x)) -} -func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} } - -type KeyValue struct { - // key is the key in bytes. An empty key is not allowed. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // create_revision is the revision of last creation on this key. - CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"` - // mod_revision is the revision of last modification on this key. - ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"` - // version is the version of the key. A deletion resets - // the version to zero and any modification of the key - // increases its version. - Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - // value is the value held by the key, in bytes. - Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` - // lease is the ID of the lease that attached to key. - // When the attached lease expires, the key will be deleted. - // If lease is 0, then no lease is attached to the key. 
- Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} } - -type Event struct { - // type is the kind of event. If type is a PUT, it indicates - // new data has been stored to the key. If type is a DELETE, - // it indicates the key was deleted. - Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"` - // kv holds the KeyValue for the event. - // A PUT event contains current kv pair. - // A PUT event with kv.Version=1 indicates the creation of a key. - // A DELETE/EXPIRE event contains the deleted key with - // its modification revision set to the revision of deletion. - Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` - // prev_kv holds the key-value pair before the event happens. - PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} } - -func init() { - proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") - proto.RegisterType((*Event)(nil), "mvccpb.Event") - proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) -} -func (m *KeyValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Key) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - } - if m.CreateRevision != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) - } - if m.ModRevision != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) - } - if m.Version != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Version)) - } - if len(m.Value) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - } - if m.Lease != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Lease)) - } - return i, nil -} - -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Type != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Type)) - } - if m.Kv != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size())) - n1, err := m.Kv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.PrevKv != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size())) - n2, err := m.PrevKv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func encodeFixed64Kv(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) 
- dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Kv(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintKv(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *KeyValue) Size() (n int) { - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - if m.CreateRevision != 0 { - n += 1 + sovKv(uint64(m.CreateRevision)) - } - if m.ModRevision != 0 { - n += 1 + sovKv(uint64(m.ModRevision)) - } - if m.Version != 0 { - n += 1 + sovKv(uint64(m.Version)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - if m.Lease != 0 { - n += 1 + sovKv(uint64(m.Lease)) - } - return n -} - -func (m *Event) Size() (n int) { - var l int - _ = l - if m.Type != 0 { - n += 1 + sovKv(uint64(m.Type)) - } - if m.Kv != nil { - l = m.Kv.Size() - n += 1 + l + sovKv(uint64(l)) - } - if m.PrevKv != nil { - l = m.PrevKv.Size() - n += 1 + l + sovKv(uint64(l)) - } - return n -} - -func sovKv(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozKv(x uint64) (n int) { - return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *KeyValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType) - } - m.CreateRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreateRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType) - } - m.ModRevision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ModRevision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - m.Lease = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lease |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= (Event_EventType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kv == nil { - m.Kv = &KeyValue{} - } - if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PrevKv == nil { - m.PrevKv = &KeyValue{} - } - if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipKv(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthKv - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipKv(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } - 
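The generated marshal/unmarshal code above encodes integers as protobuf base-128 varints: each byte carries seven bits of the value and the high bit marks a continuation, which is what encodeVarintKv and the shift/0x7F loops in Unmarshal implement. A small standalone sketch of that encoding follows; putUvarint is an illustrative stand-in, not part of the generated file.

```go
package main

import "fmt"

// putUvarint mirrors the base-128 varint encoding used by encodeVarintKv:
// low seven bits per byte, high bit set while more bytes follow.
func putUvarint(dst []byte, v uint64) int {
	i := 0
	for v >= 1<<7 {
		dst[i] = byte(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	dst[i] = byte(v)
	return i + 1
}

func main() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 300)
	fmt.Printf("%x\n", buf[:n]) // ac02: 0x2c with the continuation bit, then 0x02
}
```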
-var fileDescriptorKv = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, - 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, - 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, - 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, - 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, - 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, - 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, - 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, - 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, - 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, - 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, - 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, - 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, - 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, - 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, - 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, - 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, - 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, - 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/mvcc/revision.go b/vendor/github.com/coreos/etcd/mvcc/revision.go deleted file mode 100644 index 5fa35a1c2a2..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/revision.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import "encoding/binary" - -// revBytesLen is the byte length of a normal revision. -// First 8 bytes is the revision.main in big-endian format. The 9th byte -// is a '_'. The last 8 bytes is the revision.sub in big-endian format. -const revBytesLen = 8 + 1 + 8 - -// A revision indicates modification of the key-value space. -// The set of changes that share same main revision changes the key-value space atomically. -type revision struct { - // main is the main revision of a set of changes that happen atomically. - main int64 - - // sub is the the sub revision of a change in a set of changes that happen - // atomically. Each change has different increasing sub revision in that - // set. 
- sub int64 -} - -func (a revision) GreaterThan(b revision) bool { - if a.main > b.main { - return true - } - if a.main < b.main { - return false - } - return a.sub > b.sub -} - -func newRevBytes() []byte { - return make([]byte, revBytesLen, markedRevBytesLen) -} - -func revToBytes(rev revision, bytes []byte) { - binary.BigEndian.PutUint64(bytes, uint64(rev.main)) - bytes[8] = '_' - binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub)) -} - -func bytesToRev(bytes []byte) revision { - return revision{ - main: int64(binary.BigEndian.Uint64(bytes[0:8])), - sub: int64(binary.BigEndian.Uint64(bytes[9:])), - } -} - -type revisions []revision - -func (a revisions) Len() int { return len(a) } -func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) } -func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/coreos/etcd/mvcc/util.go b/vendor/github.com/coreos/etcd/mvcc/util.go deleted file mode 100644 index 8a0df0bfcc3..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/util.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "encoding/binary" - - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -func UpdateConsistentIndex(be backend.Backend, index uint64) { - tx := be.BatchTx() - tx.Lock() - defer tx.Unlock() - - var oldi uint64 - _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0) - if len(vs) != 0 { - oldi = binary.BigEndian.Uint64(vs[0]) - } - - if index <= oldi { - return - } - - bs := make([]byte, 8) - binary.BigEndian.PutUint64(bs, index) - tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs) -} - -func WriteKV(be backend.Backend, kv mvccpb.KeyValue) { - ibytes := newRevBytes() - revToBytes(revision{main: kv.ModRevision}, ibytes) - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - be.BatchTx().Lock() - be.BatchTx().UnsafePut(keyBucketName, ibytes, d) - be.BatchTx().Unlock() -} diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go deleted file mode 100644 index dbb79bcb693..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go +++ /dev/null @@ -1,577 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
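revision.go above fixes the revision key layout at 17 bytes: an 8-byte big-endian main revision, a '_' separator, and an 8-byte big-endian sub revision, so lexicographic ordering of keys in the backend matches numeric revision order. Below is a standalone round-trip sketch of that layout; encodeRev and decodeRev are illustrative stand-ins for the package's revToBytes and bytesToRev.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeRev mirrors the 17-byte layout documented in revision.go:
// 8 bytes big-endian main, one '_' separator byte, 8 bytes big-endian sub.
func encodeRev(main, sub int64) []byte {
	b := make([]byte, 8+1+8)
	binary.BigEndian.PutUint64(b, uint64(main))
	b[8] = '_'
	binary.BigEndian.PutUint64(b[9:], uint64(sub))
	return b
}

// decodeRev recovers the main and sub revisions from an encoded key.
func decodeRev(b []byte) (main, sub int64) {
	return int64(binary.BigEndian.Uint64(b[0:8])), int64(binary.BigEndian.Uint64(b[9:]))
}

func main() {
	b := encodeRev(7, 2)
	main, sub := decodeRev(b)
	fmt.Println(len(b), main, sub) // 17 7 2
}
```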
- -package mvcc - -import ( - "sync" - "time" - - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -const ( - // chanBufLen is the length of the buffered chan - // for sending out watched events. - // TODO: find a good buf value. 1024 is just a random one that - // seems to be reasonable. - chanBufLen = 1024 - - // maxWatchersPerSync is the number of watchers to sync in a single batch - maxWatchersPerSync = 512 -) - -type watchable interface { - watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) - progress(w *watcher) - rev() int64 -} - -type watchableStore struct { - mu sync.Mutex - - *store - - // victims are watcher batches that were blocked on the watch channel - victims []watcherBatch - victimc chan struct{} - - // contains all unsynced watchers that needs to sync with events that have happened - unsynced watcherGroup - - // contains all synced watchers that are in sync with the progress of the store. - // The key of the map is the key that the watcher watches on. - synced watcherGroup - - stopc chan struct{} - wg sync.WaitGroup -} - -// cancelFunc updates unsynced and synced maps when running -// cancel operations. -type cancelFunc func() - -func New(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV { - return newWatchableStore(b, le, ig) -} - -func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore { - s := &watchableStore{ - store: NewStore(b, le, ig), - victimc: make(chan struct{}, 1), - unsynced: newWatcherGroup(), - synced: newWatcherGroup(), - stopc: make(chan struct{}), - } - if s.le != nil { - // use this store as the deleter so revokes trigger watch events - s.le.SetRangeDeleter(s) - } - s.wg.Add(2) - go s.syncWatchersLoop() - go s.syncVictimsLoop() - return s -} - -func (s *watchableStore) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - s.mu.Lock() - defer s.mu.Unlock() - - rev = s.store.Put(key, value, lease) - changes := s.store.getChanges() - if len(changes) != 1 { - plog.Panicf("unexpected len(changes) != 1 after put") - } - - ev := mvccpb.Event{ - Type: mvccpb.PUT, - Kv: &changes[0], - } - s.notify(rev, []mvccpb.Event{ev}) - return rev -} - -func (s *watchableStore) DeleteRange(key, end []byte) (n, rev int64) { - s.mu.Lock() - defer s.mu.Unlock() - - n, rev = s.store.DeleteRange(key, end) - changes := s.store.getChanges() - - if len(changes) != int(n) { - plog.Panicf("unexpected len(changes) != n after deleteRange") - } - - if n == 0 { - return n, rev - } - - evs := make([]mvccpb.Event, n) - for i := range changes { - evs[i] = mvccpb.Event{ - Type: mvccpb.DELETE, - Kv: &changes[i]} - evs[i].Kv.ModRevision = rev - } - s.notify(rev, evs) - return n, rev -} - -func (s *watchableStore) TxnBegin() int64 { - s.mu.Lock() - return s.store.TxnBegin() -} - -func (s *watchableStore) TxnEnd(txnID int64) error { - err := s.store.TxnEnd(txnID) - if err != nil { - return err - } - - changes := s.getChanges() - if len(changes) == 0 { - s.mu.Unlock() - return nil - } - - rev := s.store.Rev() - evs := make([]mvccpb.Event, len(changes)) - for i, change := range changes { - switch change.CreateRevision { - case 0: - evs[i] = mvccpb.Event{ - Type: mvccpb.DELETE, - Kv: &changes[i]} - evs[i].Kv.ModRevision = rev - default: - evs[i] = mvccpb.Event{ - Type: mvccpb.PUT, - Kv: &changes[i]} - } - } - - s.notify(rev, evs) - s.mu.Unlock() - - return nil -} - -func (s 
*watchableStore) Close() error { - close(s.stopc) - s.wg.Wait() - return s.store.Close() -} - -func (s *watchableStore) NewWatchStream() WatchStream { - watchStreamGauge.Inc() - return &watchStream{ - watchable: s, - ch: make(chan WatchResponse, chanBufLen), - cancels: make(map[WatchID]cancelFunc), - watchers: make(map[WatchID]*watcher), - } -} - -func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { - s.mu.Lock() - defer s.mu.Unlock() - - wa := &watcher{ - key: key, - end: end, - minRev: startRev, - id: id, - ch: ch, - fcs: fcs, - } - - s.store.mu.Lock() - synced := startRev > s.store.currentRev.main || startRev == 0 - if synced { - wa.minRev = s.store.currentRev.main + 1 - if startRev > wa.minRev { - wa.minRev = startRev - } - } - s.store.mu.Unlock() - if synced { - s.synced.add(wa) - } else { - slowWatcherGauge.Inc() - s.unsynced.add(wa) - } - watcherGauge.Inc() - - return wa, func() { s.cancelWatcher(wa) } -} - -// cancelWatcher removes references of the watcher from the watchableStore -func (s *watchableStore) cancelWatcher(wa *watcher) { - for { - s.mu.Lock() - - if s.unsynced.delete(wa) { - slowWatcherGauge.Dec() - break - } else if s.synced.delete(wa) { - break - } else if wa.compacted { - break - } - - if !wa.victim { - panic("watcher not victim but not in watch groups") - } - - var victimBatch watcherBatch - for _, wb := range s.victims { - if wb[wa] != nil { - victimBatch = wb - break - } - } - if victimBatch != nil { - slowWatcherGauge.Dec() - delete(victimBatch, wa) - break - } - - // victim being processed so not accessible; retry - s.mu.Unlock() - time.Sleep(time.Millisecond) - } - - watcherGauge.Dec() - s.mu.Unlock() -} - -// syncWatchersLoop syncs the watcher in the unsynced map every 100ms. -func (s *watchableStore) syncWatchersLoop() { - defer s.wg.Done() - - for { - s.mu.Lock() - st := time.Now() - lastUnsyncedWatchers := s.unsynced.size() - s.syncWatchers() - unsyncedWatchers := s.unsynced.size() - s.mu.Unlock() - syncDuration := time.Since(st) - - waitDuration := 100 * time.Millisecond - // more work pending? 
- if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers { - // be fair to other store operations by yielding time taken - waitDuration = syncDuration - } - - select { - case <-time.After(waitDuration): - case <-s.stopc: - return - } - } -} - -// syncVictimsLoop tries to write precomputed watcher responses to -// watchers that had a blocked watcher channel -func (s *watchableStore) syncVictimsLoop() { - defer s.wg.Done() - - for { - for s.moveVictims() != 0 { - // try to update all victim watchers - } - s.mu.Lock() - isEmpty := len(s.victims) == 0 - s.mu.Unlock() - - var tickc <-chan time.Time - if !isEmpty { - tickc = time.After(10 * time.Millisecond) - } - - select { - case <-tickc: - case <-s.victimc: - case <-s.stopc: - return - } - } -} - -// moveVictims tries to update watches with already pending event data -func (s *watchableStore) moveVictims() (moved int) { - s.mu.Lock() - victims := s.victims - s.victims = nil - s.mu.Unlock() - - var newVictim watcherBatch - for _, wb := range victims { - // try to send responses again - for w, eb := range wb { - // watcher has observed the store up to, but not including, w.minRev - rev := w.minRev - 1 - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - if newVictim == nil { - newVictim = make(watcherBatch) - } - newVictim[w] = eb - continue - } - moved++ - } - - // assign completed victim watchers to unsync/sync - s.mu.Lock() - s.store.mu.Lock() - curRev := s.store.currentRev.main - for w, eb := range wb { - if newVictim != nil && newVictim[w] != nil { - // couldn't send watch response; stays victim - continue - } - w.victim = false - if eb.moreRev != 0 { - w.minRev = eb.moreRev - } - if w.minRev <= curRev { - s.unsynced.add(w) - } else { - slowWatcherGauge.Dec() - s.synced.add(w) - } - } - s.store.mu.Unlock() - s.mu.Unlock() - } - - if len(newVictim) > 0 { - s.mu.Lock() - s.victims = append(s.victims, newVictim) - s.mu.Unlock() - } - - return moved -} - -// syncWatchers syncs unsynced watchers by: -// 1. choose a set of watchers from the unsynced watcher group -// 2. iterate over the set to get the minimum revision and remove compacted watchers -// 3. use minimum revision to get all key-value pairs and send those events to watchers -// 4. remove synced watchers in set from unsynced group and move to synced group -func (s *watchableStore) syncWatchers() { - if s.unsynced.size() == 0 { - return - } - - s.store.mu.Lock() - defer s.store.mu.Unlock() - - // in order to find key-value pairs from unsynced watchers, we need to - // find min revision index, and these revisions can be used to - // query the backend store of key-value pairs - curRev := s.store.currentRev.main - compactionRev := s.store.compactMainRev - wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) - minBytes, maxBytes := newRevBytes(), newRevBytes() - revToBytes(revision{main: minRev}, minBytes) - revToBytes(revision{main: curRev + 1}, maxBytes) - - // UnsafeRange returns keys and values. And in boltdb, keys are revisions. - // values are actual key-value pairs in backend. 
- tx := s.store.b.BatchTx() - tx.Lock() - revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) - evs := kvsToEvents(wg, revs, vs) - tx.Unlock() - - var victims watcherBatch - wb := newWatcherBatch(wg, evs) - for w := range wg.watchers { - w.minRev = curRev + 1 - - eb, ok := wb[w] - if !ok { - // bring un-notified watcher to synced - s.synced.add(w) - s.unsynced.delete(w) - continue - } - - if eb.moreRev != 0 { - w.minRev = eb.moreRev - } - - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - if victims == nil { - victims = make(watcherBatch) - } - w.victim = true - } - - if w.victim { - victims[w] = eb - } else { - if eb.moreRev != 0 { - // stay unsynced; more to read - continue - } - s.synced.add(w) - } - s.unsynced.delete(w) - } - s.addVictim(victims) - - vsz := 0 - for _, v := range s.victims { - vsz += len(v) - } - slowWatcherGauge.Set(float64(s.unsynced.size() + vsz)) -} - -// kvsToEvents gets all events for the watchers from all key-value pairs -func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { - for i, v := range vals { - var kv mvccpb.KeyValue - if err := kv.Unmarshal(v); err != nil { - plog.Panicf("cannot unmarshal event: %v", err) - } - - if !wg.contains(string(kv.Key)) { - continue - } - - ty := mvccpb.PUT - if isTombstone(revs[i]) { - ty = mvccpb.DELETE - // patch in mod revision so watchers won't skip - kv.ModRevision = bytesToRev(revs[i]).main - } - evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty}) - } - return evs -} - -// notify notifies the fact that given event at the given rev just happened to -// watchers that watch on the key of the event. -func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { - var victim watcherBatch - for w, eb := range newWatcherBatch(&s.synced, evs) { - if eb.revs != 1 { - plog.Panicf("unexpected multiple revisions in notification") - } - - if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { - pendingEventsGauge.Add(float64(len(eb.evs))) - } else { - // move slow watcher to victims - w.minRev = rev + 1 - if victim == nil { - victim = make(watcherBatch) - } - w.victim = true - victim[w] = eb - s.synced.delete(w) - slowWatcherGauge.Inc() - } - } - s.addVictim(victim) -} - -func (s *watchableStore) addVictim(victim watcherBatch) { - if victim == nil { - return - } - s.victims = append(s.victims, victim) - select { - case s.victimc <- struct{}{}: - default: - } -} - -func (s *watchableStore) rev() int64 { return s.store.Rev() } - -func (s *watchableStore) progress(w *watcher) { - s.mu.Lock() - defer s.mu.Unlock() - - if _, ok := s.synced.watchers[w]; ok { - w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) - // If the ch is full, this watcher is receiving events. - // We do not need to send progress at all. - } -} - -type watcher struct { - // the watcher key - key []byte - // end indicates the end of the range to watch. - // If end is set, the watcher is on a range. - end []byte - - // victim is set when ch is blocked and undergoing victim processing - victim bool - - // compacted is set when the watcher is removed because of compaction - compacted bool - - // minRev is the minimum revision update the watcher will accept - minRev int64 - id WatchID - - fcs []FilterFunc - // a chan to send out the watch response. - // The chan might be shared with other watchers. 
- ch chan<- WatchResponse -} - -func (w *watcher) send(wr WatchResponse) bool { - progressEvent := len(wr.Events) == 0 - - if len(w.fcs) != 0 { - ne := make([]mvccpb.Event, 0, len(wr.Events)) - for i := range wr.Events { - filtered := false - for _, filter := range w.fcs { - if filter(wr.Events[i]) { - filtered = true - break - } - } - if !filtered { - ne = append(ne, wr.Events[i]) - } - } - wr.Events = ne - } - - // if all events are filtered out, we should send nothing. - if !progressEvent && len(wr.Events) == 0 { - return true - } - select { - case w.ch <- wr: - return true - default: - return false - } -} diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher.go b/vendor/github.com/coreos/etcd/mvcc/watcher.go deleted file mode 100644 index 9468d42695c..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watcher.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "bytes" - "errors" - "sync" - - "github.com/coreos/etcd/mvcc/mvccpb" -) - -var ( - ErrWatcherNotExist = errors.New("mvcc: watcher does not exist") -) - -type WatchID int64 - -// FilterFunc returns true if the given event should be filtered out. -type FilterFunc func(e mvccpb.Event) bool - -type WatchStream interface { - // Watch creates a watcher. The watcher watches the events happening or - // happened on the given key or range [key, end) from the given startRev. - // - // The whole event history can be watched unless compacted. - // If `startRev` <=0, watch observes events after currentRev. - // - // The returned `id` is the ID of this watcher. It appears as WatchID - // in events that are sent to the created watcher through stream channel. - // - Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID - - // Chan returns a chan. All watch response will be sent to the returned chan. - Chan() <-chan WatchResponse - - // RequestProgress requests the progress of the watcher with given ID. The response - // will only be sent if the watcher is currently synced. - // The responses will be sent through the WatchRespone Chan attached - // with this stream to ensure correct ordering. - // The responses contains no events. The revision in the response is the progress - // of the watchers since the watcher is currently synced. - RequestProgress(id WatchID) - - // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be - // returned. - Cancel(id WatchID) error - - // Close closes Chan and release all related resources. - Close() - - // Rev returns the current revision of the KV the stream watches on. - Rev() int64 -} - -type WatchResponse struct { - // WatchID is the WatchID of the watcher this response sent to. - WatchID WatchID - - // Events contains all the events that needs to send. - Events []mvccpb.Event - - // Revision is the revision of the KV when the watchResponse is created. 
- // For a normal response, the revision should be the same as the last - // modified revision inside Events. For a delayed response to a unsynced - // watcher, the revision is greater than the last modified revision - // inside Events. - Revision int64 - - // CompactRevision is set when the watcher is cancelled due to compaction. - CompactRevision int64 -} - -// watchStream contains a collection of watchers that share -// one streaming chan to send out watched events and other control events. -type watchStream struct { - watchable watchable - ch chan WatchResponse - - mu sync.Mutex // guards fields below it - // nextID is the ID pre-allocated for next new watcher in this stream - nextID WatchID - closed bool - cancels map[WatchID]cancelFunc - watchers map[WatchID]*watcher -} - -// Watch creates a new watcher in the stream and returns its WatchID. -// TODO: return error if ws is closed? -func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID { - // prevent wrong range where key >= end lexicographically - // watch request with 'WithFromKey' has empty-byte range end - if len(end) != 0 && bytes.Compare(key, end) != -1 { - return -1 - } - - ws.mu.Lock() - defer ws.mu.Unlock() - if ws.closed { - return -1 - } - - id := ws.nextID - ws.nextID++ - - w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...) - - ws.cancels[id] = c - ws.watchers[id] = w - return id -} - -func (ws *watchStream) Chan() <-chan WatchResponse { - return ws.ch -} - -func (ws *watchStream) Cancel(id WatchID) error { - ws.mu.Lock() - cancel, ok := ws.cancels[id] - ok = ok && !ws.closed - if ok { - delete(ws.cancels, id) - delete(ws.watchers, id) - } - ws.mu.Unlock() - if !ok { - return ErrWatcherNotExist - } - cancel() - return nil -} - -func (ws *watchStream) Close() { - ws.mu.Lock() - defer ws.mu.Unlock() - - for _, cancel := range ws.cancels { - cancel() - } - ws.closed = true - close(ws.ch) - watchStreamGauge.Dec() -} - -func (ws *watchStream) Rev() int64 { - ws.mu.Lock() - defer ws.mu.Unlock() - return ws.watchable.rev() -} - -func (ws *watchStream) RequestProgress(id WatchID) { - ws.mu.Lock() - w, ok := ws.watchers[id] - ws.mu.Unlock() - if !ok { - return - } - ws.watchable.progress(w) -} diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go deleted file mode 100644 index 2710c1cc940..00000000000 --- a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mvcc - -import ( - "math" - - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/adt" -) - -var ( - // watchBatchMaxRevs is the maximum distinct revisions that - // may be sent to an unsynced watcher at a time. Declared as - // var instead of const for testing purposes. 
- watchBatchMaxRevs = 1000 -) - -type eventBatch struct { - // evs is a batch of revision-ordered events - evs []mvccpb.Event - // revs is the minimum unique revisions observed for this batch - revs int - // moreRev is first revision with more events following this batch - moreRev int64 -} - -func (eb *eventBatch) add(ev mvccpb.Event) { - if eb.revs > watchBatchMaxRevs { - // maxed out batch size - return - } - - if len(eb.evs) == 0 { - // base case - eb.revs = 1 - eb.evs = append(eb.evs, ev) - return - } - - // revision accounting - ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision - evRev := ev.Kv.ModRevision - if evRev > ebRev { - eb.revs++ - if eb.revs > watchBatchMaxRevs { - eb.moreRev = evRev - return - } - } - - eb.evs = append(eb.evs, ev) -} - -type watcherBatch map[*watcher]*eventBatch - -func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) { - eb := wb[w] - if eb == nil { - eb = &eventBatch{} - wb[w] = eb - } - eb.add(ev) -} - -// newWatcherBatch maps watchers to their matched events. It enables quick -// events look up by watcher. -func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch { - if len(wg.watchers) == 0 { - return nil - } - - wb := make(watcherBatch) - for _, ev := range evs { - for w := range wg.watcherSetByKey(string(ev.Kv.Key)) { - if ev.Kv.ModRevision >= w.minRev { - // don't double notify - wb.add(w, ev) - } - } - } - return wb -} - -type watcherSet map[*watcher]struct{} - -func (w watcherSet) add(wa *watcher) { - if _, ok := w[wa]; ok { - panic("add watcher twice!") - } - w[wa] = struct{}{} -} - -func (w watcherSet) union(ws watcherSet) { - for wa := range ws { - w.add(wa) - } -} - -func (w watcherSet) delete(wa *watcher) { - if _, ok := w[wa]; !ok { - panic("removing missing watcher!") - } - delete(w, wa) -} - -type watcherSetByKey map[string]watcherSet - -func (w watcherSetByKey) add(wa *watcher) { - set := w[string(wa.key)] - if set == nil { - set = make(watcherSet) - w[string(wa.key)] = set - } - set.add(wa) -} - -func (w watcherSetByKey) delete(wa *watcher) bool { - k := string(wa.key) - if v, ok := w[k]; ok { - if _, ok := v[wa]; ok { - delete(v, wa) - if len(v) == 0 { - // remove the set; nothing left - delete(w, k) - } - return true - } - } - return false -} - -// watcherGroup is a collection of watchers organized by their ranges -type watcherGroup struct { - // keyWatchers has the watchers that watch on a single key - keyWatchers watcherSetByKey - // ranges has the watchers that watch a range; it is sorted by interval - ranges adt.IntervalTree - // watchers is the set of all watchers - watchers watcherSet -} - -func newWatcherGroup() watcherGroup { - return watcherGroup{ - keyWatchers: make(watcherSetByKey), - watchers: make(watcherSet), - } -} - -// add puts a watcher in the group. -func (wg *watcherGroup) add(wa *watcher) { - wg.watchers.add(wa) - if wa.end == nil { - wg.keyWatchers.add(wa) - return - } - - // interval already registered? - ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) - if iv := wg.ranges.Find(ivl); iv != nil { - iv.Val.(watcherSet).add(wa) - return - } - - // not registered, put in interval tree - ws := make(watcherSet) - ws.add(wa) - wg.ranges.Insert(ivl, ws) -} - -// contains is whether the given key has a watcher in the group. -func (wg *watcherGroup) contains(key string) bool { - _, ok := wg.keyWatchers[key] - return ok || wg.ranges.Contains(adt.NewStringAffinePoint(key)) -} - -// size gives the number of unique watchers in the group. 
-func (wg *watcherGroup) size() int { return len(wg.watchers) } - -// delete removes a watcher from the group. -func (wg *watcherGroup) delete(wa *watcher) bool { - if _, ok := wg.watchers[wa]; !ok { - return false - } - wg.watchers.delete(wa) - if wa.end == nil { - wg.keyWatchers.delete(wa) - return true - } - - ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end)) - iv := wg.ranges.Find(ivl) - if iv == nil { - return false - } - - ws := iv.Val.(watcherSet) - delete(ws, wa) - if len(ws) == 0 { - // remove interval missing watchers - if ok := wg.ranges.Delete(ivl); !ok { - panic("could not remove watcher from interval tree") - } - } - - return true -} - -// choose selects watchers from the watcher group to update -func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) { - if len(wg.watchers) < maxWatchers { - return wg, wg.chooseAll(curRev, compactRev) - } - ret := newWatcherGroup() - for w := range wg.watchers { - if maxWatchers <= 0 { - break - } - maxWatchers-- - ret.add(w) - } - return &ret, ret.chooseAll(curRev, compactRev) -} - -func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 { - minRev := int64(math.MaxInt64) - for w := range wg.watchers { - if w.minRev > curRev { - panic("watcher current revision should not exceed current revision") - } - if w.minRev < compactRev { - select { - case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}: - w.compacted = true - wg.delete(w) - default: - // retry next time - } - continue - } - if minRev > w.minRev { - minRev = w.minRev - } - } - return minRev -} - -// watcherSetByKey gets the set of watchers that receive events on the given key. -func (wg *watcherGroup) watcherSetByKey(key string) watcherSet { - wkeys := wg.keyWatchers[key] - wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key)) - - // zero-copy cases - switch { - case len(wranges) == 0: - // no need to merge ranges or copy; reuse single-key set - return wkeys - case len(wranges) == 0 && len(wkeys) == 0: - return nil - case len(wranges) == 1 && len(wkeys) == 0: - return wranges[0].Val.(watcherSet) - } - - // copy case - ret := make(watcherSet) - ret.union(wg.keyWatchers[key]) - for _, item := range wranges { - ret.union(item.Val.(watcherSet)) - } - return ret -} diff --git a/vendor/github.com/coreos/etcd/pkg/adt/doc.go b/vendor/github.com/coreos/etcd/pkg/adt/doc.go deleted file mode 100644 index 1a9559145b3..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/adt/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package adt implements useful abstract data types. 
-package adt diff --git a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go deleted file mode 100644 index 6edbe593fb4..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package adt - -import ( - "math" -) - -// Comparable is an interface for trichotomic comparisons. -type Comparable interface { - // Compare gives the result of a 3-way comparison - // a.Compare(b) = 1 => a > b - // a.Compare(b) = 0 => a == b - // a.Compare(b) = -1 => a < b - Compare(c Comparable) int -} - -type rbcolor int - -const ( - black rbcolor = iota - red -) - -// Interval implements a Comparable interval [begin, end) -// TODO: support different sorts of intervals: (a,b), [a,b], (a, b] -type Interval struct { - Begin Comparable - End Comparable -} - -// Compare on an interval gives == if the interval overlaps. -func (ivl *Interval) Compare(c Comparable) int { - ivl2 := c.(*Interval) - ivbCmpBegin := ivl.Begin.Compare(ivl2.Begin) - ivbCmpEnd := ivl.Begin.Compare(ivl2.End) - iveCmpBegin := ivl.End.Compare(ivl2.Begin) - - // ivl is left of ivl2 - if ivbCmpBegin < 0 && iveCmpBegin <= 0 { - return -1 - } - - // iv is right of iv2 - if ivbCmpEnd >= 0 { - return 1 - } - - return 0 -} - -type intervalNode struct { - // iv is the interval-value pair entry. - iv IntervalValue - // max endpoint of all descendent nodes. 
- max Comparable - // left and right are sorted by low endpoint of key interval - left, right *intervalNode - // parent is the direct ancestor of the node - parent *intervalNode - c rbcolor -} - -func (x *intervalNode) color() rbcolor { - if x == nil { - return black - } - return x.c -} - -func (n *intervalNode) height() int { - if n == nil { - return 0 - } - ld := n.left.height() - rd := n.right.height() - if ld < rd { - return rd + 1 - } - return ld + 1 -} - -func (x *intervalNode) min() *intervalNode { - for x.left != nil { - x = x.left - } - return x -} - -// successor is the next in-order node in the tree -func (x *intervalNode) successor() *intervalNode { - if x.right != nil { - return x.right.min() - } - y := x.parent - for y != nil && x == y.right { - x = y - y = y.parent - } - return y -} - -// updateMax updates the maximum values for a node and its ancestors -func (x *intervalNode) updateMax() { - for x != nil { - oldmax := x.max - max := x.iv.Ivl.End - if x.left != nil && x.left.max.Compare(max) > 0 { - max = x.left.max - } - if x.right != nil && x.right.max.Compare(max) > 0 { - max = x.right.max - } - if oldmax.Compare(max) == 0 { - break - } - x.max = max - x = x.parent - } -} - -type nodeVisitor func(n *intervalNode) bool - -// visit will call a node visitor on each node that overlaps the given interval -func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) { - if x == nil { - return - } - v := iv.Compare(&x.iv.Ivl) - switch { - case v < 0: - x.left.visit(iv, nv) - case v > 0: - maxiv := Interval{x.iv.Ivl.Begin, x.max} - if maxiv.Compare(iv) == 0 { - x.left.visit(iv, nv) - x.right.visit(iv, nv) - } - default: - nv(x) - x.left.visit(iv, nv) - x.right.visit(iv, nv) - } -} - -type IntervalValue struct { - Ivl Interval - Val interface{} -} - -// IntervalTree represents a (mostly) textbook implementation of the -// "Introduction to Algorithms" (Cormen et al, 2nd ed.) chapter 13 red-black tree -// and chapter 14.3 interval tree with search supporting "stabbing queries". -type IntervalTree struct { - root *intervalNode - count int -} - -// Delete removes the node with the given interval from the tree, returning -// true if a node is in fact removed. 
-func (ivt *IntervalTree) Delete(ivl Interval) bool { - z := ivt.find(ivl) - if z == nil { - return false - } - - y := z - if z.left != nil && z.right != nil { - y = z.successor() - } - - x := y.left - if x == nil { - x = y.right - } - if x != nil { - x.parent = y.parent - } - - if y.parent == nil { - ivt.root = x - } else { - if y == y.parent.left { - y.parent.left = x - } else { - y.parent.right = x - } - y.parent.updateMax() - } - if y != z { - z.iv = y.iv - z.updateMax() - } - - if y.color() == black && x != nil { - ivt.deleteFixup(x) - } - - ivt.count-- - return true -} - -func (ivt *IntervalTree) deleteFixup(x *intervalNode) { - for x != ivt.root && x.color() == black && x.parent != nil { - if x == x.parent.left { - w := x.parent.right - if w.color() == red { - w.c = black - x.parent.c = red - ivt.rotateLeft(x.parent) - w = x.parent.right - } - if w == nil { - break - } - if w.left.color() == black && w.right.color() == black { - w.c = red - x = x.parent - } else { - if w.right.color() == black { - w.left.c = black - w.c = red - ivt.rotateRight(w) - w = x.parent.right - } - w.c = x.parent.color() - x.parent.c = black - w.right.c = black - ivt.rotateLeft(x.parent) - x = ivt.root - } - } else { - // same as above but with left and right exchanged - w := x.parent.left - if w.color() == red { - w.c = black - x.parent.c = red - ivt.rotateRight(x.parent) - w = x.parent.left - } - if w == nil { - break - } - if w.left.color() == black && w.right.color() == black { - w.c = red - x = x.parent - } else { - if w.left.color() == black { - w.right.c = black - w.c = red - ivt.rotateLeft(w) - w = x.parent.left - } - w.c = x.parent.color() - x.parent.c = black - w.left.c = black - ivt.rotateRight(x.parent) - x = ivt.root - } - } - } - if x != nil { - x.c = black - } -} - -// Insert adds a node with the given interval into the tree. 
-func (ivt *IntervalTree) Insert(ivl Interval, val interface{}) { - var y *intervalNode - z := &intervalNode{iv: IntervalValue{ivl, val}, max: ivl.End, c: red} - x := ivt.root - for x != nil { - y = x - if z.iv.Ivl.Begin.Compare(x.iv.Ivl.Begin) < 0 { - x = x.left - } else { - x = x.right - } - } - - z.parent = y - if y == nil { - ivt.root = z - } else { - if z.iv.Ivl.Begin.Compare(y.iv.Ivl.Begin) < 0 { - y.left = z - } else { - y.right = z - } - y.updateMax() - } - z.c = red - ivt.insertFixup(z) - ivt.count++ -} - -func (ivt *IntervalTree) insertFixup(z *intervalNode) { - for z.parent != nil && z.parent.parent != nil && z.parent.color() == red { - if z.parent == z.parent.parent.left { - y := z.parent.parent.right - if y.color() == red { - y.c = black - z.parent.c = black - z.parent.parent.c = red - z = z.parent.parent - } else { - if z == z.parent.right { - z = z.parent - ivt.rotateLeft(z) - } - z.parent.c = black - z.parent.parent.c = red - ivt.rotateRight(z.parent.parent) - } - } else { - // same as then with left/right exchanged - y := z.parent.parent.left - if y.color() == red { - y.c = black - z.parent.c = black - z.parent.parent.c = red - z = z.parent.parent - } else { - if z == z.parent.left { - z = z.parent - ivt.rotateRight(z) - } - z.parent.c = black - z.parent.parent.c = red - ivt.rotateLeft(z.parent.parent) - } - } - } - ivt.root.c = black -} - -// rotateLeft moves x so it is left of its right child -func (ivt *IntervalTree) rotateLeft(x *intervalNode) { - y := x.right - x.right = y.left - if y.left != nil { - y.left.parent = x - } - x.updateMax() - ivt.replaceParent(x, y) - y.left = x - y.updateMax() -} - -// rotateLeft moves x so it is right of its left child -func (ivt *IntervalTree) rotateRight(x *intervalNode) { - if x == nil { - return - } - y := x.left - x.left = y.right - if y.right != nil { - y.right.parent = x - } - x.updateMax() - ivt.replaceParent(x, y) - y.right = x - y.updateMax() -} - -// replaceParent replaces x's parent with y -func (ivt *IntervalTree) replaceParent(x *intervalNode, y *intervalNode) { - y.parent = x.parent - if x.parent == nil { - ivt.root = y - } else { - if x == x.parent.left { - x.parent.left = y - } else { - x.parent.right = y - } - x.parent.updateMax() - } - x.parent = y -} - -// Len gives the number of elements in the tree -func (ivt *IntervalTree) Len() int { return ivt.count } - -// Height is the number of levels in the tree; one node has height 1. -func (ivt *IntervalTree) Height() int { return ivt.root.height() } - -// MaxHeight is the expected maximum tree height given the number of nodes -func (ivt *IntervalTree) MaxHeight() int { - return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5) -} - -// IntervalVisitor is used on tree searchs; return false to stop searching. -type IntervalVisitor func(n *IntervalValue) bool - -// Visit calls a visitor function on every tree node intersecting the given interval. 
-func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) { - ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) }) -} - -// find the exact node for a given interval -func (ivt *IntervalTree) find(ivl Interval) (ret *intervalNode) { - f := func(n *intervalNode) bool { - if n.iv.Ivl != ivl { - return true - } - ret = n - return false - } - ivt.root.visit(&ivl, f) - return ret -} - -// Find gets the IntervalValue for the node matching the given interval -func (ivt *IntervalTree) Find(ivl Interval) (ret *IntervalValue) { - n := ivt.find(ivl) - if n == nil { - return nil - } - return &n.iv -} - -// Contains returns true if there is some tree node intersecting the given interval. -func (ivt *IntervalTree) Contains(iv Interval) bool { - x := ivt.root - for x != nil && iv.Compare(&x.iv.Ivl) != 0 { - if x.left != nil && x.left.max.Compare(iv.Begin) > 0 { - x = x.left - } else { - x = x.right - } - } - return x != nil -} - -// Stab returns a slice with all elements in the tree intersecting the interval. -func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) { - if ivt.count == 0 { - return nil - } - f := func(n *IntervalValue) bool { ivs = append(ivs, n); return true } - ivt.Visit(iv, f) - return ivs -} - -type StringComparable string - -func (s StringComparable) Compare(c Comparable) int { - sc := c.(StringComparable) - if s < sc { - return -1 - } - if s > sc { - return 1 - } - return 0 -} - -func NewStringInterval(begin, end string) Interval { - return Interval{StringComparable(begin), StringComparable(end)} -} - -func NewStringPoint(s string) Interval { - return Interval{StringComparable(s), StringComparable(s + "\x00")} -} - -// StringAffineComparable treats "" as > all other strings -type StringAffineComparable string - -func (s StringAffineComparable) Compare(c Comparable) int { - sc := c.(StringAffineComparable) - - if len(s) == 0 { - if len(sc) == 0 { - return 0 - } - return 1 - } - if len(sc) == 0 { - return -1 - } - - if s < sc { - return -1 - } - if s > sc { - return 1 - } - return 0 -} - -func NewStringAffineInterval(begin, end string) Interval { - return Interval{StringAffineComparable(begin), StringAffineComparable(end)} -} -func NewStringAffinePoint(s string) Interval { - return NewStringAffineInterval(s, s+"\x00") -} - -func NewInt64Interval(a int64, b int64) Interval { - return Interval{Int64Comparable(a), Int64Comparable(b)} -} - -func NewInt64Point(a int64) Interval { - return Interval{Int64Comparable(a), Int64Comparable(a + 1)} -} - -type Int64Comparable int64 - -func (v Int64Comparable) Compare(c Comparable) int { - vc := c.(Int64Comparable) - cmp := v - vc - if cmp < 0 { - return -1 - } - if cmp > 0 { - return 1 - } - return 0 -} diff --git a/vendor/github.com/coreos/etcd/pkg/contention/contention.go b/vendor/github.com/coreos/etcd/pkg/contention/contention.go deleted file mode 100644 index 26ce9a2f347..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/contention/contention.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package contention - -import ( - "sync" - "time" -) - -// TimeoutDetector detects routine starvations by -// observing the actual time duration to finish an action -// or between two events that should happen in a fixed -// interval. If the observed duration is longer than -// the expectation, the detector will report the result. -type TimeoutDetector struct { - mu sync.Mutex // protects all - maxDuration time.Duration - // map from event to time - // time is the last seen time of the event. - records map[uint64]time.Time -} - -// NewTimeoutDetector creates the TimeoutDetector. -func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector { - return &TimeoutDetector{ - maxDuration: maxDuration, - records: make(map[uint64]time.Time), - } -} - -// Reset resets the NewTimeoutDetector. -func (td *TimeoutDetector) Reset() { - td.mu.Lock() - defer td.mu.Unlock() - - td.records = make(map[uint64]time.Time) -} - -// Observe observes an event for given id. It returns false and exceeded duration -// if the interval is longer than the expectation. -func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) { - td.mu.Lock() - defer td.mu.Unlock() - - ok := true - now := time.Now() - exceed := time.Duration(0) - - if pt, found := td.records[which]; found { - exceed = now.Sub(pt) - td.maxDuration - if exceed > 0 { - ok = false - } - } - td.records[which] = now - return ok, exceed -} diff --git a/vendor/github.com/coreos/etcd/pkg/contention/doc.go b/vendor/github.com/coreos/etcd/pkg/contention/doc.go deleted file mode 100644 index daf452219e0..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/contention/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package contention provides facilities for detecting system contention. -package contention diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go b/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go deleted file mode 100644 index 0323b2d34c6..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cpuutil provides facilities for detecting cpu-specific features. 
-package cpuutil diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go b/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go deleted file mode 100644 index 6ab898d4b59..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cpuutil - -import ( - "encoding/binary" - "unsafe" -) - -const intWidth int = int(unsafe.Sizeof(0)) - -var byteOrder binary.ByteOrder - -// ByteOrder returns the byte order for the CPU's native endianness. -func ByteOrder() binary.ByteOrder { return byteOrder } - -func init() { - var i int = 0x1 - if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 { - byteOrder = binary.BigEndian - } else { - byteOrder = binary.LittleEndian - } -} diff --git a/vendor/github.com/coreos/etcd/pkg/crc/crc.go b/vendor/github.com/coreos/etcd/pkg/crc/crc.go deleted file mode 100644 index 4b998a48455..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/crc/crc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package crc provides utility function for cyclic redundancy check -// algorithms. -package crc - -import ( - "hash" - "hash/crc32" -) - -// The size of a CRC-32 checksum in bytes. -const Size = 4 - -type digest struct { - crc uint32 - tab *crc32.Table -} - -// New creates a new hash.Hash32 computing the CRC-32 checksum -// using the polynomial represented by the Table. -// Modified by xiangli to take a prevcrc. -func New(prev uint32, tab *crc32.Table) hash.Hash32 { return &digest{prev, tab} } - -func (d *digest) Size() int { return Size } - -func (d *digest) BlockSize() int { return 1 } - -func (d *digest) Reset() { d.crc = 0 } - -func (d *digest) Write(p []byte) (n int, err error) { - d.crc = crc32.Update(d.crc, d.tab, p) - return len(p), nil -} - -func (d *digest) Sum32() uint32 { return d.crc } - -func (d *digest) Sum(in []byte) []byte { - s := d.Sum32() - return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go deleted file mode 100644 index 58a77dfc1a9..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package fileutil - -import "os" - -// OpenDir opens a directory for syncing. -func OpenDir(path string) (*os.File, error) { return os.Open(path) } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go deleted file mode 100644 index c123395c004..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build windows - -package fileutil - -import ( - "os" - "syscall" -) - -// OpenDir opens a directory in windows with write access for syncing. -func OpenDir(path string) (*os.File, error) { - fd, err := openDir(path) - if err != nil { - return nil, err - } - return os.NewFile(uintptr(fd), path), nil -} - -func openDir(path string) (fd syscall.Handle, err error) { - if len(path) == 0 { - return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND - } - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return syscall.InvalidHandle, err - } - access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE) - sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) - createmode := uint32(syscall.OPEN_EXISTING) - fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) - return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0) -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go deleted file mode 100644 index aad40b75904..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package fileutil implements utility functions related to files and paths. -package fileutil - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sort" - - "github.com/coreos/pkg/capnslog" -) - -const ( - // PrivateFileMode grants owner to read/write a file. - PrivateFileMode = 0600 - // PrivateDirMode grants owner to make/remove files inside the directory. - PrivateDirMode = 0700 -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil") -) - -// IsDirWriteable checks if dir is writable by writing and removing a file -// to dir. It returns nil if dir is writable. 
-func IsDirWriteable(dir string) error { - f := filepath.Join(dir, ".touch") - if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil { - return err - } - return os.Remove(f) -} - -// ReadDir returns the filenames in the given directory in sorted order. -func ReadDir(dirpath string) ([]string, error) { - dir, err := os.Open(dirpath) - if err != nil { - return nil, err - } - defer dir.Close() - names, err := dir.Readdirnames(-1) - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory -// does not exists. TouchDirAll also ensures the given directory is writable. -func TouchDirAll(dir string) error { - // If path is already a directory, MkdirAll does nothing - // and returns nil. - err := os.MkdirAll(dir, PrivateDirMode) - if err != nil { - // if mkdirAll("a/text") and "text" is not - // a directory, this will return syscall.ENOTDIR - return err - } - return IsDirWriteable(dir) -} - -// CreateDirAll is similar to TouchDirAll but returns error -// if the deepest directory was not empty. -func CreateDirAll(dir string) error { - err := TouchDirAll(dir) - if err == nil { - var ns []string - ns, err = ReadDir(dir) - if err != nil { - return err - } - if len(ns) != 0 { - err = fmt.Errorf("expected %q to be empty, got %q", dir, ns) - } - } - return err -} - -func Exist(name string) bool { - _, err := os.Stat(name) - return err == nil -} - -// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily -// shorten the length of the file. -func ZeroToEnd(f *os.File) error { - // TODO: support FALLOC_FL_ZERO_RANGE - off, err := f.Seek(0, os.SEEK_CUR) - if err != nil { - return err - } - lenf, lerr := f.Seek(0, os.SEEK_END) - if lerr != nil { - return lerr - } - if err = f.Truncate(off); err != nil { - return err - } - // make sure blocks remain allocated - if err = Preallocate(f, lenf, true); err != nil { - return err - } - _, err = f.Seek(off, os.SEEK_SET) - return err -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go deleted file mode 100644 index 338627f43c8..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "errors" - "os" -) - -var ( - ErrLocked = errors.New("fileutil: file already locked") -) - -type LockedFile struct{ *os.File } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go deleted file mode 100644 index 542550bc8a9..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows,!plan9,!solaris - -package fileutil - -import ( - "os" - "syscall" -) - -func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - f, err := os.OpenFile(path, flag, perm) - if err != nil { - return nil, err - } - if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil { - f.Close() - if err == syscall.EWOULDBLOCK { - err = ErrLocked - } - return nil, err - } - return &LockedFile{f}, nil -} - -func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - f, err := os.OpenFile(path, flag, perm) - if err != nil { - return nil, err - } - if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil { - f.Close() - return nil, err - } - return &LockedFile{f}, err -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go deleted file mode 100644 index dec25a1af44..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package fileutil - -import ( - "os" - "syscall" -) - -// This used to call syscall.Flock() but that call fails with EBADF on NFS. -// An alternative is lockf() which works on NFS but that call lets a process lock -// the same file twice. Instead, use Linux's non-standard open file descriptor -// locks which will block if the process already holds the file lock. 
-// -// constants from /usr/include/bits/fcntl-linux.h -const ( - F_OFD_GETLK = 37 - F_OFD_SETLK = 37 - F_OFD_SETLKW = 38 -) - -var ( - wrlck = syscall.Flock_t{ - Type: syscall.F_WRLCK, - Whence: int16(os.SEEK_SET), - Start: 0, - Len: 0, - } - - linuxTryLockFile = flockTryLockFile - linuxLockFile = flockLockFile -) - -func init() { - // use open file descriptor locks if the system supports it - getlk := syscall.Flock_t{Type: syscall.F_RDLCK} - if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil { - linuxTryLockFile = ofdTryLockFile - linuxLockFile = ofdLockFile - } -} - -func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - return linuxTryLockFile(path, flag, perm) -} - -func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - f, err := os.OpenFile(path, flag, perm) - if err != nil { - return nil, err - } - - flock := wrlck - if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil { - f.Close() - if err == syscall.EWOULDBLOCK { - err = ErrLocked - } - return nil, err - } - return &LockedFile{f}, nil -} - -func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - return linuxLockFile(path, flag, perm) -} - -func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - f, err := os.OpenFile(path, flag, perm) - if err != nil { - return nil, err - } - - flock := wrlck - err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock) - - if err != nil { - f.Close() - return nil, err - } - return &LockedFile{f}, err -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go deleted file mode 100644 index fee6a7c8f46..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package fileutil - -import ( - "os" - "syscall" - "time" -) - -func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil { - return nil, err - } - f, err := os.Open(path, flag, perm) - if err != nil { - return nil, ErrLocked - } - return &LockedFile{f}, nil -} - -func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil { - return nil, err - } - for { - f, err := os.OpenFile(path, flag, perm) - if err == nil { - return &LockedFile{f}, nil - } - time.Sleep(10 * time.Millisecond) - } -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go deleted file mode 100644 index 352ca5590d1..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build solaris - -package fileutil - -import ( - "os" - "syscall" -) - -func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Type = syscall.F_WRLCK - lock.Whence = 0 - lock.Pid = 0 - f, err := os.OpenFile(path, flag, perm) - if err != nil { - return nil, err - } - if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil { - f.Close() - if err == syscall.EAGAIN { - err = ErrLocked - } - return nil, err - } - return &LockedFile{f}, nil -} - -func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Type = syscall.F_WRLCK - lock.Whence = 0 - f, err := os.OpenFile(path, flag, perm) - if err != nil { - return nil, err - } - if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil { - f.Close() - return nil, err - } - return &LockedFile{f}, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go deleted file mode 100644 index ed01164de6e..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !windows,!plan9,!solaris,!linux - -package fileutil - -import ( - "os" -) - -func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - return flockTryLockFile(path, flag, perm) -} - -func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - return flockLockFile(path, flag, perm) -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go deleted file mode 100644 index 8698f4a8d11..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build windows - -package fileutil - -import ( - "errors" - "fmt" - "os" - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - - errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.") -) - -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - LOCKFILE_EXCLUSIVE_LOCK = 2 - LOCKFILE_FAIL_IMMEDIATELY = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) - -func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - f, err := open(path, flag, perm) - if err != nil { - return nil, err - } - if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { - f.Close() - return nil, err - } - return &LockedFile{f}, nil -} - -func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - f, err := open(path, flag, perm) - if err != nil { - return nil, err - } - if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { - f.Close() - return nil, err - } - return &LockedFile{f}, nil -} - -func open(path string, flag int, perm os.FileMode) (*os.File, error) { - if path == "" { - return nil, fmt.Errorf("cannot open empty filename") - } - var access uint32 - switch flag { - case syscall.O_RDONLY: - access = syscall.GENERIC_READ - case syscall.O_WRONLY: - access = syscall.GENERIC_WRITE - case syscall.O_RDWR: - access = syscall.GENERIC_READ | syscall.GENERIC_WRITE - case syscall.O_WRONLY | syscall.O_CREAT: - access = syscall.GENERIC_ALL - default: - panic(fmt.Errorf("flag %v is not supported", flag)) - } - fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]), - access, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, - syscall.OPEN_ALWAYS, - syscall.FILE_ATTRIBUTE_NORMAL, - 0) - if err != nil { - return nil, err - } - return os.NewFile(uintptr(fd), path), nil -} - -func lockFile(fd syscall.Handle, flags uint32) error { - var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK - flag |= flags - if fd == syscall.InvalidHandle { - return nil - } - err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) - if err == nil { - return 
nil - } else if err.Error() == errLocked.Error() { - return ErrLocked - } else if err != errLockViolation { - return err - } - return nil -} - -func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - var reserved uint32 = 0 - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go deleted file mode 100644 index bb7f0281239..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import "os" - -// Preallocate tries to allocate the space for given -// file. This operation is only supported on linux by a -// few filesystems (btrfs, ext4, etc.). -// If the operation is unsupported, no error will be returned. -// Otherwise, the error encountered will be returned. -func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { - if extendFile { - return preallocExtend(f, sizeInBytes) - } - return preallocFixed(f, sizeInBytes) -} - -func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { - curOff, err := f.Seek(0, os.SEEK_CUR) - if err != nil { - return err - } - size, err := f.Seek(sizeInBytes, os.SEEK_END) - if err != nil { - return err - } - if _, err = f.Seek(curOff, os.SEEK_SET); err != nil { - return err - } - if sizeInBytes > size { - return nil - } - return f.Truncate(sizeInBytes) -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go deleted file mode 100644 index 1ed09c560f2..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build darwin - -package fileutil - -import ( - "os" - "syscall" - "unsafe" -) - -func preallocExtend(f *os.File, sizeInBytes int64) error { - if err := preallocFixed(f, sizeInBytes); err != nil { - return err - } - return preallocExtendTrunc(f, sizeInBytes) -} - -func preallocFixed(f *os.File, sizeInBytes int64) error { - fstore := &syscall.Fstore_t{ - Flags: syscall.F_ALLOCATEALL, - Posmode: syscall.F_PEOFPOSMODE, - Length: sizeInBytes} - p := unsafe.Pointer(fstore) - _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p)) - if errno == 0 || errno == syscall.ENOTSUP { - return nil - } - return errno -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go deleted file mode 100644 index 50bd84f02ad..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package fileutil - -import ( - "os" - "syscall" -) - -func preallocExtend(f *os.File, sizeInBytes int64) error { - // use mode = 0 to change size - err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) - if err != nil { - errno, ok := err.(syscall.Errno) - // not supported; fallback - // fallocate EINTRs frequently in some environments; fallback - if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { - return preallocExtendTrunc(f, sizeInBytes) - } - } - return err -} - -func preallocFixed(f *os.File, sizeInBytes int64) error { - // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE - err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) - if err != nil { - errno, ok := err.(syscall.Errno) - // treat not supported as nil error - if ok && errno == syscall.ENOTSUP { - return nil - } - } - return err -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go deleted file mode 100644 index 162fbc5f782..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !linux,!darwin - -package fileutil - -import "os" - -func preallocExtend(f *os.File, sizeInBytes int64) error { - return preallocExtendTrunc(f, sizeInBytes) -} - -func preallocFixed(f *os.File, sizeInBytes int64) error { return nil } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go deleted file mode 100644 index 92fceab017f..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "os" - "path/filepath" - "sort" - "strings" - "time" -) - -func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { - return purgeFile(dirname, suffix, max, interval, stop, nil) -} - -// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. -func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error { - errC := make(chan error, 1) - go func() { - for { - fnames, err := ReadDir(dirname) - if err != nil { - errC <- err - return - } - newfnames := make([]string, 0) - for _, fname := range fnames { - if strings.HasSuffix(fname, suffix) { - newfnames = append(newfnames, fname) - } - } - sort.Strings(newfnames) - fnames = newfnames - for len(newfnames) > int(max) { - f := filepath.Join(dirname, newfnames[0]) - l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) - if err != nil { - break - } - if err = os.Remove(f); err != nil { - errC <- err - return - } - if err = l.Close(); err != nil { - plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) - errC <- err - return - } - plog.Infof("purged file %s successfully", f) - newfnames = newfnames[1:] - } - if purgec != nil { - for i := 0; i < len(fnames)-len(newfnames); i++ { - purgec <- fnames[i] - } - } - select { - case <-time.After(interval): - case <-stop: - return - } - } - }() - return errC -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go deleted file mode 100644 index 54dd41f4f35..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !linux,!darwin - -package fileutil - -import "os" - -// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform. -func Fsync(f *os.File) error { - return f.Sync() -} - -// Fdatasync is a wrapper around file.Sync(). Special handling is needed on linux platform. -func Fdatasync(f *os.File) error { - return f.Sync() -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go deleted file mode 100644 index c2f39bf204d..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build darwin - -package fileutil - -import ( - "os" - "syscall" -) - -// Fsync on HFS/OSX flushes the data on to the physical drive but the drive -// may not write it to the persistent media for quite sometime and it may be -// written in out-of-order sequence. Using F_FULLFSYNC ensures that the -// physical drive's buffer will also get flushed to the media. -func Fsync(f *os.File) error { - _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0)) - if errno == 0 { - return nil - } - return errno -} - -// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence -// on physical drive media. -func Fdatasync(f *os.File) error { - return Fsync(f) -} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go deleted file mode 100644 index 1bbced915e9..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package fileutil - -import ( - "os" - "syscall" -) - -// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform. -func Fsync(f *os.File) error { - return f.Sync() -} - -// Fdatasync is similar to fsync(), but does not flush modified metadata -// unless that metadata is needed in order to allow a subsequent data retrieval -// to be correctly handled. 
-func Fdatasync(f *os.File) error { - return syscall.Fdatasync(int(f.Fd())) -} diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go deleted file mode 100644 index 859fc9d49e1..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq.go - -// Package httputil provides HTTP utility functions. -package httputil - -import ( - "io" - "io/ioutil" - "net/http" -) - -func RequestCanceler(req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} - -// GracefulClose drains http.Response.Body until it hits EOF -// and closes it. This prevents TCP/TLS connections from closing, -// therefore available for reuse. -func GracefulClose(resp *http.Response) { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() -} diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/github.com/coreos/etcd/pkg/idutil/id.go deleted file mode 100644 index 931beb2d058..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/idutil/id.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package idutil implements utility functions for generating unique, -// randomized ids. -package idutil - -import ( - "math" - "sync" - "time" -) - -const ( - tsLen = 5 * 8 - cntLen = 8 - suffixLen = tsLen + cntLen -) - -// Generator generates unique identifiers based on counters, timestamps, and -// a node member ID. -// -// The initial id is in this format: -// High order byte is memberID, next 5 bytes are from timestamp, -// and low order 2 bytes are 0s. -// | prefix | suffix | -// | 2 bytes | 5 bytes | 1 byte | -// | memberID | timestamp | cnt | -// -// The timestamp 5 bytes is different when the machine is restart -// after 1 ms and before 35 years. -// -// It increases suffix to generate the next id. -// The count field may overflow to timestamp field, which is intentional. -// It helps to extend the event window to 2^56. This doesn't break that -// id generated after restart is unique because etcd throughput is << -// 256req/ms(250k reqs/second). -type Generator struct { - mu sync.Mutex - // high order 2 bytes - prefix uint64 - // low order 6 bytes - suffix uint64 -} - -func NewGenerator(memberID uint16, now time.Time) *Generator { - prefix := uint64(memberID) << suffixLen - unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond) - suffix := lowbit(unixMilli, tsLen) << cntLen - return &Generator{ - prefix: prefix, - suffix: suffix, - } -} - -// Next generates a id that is unique. 
-func (g *Generator) Next() uint64 { - g.mu.Lock() - defer g.mu.Unlock() - g.suffix++ - id := g.prefix | lowbit(g.suffix, suffixLen) - return id -} - -func lowbit(x uint64, n uint) uint64 { - return x & (math.MaxUint64 >> (64 - n)) -} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go b/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go deleted file mode 100644 index 72de1593d3a..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ioutil - -import ( - "io" -) - -var defaultBufferBytes = 128 * 1024 - -// PageWriter implements the io.Writer interface so that writes will -// either be in page chunks or from flushing. -type PageWriter struct { - w io.Writer - // pageOffset tracks the page offset of the base of the buffer - pageOffset int - // pageBytes is the number of bytes per page - pageBytes int - // bufferedBytes counts the number of bytes pending for write in the buffer - bufferedBytes int - // buf holds the write buffer - buf []byte - // bufWatermarkBytes is the number of bytes the buffer can hold before it needs - // to be flushed. It is less than len(buf) so there is space for slack writes - // to bring the writer to page alignment. - bufWatermarkBytes int -} - -// NewPageWriter creates a new PageWriter. pageBytes is the number of bytes -// to write per page. pageOffset is the starting offset of io.Writer. 
-func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter { - return &PageWriter{ - w: w, - pageOffset: pageOffset, - pageBytes: pageBytes, - buf: make([]byte, defaultBufferBytes+pageBytes), - bufWatermarkBytes: defaultBufferBytes, - } -} - -func (pw *PageWriter) Write(p []byte) (n int, err error) { - if len(p)+pw.bufferedBytes <= pw.bufWatermarkBytes { - // no overflow - copy(pw.buf[pw.bufferedBytes:], p) - pw.bufferedBytes += len(p) - return len(p), nil - } - // complete the slack page in the buffer if unaligned - slack := pw.pageBytes - ((pw.pageOffset + pw.bufferedBytes) % pw.pageBytes) - if slack != pw.pageBytes { - partial := slack > len(p) - if partial { - // not enough data to complete the slack page - slack = len(p) - } - // special case: writing to slack page in buffer - copy(pw.buf[pw.bufferedBytes:], p[:slack]) - pw.bufferedBytes += slack - n = slack - p = p[slack:] - if partial { - // avoid forcing an unaligned flush - return n, nil - } - } - // buffer contents are now page-aligned; clear out - if err = pw.Flush(); err != nil { - return n, err - } - // directly write all complete pages without copying - if len(p) > pw.pageBytes { - pages := len(p) / pw.pageBytes - c, werr := pw.w.Write(p[:pages*pw.pageBytes]) - n += c - if werr != nil { - return n, werr - } - p = p[pages*pw.pageBytes:] - } - // write remaining tail to buffer - c, werr := pw.Write(p) - n += c - return n, werr -} - -func (pw *PageWriter) Flush() error { - if pw.bufferedBytes == 0 { - return nil - } - _, err := pw.w.Write(pw.buf[:pw.bufferedBytes]) - pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes - pw.bufferedBytes = 0 - return err -} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go b/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go deleted file mode 100644 index d3efcfe3d5a..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ioutil - -import ( - "fmt" - "io" -) - -// ReaderAndCloser implements io.ReadCloser interface by combining -// reader and closer together. -type ReaderAndCloser struct { - io.Reader - io.Closer -} - -var ( - ErrShortRead = fmt.Errorf("ioutil: short read") - ErrExpectEOF = fmt.Errorf("ioutil: expect EOF") -) - -// NewExactReadCloser returns a ReadCloser that returns errors if the underlying -// reader does not read back exactly the requested number of bytes. 
-func NewExactReadCloser(rc io.ReadCloser, totalBytes int64) io.ReadCloser { - return &exactReadCloser{rc: rc, totalBytes: totalBytes} -} - -type exactReadCloser struct { - rc io.ReadCloser - br int64 - totalBytes int64 -} - -func (e *exactReadCloser) Read(p []byte) (int, error) { - n, err := e.rc.Read(p) - e.br += int64(n) - if e.br > e.totalBytes { - return 0, ErrExpectEOF - } - if e.br < e.totalBytes && n == 0 { - return 0, ErrShortRead - } - return n, err -} - -func (e *exactReadCloser) Close() error { - if err := e.rc.Close(); err != nil { - return err - } - if e.br < e.totalBytes { - return ErrShortRead - } - return nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go b/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go deleted file mode 100644 index 0703ed476d8..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ioutil implements I/O utility functions. -package ioutil - -import "io" - -// NewLimitedBufferReader returns a reader that reads from the given reader -// but limits the amount of data returned to at most n bytes. -func NewLimitedBufferReader(r io.Reader, n int) io.Reader { - return &limitedBufferReader{ - r: r, - n: n, - } -} - -type limitedBufferReader struct { - r io.Reader - n int -} - -func (r *limitedBufferReader) Read(p []byte) (n int, err error) { - np := p - if len(np) > r.n { - np = np[:r.n] - } - return r.r.Read(np) -} diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go b/vendor/github.com/coreos/etcd/pkg/ioutil/util.go deleted file mode 100644 index 192ad888c24..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ioutil - -import ( - "io" - "os" - - "github.com/coreos/etcd/pkg/fileutil" -) - -// WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library, -// but calls Sync before closing the file. WriteAndSyncFile guarantees the data -// is synced if there is no error returned. 
-func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error { - f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err == nil { - err = fileutil.Fsync(f) - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go deleted file mode 100644 index cc750f4d3d6..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package logutil includes utilities to facilitate logging. -package logutil - -import ( - "fmt" - "sync" - "time" - - "github.com/coreos/pkg/capnslog" -) - -var ( - defaultMergePeriod = time.Second - defaultTimeOutputScale = 10 * time.Millisecond - - outputInterval = time.Second -) - -// line represents a log line that can be printed out -// through capnslog.PackageLogger. -type line struct { - level capnslog.LogLevel - str string -} - -func (l line) append(s string) line { - return line{ - level: l.level, - str: l.str + " " + s, - } -} - -// status represents the merge status of a line. -type status struct { - period time.Duration - - start time.Time // start time of latest merge period - count int // number of merged lines from starting -} - -func (s *status) isInMergePeriod(now time.Time) bool { - return s.period == 0 || s.start.Add(s.period).After(now) -} - -func (s *status) isEmpty() bool { return s.count == 0 } - -func (s *status) summary(now time.Time) string { - ts := s.start.Round(defaultTimeOutputScale) - took := now.Round(defaultTimeOutputScale).Sub(ts) - return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took) -} - -func (s *status) reset(now time.Time) { - s.start = now - s.count = 0 -} - -// MergeLogger supports merge logging, which merges repeated log lines -// and prints summary log lines instead. -// -// For merge logging, MergeLogger prints out the line when the line appears -// at the first time. MergeLogger holds the same log line printed within -// defaultMergePeriod, and prints out summary log line at the end of defaultMergePeriod. -// It stops merging when the line doesn't appear within the -// defaultMergePeriod. 
-type MergeLogger struct { - *capnslog.PackageLogger - - mu sync.Mutex // protect statusm - statusm map[line]*status -} - -func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger { - l := &MergeLogger{ - PackageLogger: logger, - statusm: make(map[line]*status), - } - go l.outputLoop() - return l -} - -func (l *MergeLogger) MergeInfo(entries ...interface{}) { - l.merge(line{ - level: capnslog.INFO, - str: fmt.Sprint(entries...), - }) -} - -func (l *MergeLogger) MergeInfof(format string, args ...interface{}) { - l.merge(line{ - level: capnslog.INFO, - str: fmt.Sprintf(format, args...), - }) -} - -func (l *MergeLogger) MergeNotice(entries ...interface{}) { - l.merge(line{ - level: capnslog.NOTICE, - str: fmt.Sprint(entries...), - }) -} - -func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) { - l.merge(line{ - level: capnslog.NOTICE, - str: fmt.Sprintf(format, args...), - }) -} - -func (l *MergeLogger) MergeWarning(entries ...interface{}) { - l.merge(line{ - level: capnslog.WARNING, - str: fmt.Sprint(entries...), - }) -} - -func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) { - l.merge(line{ - level: capnslog.WARNING, - str: fmt.Sprintf(format, args...), - }) -} - -func (l *MergeLogger) MergeError(entries ...interface{}) { - l.merge(line{ - level: capnslog.ERROR, - str: fmt.Sprint(entries...), - }) -} - -func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) { - l.merge(line{ - level: capnslog.ERROR, - str: fmt.Sprintf(format, args...), - }) -} - -func (l *MergeLogger) merge(ln line) { - l.mu.Lock() - - // increase count if the logger is merging the line - if status, ok := l.statusm[ln]; ok { - status.count++ - l.mu.Unlock() - return - } - - // initialize status of the line - l.statusm[ln] = &status{ - period: defaultMergePeriod, - start: time.Now(), - } - // release the lock before IO operation - l.mu.Unlock() - // print out the line at its first time - l.PackageLogger.Logf(ln.level, ln.str) -} - -func (l *MergeLogger) outputLoop() { - for now := range time.Tick(outputInterval) { - var outputs []line - - l.mu.Lock() - for ln, status := range l.statusm { - if status.isInMergePeriod(now) { - continue - } - if status.isEmpty() { - delete(l.statusm, ln) - continue - } - outputs = append(outputs, ln.append(status.summary(now))) - status.reset(now) - } - l.mu.Unlock() - - for _, o := range outputs { - l.PackageLogger.Logf(o.level, o.str) - } - } -} diff --git a/vendor/github.com/coreos/etcd/pkg/monotime/issue15006.s b/vendor/github.com/coreos/etcd/pkg/monotime/issue15006.s deleted file mode 100644 index c3132a1f0c8..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/monotime/issue15006.s +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (C) 2016 Arista Networks, Inc. -// Use of this source code is governed by the Apache License 2.0 -// that can be found in the COPYING file. - -// This file is intentionally empty. -// It's a workaround for https://github.com/golang/go/issues/15006 \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/pkg/monotime/monotime.go b/vendor/github.com/coreos/etcd/pkg/monotime/monotime.go deleted file mode 100644 index a5e16ce1d0e..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/monotime/monotime.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package monotime - -import ( - "time" -) - -// Time represents a point in monotonic time -type Time uint64 - -func (t Time) Add(d time.Duration) Time { - return Time(uint64(t) + uint64(d.Nanoseconds())) -} diff --git a/vendor/github.com/coreos/etcd/pkg/monotime/nanotime.go b/vendor/github.com/coreos/etcd/pkg/monotime/nanotime.go deleted file mode 100644 index e3fc846ef9e..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/monotime/nanotime.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2016 Arista Networks, Inc. -// Use of this source code is governed by the Apache License 2.0 -// that can be found in the COPYING file. - -// Package monotime provides a fast monotonic clock source. -package monotime - -import ( - _ "unsafe" // required to use //go:linkname -) - -//go:noescape -//go:linkname nanotime runtime.nanotime -func nanotime() int64 - -// Now returns the current time in nanoseconds from a monotonic clock. -// The time returned is based on some arbitrary platform-specific point in the -// past. The time returned is guaranteed to increase monotonically at a -// constant rate, unlike time.Now() from the Go standard library, which may -// slow down, speed up, jump forward or backward, due to NTP activity or leap -// seconds. -func Now() Time { - return Time(nanotime()) -} diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go deleted file mode 100644 index 418580ac48d..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package netutil - -import ( - "fmt" - "os/exec" -) - -// DropPort drops all tcp packets that are received from the given port and sent to the given port. -func DropPort(port int) error { - cmdStr := fmt.Sprintf("sudo iptables -A OUTPUT -p tcp --destination-port %d -j DROP", port) - if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil { - return err - } - cmdStr = fmt.Sprintf("sudo iptables -A INPUT -p tcp --destination-port %d -j DROP", port) - _, err := exec.Command("/bin/sh", "-c", cmdStr).Output() - return err -} - -// RecoverPort stops dropping tcp packets at given port. 
-func RecoverPort(port int) error { - cmdStr := fmt.Sprintf("sudo iptables -D OUTPUT -p tcp --destination-port %d -j DROP", port) - if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil { - return err - } - cmdStr = fmt.Sprintf("sudo iptables -D INPUT -p tcp --destination-port %d -j DROP", port) - _, err := exec.Command("/bin/sh", "-c", cmdStr).Output() - return err -} - -// SetLatency adds latency in millisecond scale with random variations. -func SetLatency(ms, rv int) error { - ifces, err := GetDefaultInterfaces() - if err != nil { - return err - } - - if rv > ms { - rv = 1 - } - for ifce := range ifces { - cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv) - _, err = exec.Command("/bin/sh", "-c", cmdStr).Output() - if err != nil { - // the rule has already been added. Overwrite it. - cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv) - _, err = exec.Command("/bin/sh", "-c", cmdStr).Output() - if err != nil { - return err - } - } - } - return nil -} - -// RemoveLatency resets latency configurations. -func RemoveLatency() error { - ifces, err := GetDefaultInterfaces() - if err != nil { - return err - } - for ifce := range ifces { - _, err = exec.Command("/bin/sh", "-c", fmt.Sprintf("sudo tc qdisc del dev %s root netem", ifce)).Output() - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go deleted file mode 100644 index 7f4c3e67c2a..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !linux - -package netutil - -func DropPort(port int) error { return nil } - -func RecoverPort(port int) error { return nil } - -func SetLatency(ms, rv int) error { return nil } - -func RemoveLatency() error { return nil } diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go deleted file mode 100644 index bb5f392b34c..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package netutil implements network-related utility functions. 
-package netutil - -import ( - "net" - "net/url" - "reflect" - "sort" - "time" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/pkg/capnslog" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil") - - // indirection for testing - resolveTCPAddr = net.ResolveTCPAddr -) - -const retryInterval = time.Second - -// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr. -// resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames -// are resolved. -func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) { - newurls := make([][]url.URL, 0) - for _, us := range urls { - nus := make([]url.URL, len(us)) - for i, u := range us { - nu, err := url.Parse(u.String()) - if err != nil { - return nil, err - } - nus[i] = *nu - } - for i, u := range nus { - h, err := resolveURL(ctx, u) - if err != nil { - return nil, err - } - if h != "" { - nus[i].Host = h - } - } - newurls = append(newurls, nus) - } - return newurls, nil -} - -func resolveURL(ctx context.Context, u url.URL) (string, error) { - for ctx.Err() == nil { - host, _, err := net.SplitHostPort(u.Host) - if err != nil { - plog.Errorf("could not parse url %s during tcp resolving", u.Host) - return "", err - } - if host == "localhost" || net.ParseIP(host) != nil { - return "", nil - } - tcpAddr, err := resolveTCPAddr("tcp", u.Host) - if err == nil { - plog.Infof("resolving %s to %s", u.Host, tcpAddr.String()) - return tcpAddr.String(), nil - } - plog.Warningf("failed resolving host %s (%v); retrying in %v", u.Host, err, retryInterval) - select { - case <-ctx.Done(): - plog.Errorf("could not resolve host %s", u.Host) - return "", err - case <-time.After(retryInterval): - } - } - return "", ctx.Err() -} - -// urlsEqual checks equality of url.URLS between two arrays. -// This check pass even if an URL is in hostname and opposite is in IP address. -func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) bool { - if len(a) != len(b) { - return false - } - urls, err := resolveTCPAddrs(ctx, [][]url.URL{a, b}) - if err != nil { - return false - } - a, b = urls[0], urls[1] - sort.Sort(types.URLs(a)) - sort.Sort(types.URLs(b)) - for i := range a { - if !reflect.DeepEqual(a[i], b[i]) { - return false - } - } - - return true -} - -func URLStringsEqual(ctx context.Context, a []string, b []string) bool { - if len(a) != len(b) { - return false - } - urlsA := make([]url.URL, 0) - for _, str := range a { - u, err := url.Parse(str) - if err != nil { - return false - } - urlsA = append(urlsA, *u) - } - urlsB := make([]url.URL, 0) - for _, str := range b { - u, err := url.Parse(str) - if err != nil { - return false - } - urlsB = append(urlsB, *u) - } - - return urlsEqual(ctx, urlsA, urlsB) -} - -func IsNetworkTimeoutError(err error) bool { - nerr, ok := err.(net.Error) - return ok && nerr.Timeout() -} diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/routes.go b/vendor/github.com/coreos/etcd/pkg/netutil/routes.go deleted file mode 100644 index 3eb6a19ec84..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/netutil/routes.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !linux - -package netutil - -import ( - "fmt" - "runtime" -) - -// GetDefaultHost fetches the a resolvable name that corresponds -// to the machine's default routable interface -func GetDefaultHost() (string, error) { - return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH) -} - -// GetDefaultInterfaces fetches the device name of default routable interface. -func GetDefaultInterfaces() (map[string]uint8, error) { - return nil, fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH) -} diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go b/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go deleted file mode 100644 index 5d234d46038..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package netutil - -import ( - "bytes" - "encoding/binary" - "fmt" - "net" - "sort" - "syscall" - - "github.com/coreos/etcd/pkg/cpuutil" -) - -var errNoDefaultRoute = fmt.Errorf("could not find default route") -var errNoDefaultHost = fmt.Errorf("could not find default host") -var errNoDefaultInterface = fmt.Errorf("could not find default interface") - -// GetDefaultHost obtains the first IP address of machine from the routing table and returns the IP address as string. -// An IPv4 address is preferred to an IPv6 address for backward compatibility. 
-func GetDefaultHost() (string, error) { - rmsgs, rerr := getDefaultRoutes() - if rerr != nil { - return "", rerr - } - - // prioritize IPv4 - if rmsg, ok := rmsgs[syscall.AF_INET]; ok { - if host, err := chooseHost(syscall.AF_INET, rmsg); host != "" || err != nil { - return host, err - } - delete(rmsgs, syscall.AF_INET) - } - - // sort so choice is deterministic - var families []int - for family := range rmsgs { - families = append(families, int(family)) - } - sort.Ints(families) - - for _, f := range families { - family := uint8(f) - if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil { - return host, err - } - } - - return "", errNoDefaultHost -} - -func chooseHost(family uint8, rmsg *syscall.NetlinkMessage) (string, error) { - host, oif, err := parsePREFSRC(rmsg) - if host != "" || err != nil { - return host, err - } - - // prefsrc not detected, fall back to getting address from iface - ifmsg, ierr := getIfaceAddr(oif, family) - if ierr != nil { - return "", ierr - } - - attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg) - if aerr != nil { - return "", aerr - } - - for _, attr := range attrs { - // search for RTA_DST because ipv6 doesn't have RTA_SRC - if attr.Attr.Type == syscall.RTA_DST { - return net.IP(attr.Value).String(), nil - } - } - - return "", nil -} - -func getDefaultRoutes() (map[uint8]*syscall.NetlinkMessage, error) { - dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC) - if err != nil { - return nil, err - } - - msgs, msgErr := syscall.ParseNetlinkMessage(dat) - if msgErr != nil { - return nil, msgErr - } - - routes := make(map[uint8]*syscall.NetlinkMessage) - rtmsg := syscall.RtMsg{} - for _, m := range msgs { - if m.Header.Type != syscall.RTM_NEWROUTE { - continue - } - buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg]) - if rerr := binary.Read(buf, cpuutil.ByteOrder(), &rtmsg); rerr != nil { - continue - } - if rtmsg.Dst_len == 0 && rtmsg.Table == syscall.RT_TABLE_MAIN { - // zero-length Dst_len implies default route - msg := m - routes[rtmsg.Family] = &msg - } - } - - if len(routes) > 0 { - return routes, nil - } - - return nil, errNoDefaultRoute -} - -// Used to get an address of interface. -func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) { - dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, int(family)) - if err != nil { - return nil, err - } - - msgs, msgErr := syscall.ParseNetlinkMessage(dat) - if msgErr != nil { - return nil, msgErr - } - - ifaddrmsg := syscall.IfAddrmsg{} - for _, m := range msgs { - if m.Header.Type != syscall.RTM_NEWADDR { - continue - } - buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg]) - if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifaddrmsg); rerr != nil { - continue - } - if ifaddrmsg.Index == idx { - return &m, nil - } - } - - return nil, fmt.Errorf("could not find address for interface index %v", idx) - -} - -// Used to get a name of interface. 
-func getIfaceLink(idx uint32) (*syscall.NetlinkMessage, error) { - dat, err := syscall.NetlinkRIB(syscall.RTM_GETLINK, syscall.AF_UNSPEC) - if err != nil { - return nil, err - } - - msgs, msgErr := syscall.ParseNetlinkMessage(dat) - if msgErr != nil { - return nil, msgErr - } - - ifinfomsg := syscall.IfInfomsg{} - for _, m := range msgs { - if m.Header.Type != syscall.RTM_NEWLINK { - continue - } - buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfInfomsg]) - if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifinfomsg); rerr != nil { - continue - } - if ifinfomsg.Index == int32(idx) { - return &m, nil - } - } - - return nil, fmt.Errorf("could not find link for interface index %v", idx) -} - -// GetDefaultInterfaces gets names of interfaces and returns a map[interface]families. -func GetDefaultInterfaces() (map[string]uint8, error) { - interfaces := make(map[string]uint8) - rmsgs, rerr := getDefaultRoutes() - if rerr != nil { - return interfaces, rerr - } - - for family, rmsg := range rmsgs { - _, oif, err := parsePREFSRC(rmsg) - if err != nil { - return interfaces, err - } - - ifmsg, ierr := getIfaceLink(oif) - if ierr != nil { - return interfaces, ierr - } - - attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg) - if aerr != nil { - return interfaces, aerr - } - - for _, attr := range attrs { - if attr.Attr.Type == syscall.IFLA_IFNAME { - // key is an interface name - // possible values: 2 - AF_INET, 10 - AF_INET6, 12 - dualstack - interfaces[string(attr.Value[:len(attr.Value)-1])] += family - } - } - } - if len(interfaces) > 0 { - return interfaces, nil - } - return interfaces, errNoDefaultInterface -} - -// parsePREFSRC returns preferred source address and output interface index (RTA_OIF). -func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) { - var attrs []syscall.NetlinkRouteAttr - attrs, err = syscall.ParseNetlinkRouteAttr(m) - if err != nil { - return "", 0, err - } - - for _, attr := range attrs { - if attr.Attr.Type == syscall.RTA_PREFSRC { - host = net.IP(attr.Value).String() - } - if attr.Attr.Type == syscall.RTA_OIF { - oif = cpuutil.ByteOrder().Uint32(attr.Value) - } - if host != "" && oif != uint32(0) { - break - } - } - - if oif == 0 { - err = errNoDefaultRoute - } - return -} diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go deleted file mode 100644 index f26254ba933..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pathutil implements utility functions for handling slash-separated -// paths. -package pathutil - -import "path" - -// CanonicalURLPath returns the canonical url path for p, which follows the rules: -// 1. the path always starts with "/" -// 2. replace multiple slashes with a single slash -// 3. replace each '.' '..' path name element with equivalent one -// 4. keep the trailing slash -// The function is borrowed from stdlib http.cleanPath in server.go. -func CanonicalURLPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root, - // put the trailing slash back if necessary. 
- if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} diff --git a/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go b/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go deleted file mode 100644 index d70f98dd82f..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil defines interfaces for handling Protocol Buffer objects. -package pbutil - -import "github.com/coreos/pkg/capnslog" - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/pbutil") -) - -type Marshaler interface { - Marshal() (data []byte, err error) -} - -type Unmarshaler interface { - Unmarshal(data []byte) error -} - -func MustMarshal(m Marshaler) []byte { - d, err := m.Marshal() - if err != nil { - plog.Panicf("marshal should never fail (%v)", err) - } - return d -} - -func MustUnmarshal(um Unmarshaler, data []byte) { - if err := um.Unmarshal(data); err != nil { - plog.Panicf("unmarshal should never fail (%v)", err) - } -} - -func MaybeUnmarshal(um Unmarshaler, data []byte) bool { - if err := um.Unmarshal(data); err != nil { - return false - } - return true -} - -func GetBool(v *bool) (vv bool, set bool) { - if v == nil { - return false, false - } - return *v, true -} - -func Boolp(b bool) *bool { return &b } diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go b/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go deleted file mode 100644 index 8e9359db28c..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package runtime implements utility functions for runtime systems. 
-package runtime - -import ( - "io/ioutil" - "syscall" -) - -func FDLimit() (uint64, error) { - var rlimit syscall.Rlimit - if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil { - return 0, err - } - return rlimit.Cur, nil -} - -func FDUsage() (uint64, error) { - fds, err := ioutil.ReadDir("/proc/self/fd") - if err != nil { - return 0, err - } - return uint64(len(fds)), nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go b/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go deleted file mode 100644 index 0cbdb88c7a6..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !linux - -package runtime - -import ( - "fmt" - "runtime" -) - -func FDLimit() (uint64, error) { - return 0, fmt.Errorf("cannot get FDLimit on %s", runtime.GOOS) -} - -func FDUsage() (uint64, error) { - return 0, fmt.Errorf("cannot get FDUsage on %s", runtime.GOOS) -} diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/doc.go b/vendor/github.com/coreos/etcd/pkg/schedule/doc.go deleted file mode 100644 index cca2c75fb6a..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/schedule/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package schedule provides mechanisms and policies for scheduling units of work. -package schedule diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go deleted file mode 100644 index 79c59b01288..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schedule - -import ( - "sync" - - "golang.org/x/net/context" -) - -type Job func(context.Context) - -// Scheduler can schedule jobs. 
-type Scheduler interface { - // Schedule asks the scheduler to schedule a job defined by the given func. - // Schedule to a stopped scheduler might panic. - Schedule(j Job) - - // Pending returns number of pending jobs - Pending() int - - // Scheduled returns the number of scheduled jobs (excluding pending jobs) - Scheduled() int - - // Finished returns the number of finished jobs - Finished() int - - // WaitFinish waits until at least n job are finished and all pending jobs are finished. - WaitFinish(n int) - - // Stop stops the scheduler. - Stop() -} - -type fifo struct { - mu sync.Mutex - - resume chan struct{} - scheduled int - finished int - pendings []Job - - ctx context.Context - cancel context.CancelFunc - - finishCond *sync.Cond - donec chan struct{} -} - -// NewFIFOScheduler returns a Scheduler that schedules jobs in FIFO -// order sequentially -func NewFIFOScheduler() Scheduler { - f := &fifo{ - resume: make(chan struct{}, 1), - donec: make(chan struct{}, 1), - } - f.finishCond = sync.NewCond(&f.mu) - f.ctx, f.cancel = context.WithCancel(context.Background()) - go f.run() - return f -} - -// Schedule schedules a job that will be ran in FIFO order sequentially. -func (f *fifo) Schedule(j Job) { - f.mu.Lock() - defer f.mu.Unlock() - - if f.cancel == nil { - panic("schedule: schedule to stopped scheduler") - } - - if len(f.pendings) == 0 { - select { - case f.resume <- struct{}{}: - default: - } - } - f.pendings = append(f.pendings, j) - - return -} - -func (f *fifo) Pending() int { - f.mu.Lock() - defer f.mu.Unlock() - return len(f.pendings) -} - -func (f *fifo) Scheduled() int { - f.mu.Lock() - defer f.mu.Unlock() - return f.scheduled -} - -func (f *fifo) Finished() int { - f.finishCond.L.Lock() - defer f.finishCond.L.Unlock() - return f.finished -} - -func (f *fifo) WaitFinish(n int) { - f.finishCond.L.Lock() - for f.finished < n || len(f.pendings) != 0 { - f.finishCond.Wait() - } - f.finishCond.L.Unlock() -} - -// Stop stops the scheduler and cancels all pending jobs. -func (f *fifo) Stop() { - f.mu.Lock() - f.cancel() - f.cancel = nil - f.mu.Unlock() - <-f.donec -} - -func (f *fifo) run() { - // TODO: recover from job panic? - defer func() { - close(f.donec) - close(f.resume) - }() - - for { - var todo Job - f.mu.Lock() - if len(f.pendings) != 0 { - f.scheduled++ - todo = f.pendings[0] - } - f.mu.Unlock() - if todo == nil { - select { - case <-f.resume: - case <-f.ctx.Done(): - f.mu.Lock() - pendings := f.pendings - f.pendings = nil - f.mu.Unlock() - // clean up pending jobs - for _, todo := range pendings { - todo(f.ctx) - } - return - } - } else { - todo(f.ctx) - f.finishCond.L.Lock() - f.finished++ - f.pendings = f.pendings[1:] - f.finishCond.Broadcast() - f.finishCond.L.Unlock() - } - } -} diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/leak.go b/vendor/github.com/coreos/etcd/pkg/testutil/leak.go deleted file mode 100644 index 80bc0eebc8a..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/testutil/leak.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package testutil - -import ( - "fmt" - "net/http" - "os" - "regexp" - "runtime" - "sort" - "strings" - "testing" - "time" -) - -/* -CheckLeakedGoroutine verifies tests do not leave any leaky -goroutines. It returns true when there are goroutines still -running(leaking) after all tests. 
- - import "github.com/coreos/etcd/pkg/testutil" - - func TestMain(m *testing.M) { - v := m.Run() - if v == 0 && testutil.CheckLeakedGoroutine() { - os.Exit(1) - } - os.Exit(v) - } - - func TestSample(t *testing.T) { - defer testutil.AfterTest(t) - ... - } - -*/ -func CheckLeakedGoroutine() bool { - if testing.Short() { - // not counting goroutines for leakage in -short mode - return false - } - gs := interestingGoroutines() - if len(gs) == 0 { - return false - } - - stackCount := make(map[string]int) - re := regexp.MustCompile(`\(0[0-9a-fx, ]*\)`) - for _, g := range gs { - // strip out pointer arguments in first function of stack dump - normalized := string(re.ReplaceAll([]byte(g), []byte("(...)"))) - stackCount[normalized]++ - } - - fmt.Fprintf(os.Stderr, "Too many goroutines running after all test(s).\n") - for stack, count := range stackCount { - fmt.Fprintf(os.Stderr, "%d instances of:\n%s\n", count, stack) - } - return true -} - -func AfterTest(t *testing.T) { - http.DefaultTransport.(*http.Transport).CloseIdleConnections() - if testing.Short() { - return - } - var bad string - badSubstring := map[string]string{ - ").writeLoop(": "a Transport", - "created by net/http/httptest.(*Server).Start": "an httptest.Server", - "timeoutHandler": "a TimeoutHandler", - "net.(*netFD).connect(": "a timing out dial", - ").noteClientGone(": "a closenotifier sender", - ").readLoop(": "a Transport", - } - - var stacks string - for i := 0; i < 6; i++ { - bad = "" - stacks = strings.Join(interestingGoroutines(), "\n\n") - for substr, what := range badSubstring { - if strings.Contains(stacks, substr) { - bad = what - } - } - if bad == "" { - return - } - // Bad stuff found, but goroutines might just still be - // shutting down, so give it some time. - time.Sleep(50 * time.Millisecond) - } - t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks) -} - -func interestingGoroutines() (gs []string) { - buf := make([]byte, 2<<20) - buf = buf[:runtime.Stack(buf, true)] - for _, g := range strings.Split(string(buf), "\n\n") { - sl := strings.SplitN(g, "\n", 2) - if len(sl) != 2 { - continue - } - stack := strings.TrimSpace(sl[1]) - if stack == "" || - strings.Contains(stack, "created by os/signal.init") || - strings.Contains(stack, "runtime/panic.go") || - strings.Contains(stack, "created by testing.RunTests") || - strings.Contains(stack, "testing.Main(") || - strings.Contains(stack, "runtime.goexit") || - strings.Contains(stack, "github.com/coreos/etcd/pkg/testutil.interestingGoroutines") || - strings.Contains(stack, "github.com/coreos/etcd/pkg/logutil.(*MergeLogger).outputLoop") || - strings.Contains(stack, "github.com/golang/glog.(*loggingT).flushDaemon") || - strings.Contains(stack, "created by runtime.gc") || - strings.Contains(stack, "runtime.MHeap_Scavenger") { - continue - } - gs = append(gs, stack) - } - sort.Strings(gs) - return -} diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/pauseable_handler.go b/vendor/github.com/coreos/etcd/pkg/testutil/pauseable_handler.go deleted file mode 100644 index e0d6aca26df..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/testutil/pauseable_handler.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package testutil - -import ( - "net/http" - "sync" -) - -type PauseableHandler struct { - Next http.Handler - mu sync.Mutex - paused bool -} - -func (ph *PauseableHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ph.mu.Lock() - paused := ph.paused - ph.mu.Unlock() - if !paused { - ph.Next.ServeHTTP(w, r) - } else { - hj, ok := w.(http.Hijacker) - if !ok { - panic("webserver doesn't support hijacking") - } - conn, _, err := hj.Hijack() - if err != nil { - panic(err.Error()) - } - conn.Close() - } -} - -func (ph *PauseableHandler) Pause() { - ph.mu.Lock() - defer ph.mu.Unlock() - ph.paused = true -} - -func (ph *PauseableHandler) Resume() { - ph.mu.Lock() - defer ph.mu.Unlock() - ph.paused = false -} diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/recorder.go b/vendor/github.com/coreos/etcd/pkg/testutil/recorder.go deleted file mode 100644 index bdbbd8cc5d5..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/testutil/recorder.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
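[editor's note] The PauseableHandler removed a little above wraps another http.Handler and, while paused, hijacks and closes each incoming connection instead of serving it, which lets a test simulate an unresponsive peer. A minimal usage sketch, assuming the vendored testutil package is still importable; the newFlakyServer name is illustrative, not part of the removed code:

    package example

    import (
        "net/http"
        "net/http/httptest"

        "github.com/coreos/etcd/pkg/testutil"
    )

    // newFlakyServer starts a test server whose handler can be paused.
    // While paused, PauseableHandler hijacks and closes each connection
    // instead of serving it, so clients observe abrupt disconnects.
    func newFlakyServer() (*httptest.Server, *testutil.PauseableHandler) {
        ph := &testutil.PauseableHandler{
            Next: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                w.WriteHeader(http.StatusOK)
            }),
        }
        srv := httptest.NewServer(ph)
        ph.Pause() // connections are dropped until Resume() is called
        return srv, ph
    }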
- -package testutil - -import ( - "errors" - "fmt" - "sync" - "time" -) - -type Action struct { - Name string - Params []interface{} -} - -type Recorder interface { - // Record publishes an Action (e.g., function call) which will - // be reflected by Wait() or Chan() - Record(a Action) - // Wait waits until at least n Actions are available or returns with error - Wait(n int) ([]Action, error) - // Action returns immediately available Actions - Action() []Action - // Chan returns the channel for actions published by Record - Chan() <-chan Action -} - -// RecorderBuffered appends all Actions to a slice -type RecorderBuffered struct { - sync.Mutex - actions []Action -} - -func (r *RecorderBuffered) Record(a Action) { - r.Lock() - r.actions = append(r.actions, a) - r.Unlock() -} -func (r *RecorderBuffered) Action() []Action { - r.Lock() - cpy := make([]Action, len(r.actions)) - copy(cpy, r.actions) - r.Unlock() - return cpy -} -func (r *RecorderBuffered) Wait(n int) (acts []Action, err error) { - // legacy racey behavior - WaitSchedule() - acts = r.Action() - if len(acts) < n { - err = newLenErr(n, len(acts)) - } - return acts, err -} - -func (r *RecorderBuffered) Chan() <-chan Action { - ch := make(chan Action) - go func() { - acts := r.Action() - for i := range acts { - ch <- acts[i] - } - close(ch) - }() - return ch -} - -// RecorderStream writes all Actions to an unbuffered channel -type recorderStream struct { - ch chan Action -} - -func NewRecorderStream() Recorder { - return &recorderStream{ch: make(chan Action)} -} - -func (r *recorderStream) Record(a Action) { - r.ch <- a -} - -func (r *recorderStream) Action() (acts []Action) { - for { - select { - case act := <-r.ch: - acts = append(acts, act) - default: - return acts - } - } -} - -func (r *recorderStream) Chan() <-chan Action { - return r.ch -} - -func (r *recorderStream) Wait(n int) ([]Action, error) { - acts := make([]Action, n) - timeoutC := time.After(5 * time.Second) - for i := 0; i < n; i++ { - select { - case acts[i] = <-r.ch: - case <-timeoutC: - acts = acts[:i] - return acts, newLenErr(n, i) - } - } - // extra wait to catch any Action spew - select { - case act := <-r.ch: - acts = append(acts, act) - case <-time.After(10 * time.Millisecond): - } - return acts, nil -} - -func newLenErr(expected int, actual int) error { - s := fmt.Sprintf("len(actions) = %d, expected >= %d", actual, expected) - return errors.New(s) -} diff --git a/vendor/github.com/coreos/etcd/pkg/testutil/testutil.go b/vendor/github.com/coreos/etcd/pkg/testutil/testutil.go deleted file mode 100644 index db2dd3285b0..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/testutil/testutil.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package testutil provides test utility functions. -package testutil - -import ( - "net/url" - "runtime" - "testing" - "time" -) - -// WaitSchedule briefly sleeps in order to invoke the go scheduler. 
-// TODO: improve this when we are able to know the schedule or status of target go-routine. -func WaitSchedule() { - time.Sleep(10 * time.Millisecond) -} - -func MustNewURLs(t *testing.T, urls []string) []url.URL { - if urls == nil { - return nil - } - var us []url.URL - for _, url := range urls { - u := MustNewURL(t, url) - us = append(us, *u) - } - return us -} - -func MustNewURL(t *testing.T, s string) *url.URL { - u, err := url.Parse(s) - if err != nil { - t.Fatalf("parse %v error: %v", s, err) - } - return u -} - -// FatalStack helps to fatal the test and print out the stacks of all running goroutines. -func FatalStack(t *testing.T, s string) { - stackTrace := make([]byte, 1024*1024) - n := runtime.Stack(stackTrace, true) - t.Error(string(stackTrace[:n])) - t.Fatalf(s) -} diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go deleted file mode 100644 index 3b6aa670baf..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package tlsutil provides utility functions for handling TLS. -package tlsutil diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go deleted file mode 100644 index 79b1f632ed5..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tlsutil - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "io/ioutil" -) - -// NewCertPool creates x509 certPool with provided CA files. -func NewCertPool(CAFiles []string) (*x509.CertPool, error) { - certPool := x509.NewCertPool() - - for _, CAFile := range CAFiles { - pemByte, err := ioutil.ReadFile(CAFile) - if err != nil { - return nil, err - } - - for { - var block *pem.Block - block, pemByte = pem.Decode(pemByte) - if block == nil { - break - } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - certPool.AddCert(cert) - } - } - - return certPool, nil -} - -// NewCert generates TLS cert by using the given cert,key and parse function. 
-func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { - cert, err := ioutil.ReadFile(certfile) - if err != nil { - return nil, err - } - - key, err := ioutil.ReadFile(keyfile) - if err != nil { - return nil, err - } - - if parseFunc == nil { - parseFunc = tls.X509KeyPair - } - - tlsCert, err := parseFunc(cert, key) - if err != nil { - return nil, err - } - return &tlsCert, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/vendor/github.com/coreos/etcd/pkg/transport/doc.go deleted file mode 100644 index 37658ce591a..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport implements various HTTP transport utilities based on Go -// net package. -package transport diff --git a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go deleted file mode 100644 index 6ccae4ee4a1..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "crypto/tls" - "fmt" - "net" - "time" -) - -type keepAliveConn interface { - SetKeepAlive(bool) error - SetKeepAlivePeriod(d time.Duration) error -} - -// NewKeepAliveListener returns a listener that listens on the given address. -// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil. -// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake. 
-// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html -func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) { - if scheme == "https" { - if tlscfg == nil { - return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented") - } - return newTLSKeepaliveListener(l, tlscfg), nil - } - - return &keepaliveListener{ - Listener: l, - }, nil -} - -type keepaliveListener struct{ net.Listener } - -func (kln *keepaliveListener) Accept() (net.Conn, error) { - c, err := kln.Listener.Accept() - if err != nil { - return nil, err - } - kac := c.(keepAliveConn) - // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl - // default on linux: 30 + 8 * 30 - // default on osx: 30 + 8 * 75 - kac.SetKeepAlive(true) - kac.SetKeepAlivePeriod(30 * time.Second) - return c, nil -} - -// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections. -type tlsKeepaliveListener struct { - net.Listener - config *tls.Config -} - -// Accept waits for and returns the next incoming TLS connection. -// The returned connection c is a *tls.Conn. -func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) { - c, err = l.Listener.Accept() - if err != nil { - return - } - kac := c.(keepAliveConn) - // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl - // default on linux: 30 + 8 * 30 - // default on osx: 30 + 8 * 75 - kac.SetKeepAlive(true) - kac.SetKeepAlivePeriod(30 * time.Second) - c = tls.Server(c, l.config) - return -} - -// NewListener creates a Listener which accepts connections from an inner -// Listener and wraps each connection with Server. -// The configuration config must be non-nil and must have -// at least one certificate. -func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener { - l := &tlsKeepaliveListener{} - l.Listener = inner - l.config = config - return l -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go deleted file mode 100644 index 930c542066f..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2013 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package transport provides network utility functions, complementing the more -// common ones in the net package. -package transport - -import ( - "errors" - "net" - "sync" - "time" -) - -var ( - ErrNotTCP = errors.New("only tcp connections have keepalive") -) - -// LimitListener returns a Listener that accepts at most n simultaneous -// connections from the provided Listener. 
-func LimitListener(l net.Listener, n int) net.Listener { - return &limitListener{l, make(chan struct{}, n)} -} - -type limitListener struct { - net.Listener - sem chan struct{} -} - -func (l *limitListener) acquire() { l.sem <- struct{}{} } -func (l *limitListener) release() { <-l.sem } - -func (l *limitListener) Accept() (net.Conn, error) { - l.acquire() - c, err := l.Listener.Accept() - if err != nil { - l.release() - return nil, err - } - return &limitListenerConn{Conn: c, release: l.release}, nil -} - -type limitListenerConn struct { - net.Conn - releaseOnce sync.Once - release func() -} - -func (l *limitListenerConn) Close() error { - err := l.Conn.Close() - l.releaseOnce.Do(l.release) - return err -} - -func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { - tcpc, ok := l.Conn.(*net.TCPConn) - if !ok { - return ErrNotTCP - } - return tcpc.SetKeepAlive(doKeepAlive) -} - -func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { - tcpc, ok := l.Conn.(*net.TCPConn) - if !ok { - return ErrNotTCP - } - return tcpc.SetKeepAlivePeriod(d) -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go deleted file mode 100644 index 4fcdb5ad9a3..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "log" - "math/big" - "net" - "os" - "path/filepath" - "time" - - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/pkg/tlsutil" -) - -func NewListener(addr, scheme string, tlscfg *tls.Config) (l net.Listener, err error) { - if l, err = newListener(addr, scheme); err != nil { - return nil, err - } - return wrapTLS(addr, scheme, tlscfg, l) -} - -func newListener(addr string, scheme string) (net.Listener, error) { - if scheme == "unix" || scheme == "unixs" { - // unix sockets via unix://laddr - return NewUnixListener(addr) - } - return net.Listen("tcp", addr) -} - -func wrapTLS(addr, scheme string, tlscfg *tls.Config, l net.Listener) (net.Listener, error) { - if scheme != "https" && scheme != "unixs" { - return l, nil - } - if tlscfg == nil { - l.Close() - return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr) - } - return tls.NewListener(l, tlscfg), nil -} - -type TLSInfo struct { - CertFile string - KeyFile string - CAFile string - TrustedCAFile string - ClientCertAuth bool - - // ServerName ensures the cert matches the given host in case of discovery / virtual hosting - ServerName string - - selfCert bool - - // parseFunc exists to simplify testing. Typically, parseFunc - // should be left nil. In that case, tls.X509KeyPair will be used. 
- parseFunc func([]byte, []byte) (tls.Certificate, error) -} - -func (info TLSInfo) String() string { - return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth) -} - -func (info TLSInfo) Empty() bool { - return info.CertFile == "" && info.KeyFile == "" -} - -func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { - if err = fileutil.TouchDirAll(dirpath); err != nil { - return - } - - certPath := filepath.Join(dirpath, "cert.pem") - keyPath := filepath.Join(dirpath, "key.pem") - _, errcert := os.Stat(certPath) - _, errkey := os.Stat(keyPath) - if errcert == nil && errkey == nil { - info.CertFile = certPath - info.KeyFile = keyPath - info.selfCert = true - return - } - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return - } - - tmpl := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{Organization: []string{"etcd"}}, - NotBefore: time.Now(), - NotAfter: time.Now().Add(365 * (24 * time.Hour)), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - for _, host := range hosts { - h, _, _ := net.SplitHostPort(host) - if ip := net.ParseIP(h); ip != nil { - tmpl.IPAddresses = append(tmpl.IPAddresses, ip) - } else { - tmpl.DNSNames = append(tmpl.DNSNames, h) - } - } - - priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) - if err != nil { - return - } - - certOut, err := os.Create(certPath) - if err != nil { - return - } - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - - b, err := x509.MarshalECPrivateKey(priv) - if err != nil { - return - } - keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return - } - pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) - keyOut.Close() - - return SelfCert(dirpath, hosts) -} - -func (info TLSInfo) baseConfig() (*tls.Config, error) { - if info.KeyFile == "" || info.CertFile == "" { - return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) - } - - tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) - if err != nil { - return nil, err - } - - cfg := &tls.Config{ - Certificates: []tls.Certificate{*tlsCert}, - MinVersion: tls.VersionTLS12, - ServerName: info.ServerName, - } - return cfg, nil -} - -// cafiles returns a list of CA file paths. -func (info TLSInfo) cafiles() []string { - cs := make([]string, 0) - if info.CAFile != "" { - cs = append(cs, info.CAFile) - } - if info.TrustedCAFile != "" { - cs = append(cs, info.TrustedCAFile) - } - return cs -} - -// ServerConfig generates a tls.Config object for use by an HTTP server. 
-func (info TLSInfo) ServerConfig() (*tls.Config, error) { - cfg, err := info.baseConfig() - if err != nil { - return nil, err - } - - cfg.ClientAuth = tls.NoClientCert - if info.CAFile != "" || info.ClientCertAuth { - cfg.ClientAuth = tls.RequireAndVerifyClientCert - } - - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cp, err := tlsutil.NewCertPool(CAFiles) - if err != nil { - return nil, err - } - cfg.ClientCAs = cp - } - - // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server - cfg.NextProtos = []string{"h2"} - - return cfg, nil -} - -// ClientConfig generates a tls.Config object for use by an HTTP client. -func (info TLSInfo) ClientConfig() (*tls.Config, error) { - var cfg *tls.Config - var err error - - if !info.Empty() { - cfg, err = info.baseConfig() - if err != nil { - return nil, err - } - } else { - cfg = &tls.Config{ServerName: info.ServerName} - } - - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles) - if err != nil { - return nil, err - } - // if given a CA, trust any host with a cert signed by the CA - log.Println("warning: ignoring ServerName for user-provided CA for backwards compatibility is deprecated") - cfg.ServerName = "" - } - - if info.selfCert { - cfg.InsecureSkipVerify = true - } - return cfg, nil -} - -// ShallowCopyTLSConfig copies *tls.Config. This is only -// work-around for go-vet tests, which complains -// -// assignment copies lock value to p: crypto/tls.Config contains sync.Once contains sync.Mutex -// -// Keep up-to-date with 'go/src/crypto/tls/common.go' -func ShallowCopyTLSConfig(cfg *tls.Config) *tls.Config { - ncfg := tls.Config{ - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - SessionTicketKey: cfg.SessionTicketKey, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } - return &ncfg -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go deleted file mode 100644 index 7e8c02030fe..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
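[editor's note] The TLSInfo type and its ServerConfig helper removed in the listener.go hunk above turn a set of certificate file paths into a *tls.Config, and NewListener wraps a TCP or unix listener with that config. A short sketch of the typical server-side flow; the file paths and address are placeholders:

    package example

    import (
        "net/http"

        "github.com/coreos/etcd/pkg/transport"
    )

    // serveTLS builds a server tls.Config from TLSInfo and serves HTTPS on
    // a listener created by NewListener.  With ClientCertAuth set (or a CA
    // file given), ServerConfig requires and verifies client certificates.
    func serveTLS() error {
        info := transport.TLSInfo{
            CertFile:       "/etc/etcd/server.crt", // hypothetical paths
            KeyFile:        "/etc/etcd/server.key",
            TrustedCAFile:  "/etc/etcd/ca.crt",
            ClientCertAuth: true,
        }
        cfg, err := info.ServerConfig()
        if err != nil {
            return err
        }
        l, err := transport.NewListener("127.0.0.1:2379", "https", cfg)
        if err != nil {
            return err
        }
        return http.Serve(l, http.NotFoundHandler())
    }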
- -package transport - -import ( - "net" - "time" -) - -type timeoutConn struct { - net.Conn - wtimeoutd time.Duration - rdtimeoutd time.Duration -} - -func (c timeoutConn) Write(b []byte) (n int, err error) { - if c.wtimeoutd > 0 { - if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil { - return 0, err - } - } - return c.Conn.Write(b) -} - -func (c timeoutConn) Read(b []byte) (n int, err error) { - if c.rdtimeoutd > 0 { - if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil { - return 0, err - } - } - return c.Conn.Read(b) -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go deleted file mode 100644 index 6ae39ecfc9b..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "time" -) - -type rwTimeoutDialer struct { - wtimeoutd time.Duration - rdtimeoutd time.Duration - net.Dialer -} - -func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) { - conn, err := d.Dialer.Dial(network, address) - tconn := &timeoutConn{ - rdtimeoutd: d.rdtimeoutd, - wtimeoutd: d.wtimeoutd, - Conn: conn, - } - return tconn, err -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go deleted file mode 100644 index 0f4df5fbe3b..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "crypto/tls" - "net" - "time" -) - -// NewTimeoutListener returns a listener that listens on the given address. -// If read/write on the accepted connection blocks longer than its time limit, -// it will return timeout error. 
-func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { - ln, err := newListener(addr, scheme) - if err != nil { - return nil, err - } - ln = &rwTimeoutListener{ - Listener: ln, - rdtimeoutd: rdtimeoutd, - wtimeoutd: wtimeoutd, - } - if ln, err = wrapTLS(addr, scheme, tlscfg, ln); err != nil { - return nil, err - } - return ln, nil -} - -type rwTimeoutListener struct { - net.Listener - wtimeoutd time.Duration - rdtimeoutd time.Duration -} - -func (rwln *rwTimeoutListener) Accept() (net.Conn, error) { - c, err := rwln.Listener.Accept() - if err != nil { - return nil, err - } - return timeoutConn{ - Conn: c, - wtimeoutd: rwln.wtimeoutd, - rdtimeoutd: rwln.rdtimeoutd, - }, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go deleted file mode 100644 index ea16b4c0f86..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "net/http" - "time" -) - -// NewTimeoutTransport returns a transport created using the given TLS info. -// If read/write on the created connection blocks longer than its time limit, -// it will return timeout error. -// If read/write timeout is set, transport will not be able to reuse connection. -func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) { - tr, err := NewTransport(info, dialtimeoutd) - if err != nil { - return nil, err - } - - if rdtimeoutd != 0 || wtimeoutd != 0 { - // the timed out connection will timeout soon after it is idle. - // it should not be put back to http transport as an idle connection for future usage. - tr.MaxIdleConnsPerHost = -1 - } else { - // allow more idle connections between peers to avoid unnecessary port allocation. - tr.MaxIdleConnsPerHost = 1024 - } - - tr.Dial = (&rwTimeoutDialer{ - Dialer: net.Dialer{ - Timeout: dialtimeoutd, - KeepAlive: 30 * time.Second, - }, - rdtimeoutd: rdtimeoutd, - wtimeoutd: wtimeoutd, - }).Dial - return tr, nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/vendor/github.com/coreos/etcd/pkg/transport/tls.go deleted file mode 100644 index 62fe0d38519..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/tls.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "fmt" - "strings" - "time" -) - -// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those -// endpoints that could be validated as secure. -func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { - t, err := NewTransport(tlsInfo, 5*time.Second) - if err != nil { - return nil, err - } - var errs []string - var endpoints []string - for _, ep := range eps { - if !strings.HasPrefix(ep, "https://") { - errs = append(errs, fmt.Sprintf("%q is insecure", ep)) - continue - } - conn, cerr := t.Dial("tcp", ep[len("https://"):]) - if cerr != nil { - errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) - continue - } - conn.Close() - endpoints = append(endpoints, ep) - } - if len(errs) != 0 { - err = fmt.Errorf("%s", strings.Join(errs, ",")) - } - return endpoints, err -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/github.com/coreos/etcd/pkg/transport/transport.go deleted file mode 100644 index 4a7fe69d2e1..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/transport.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
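[editor's note] ValidateSecureEndpoints, removed in the tls.go hunk above, probes each endpoint with a TLS transport and returns only the ones it could dial over https. A minimal sketch of how a caller might use it; filterEndpoints is an illustrative wrapper, not part of the removed API:

    package example

    import (
        "fmt"

        "github.com/coreos/etcd/pkg/transport"
    )

    // filterEndpoints keeps only the endpoints that ValidateSecureEndpoints
    // could actually reach over TLS; insecure or unreachable endpoints are
    // reported through the returned error string.
    func filterEndpoints(info transport.TLSInfo, eps []string) []string {
        secure, err := transport.ValidateSecureEndpoints(info, eps)
        if err != nil {
            fmt.Println("rejected endpoints:", err)
        }
        return secure
    }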
- -package transport - -import ( - "net" - "net/http" - "strings" - "time" -) - -type unixTransport struct{ *http.Transport } - -func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) { - cfg, err := info.ClientConfig() - if err != nil { - return nil, err - } - - t := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: dialtimeoutd, - // value taken from http.DefaultTransport - KeepAlive: 30 * time.Second, - }).Dial, - // value taken from http.DefaultTransport - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: cfg, - } - - dialer := (&net.Dialer{ - Timeout: dialtimeoutd, - KeepAlive: 30 * time.Second, - }) - dial := func(net, addr string) (net.Conn, error) { - return dialer.Dial("unix", addr) - } - - tu := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: cfg, - } - ut := &unixTransport{tu} - - t.RegisterProtocol("unix", ut) - t.RegisterProtocol("unixs", ut) - - return t, nil -} - -func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { - url := *req.URL - req.URL = &url - req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) - return urt.Transport.RoundTrip(req) -} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go deleted file mode 100644 index c126b6f7fa0..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package transport - -import ( - "net" - "os" -) - -type unixListener struct{ net.Listener } - -func NewUnixListener(addr string) (net.Listener, error) { - if err := os.RemoveAll(addr); err != nil { - return nil, err - } - l, err := net.Listen("unix", addr) - if err != nil { - return nil, err - } - return &unixListener{l}, nil -} - -func (ul *unixListener) Close() error { - if err := os.RemoveAll(ul.Addr().String()); err != nil { - return err - } - return ul.Listener.Close() -} diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go deleted file mode 100644 index de8ef0bd712..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
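[editor's note] The transport.go hunk above shows that NewTransport registers a sub-transport for the unix and unixs URL schemes: requests to unix:// URLs are dialed over a unix socket and the scheme is rewritten to http for the round trip. A small sketch of building such a client, under the assumption that an empty TLSInfo is acceptable for plain-http use; newUnixCapableClient is an illustrative name:

    package example

    import (
        "net/http"
        "time"

        "github.com/coreos/etcd/pkg/transport"
    )

    // newUnixCapableClient returns an *http.Client whose transport handles
    // http, https, unix, and unixs URLs via the removed NewTransport helper.
    func newUnixCapableClient() (*http.Client, error) {
        tr, err := transport.NewTransport(transport.TLSInfo{}, 5*time.Second)
        if err != nil {
            return nil, err
        }
        return &http.Client{Transport: tr}, nil
    }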
- -// Package types declares various data types and implements type-checking -// functions. -package types diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go deleted file mode 100644 index 1b042d9ce65..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/id.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "strconv" -) - -// ID represents a generic identifier which is canonically -// stored as a uint64 but is typically represented as a -// base-16 string for input/output -type ID uint64 - -func (i ID) String() string { - return strconv.FormatUint(uint64(i), 16) -} - -// IDFromString attempts to create an ID from a base-16 string. -func IDFromString(s string) (ID, error) { - i, err := strconv.ParseUint(s, 16, 64) - return ID(i), err -} - -// IDSlice implements the sort interface -type IDSlice []ID - -func (p IDSlice) Len() int { return len(p) } -func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } -func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go deleted file mode 100644 index 73ef431bef1..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/set.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "sync" -) - -type Set interface { - Add(string) - Remove(string) - Contains(string) bool - Equals(Set) bool - Length() int - Values() []string - Copy() Set - Sub(Set) Set -} - -func NewUnsafeSet(values ...string) *unsafeSet { - set := &unsafeSet{make(map[string]struct{})} - for _, v := range values { - set.Add(v) - } - return set -} - -func NewThreadsafeSet(values ...string) *tsafeSet { - us := NewUnsafeSet(values...) 
- return &tsafeSet{us, sync.RWMutex{}} -} - -type unsafeSet struct { - d map[string]struct{} -} - -// Add adds a new value to the set (no-op if the value is already present) -func (us *unsafeSet) Add(value string) { - us.d[value] = struct{}{} -} - -// Remove removes the given value from the set -func (us *unsafeSet) Remove(value string) { - delete(us.d, value) -} - -// Contains returns whether the set contains the given value -func (us *unsafeSet) Contains(value string) (exists bool) { - _, exists = us.d[value] - return -} - -// ContainsAll returns whether the set contains all given values -func (us *unsafeSet) ContainsAll(values []string) bool { - for _, s := range values { - if !us.Contains(s) { - return false - } - } - return true -} - -// Equals returns whether the contents of two sets are identical -func (us *unsafeSet) Equals(other Set) bool { - v1 := sort.StringSlice(us.Values()) - v2 := sort.StringSlice(other.Values()) - v1.Sort() - v2.Sort() - return reflect.DeepEqual(v1, v2) -} - -// Length returns the number of elements in the set -func (us *unsafeSet) Length() int { - return len(us.d) -} - -// Values returns the values of the Set in an unspecified order. -func (us *unsafeSet) Values() (values []string) { - values = make([]string, 0) - for val := range us.d { - values = append(values, val) - } - return -} - -// Copy creates a new Set containing the values of the first -func (us *unsafeSet) Copy() Set { - cp := NewUnsafeSet() - for val := range us.d { - cp.Add(val) - } - - return cp -} - -// Sub removes all elements in other from the set -func (us *unsafeSet) Sub(other Set) Set { - oValues := other.Values() - result := us.Copy().(*unsafeSet) - - for _, val := range oValues { - if _, ok := result.d[val]; !ok { - continue - } - delete(result.d, val) - } - - return result -} - -type tsafeSet struct { - us *unsafeSet - m sync.RWMutex -} - -func (ts *tsafeSet) Add(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Add(value) -} - -func (ts *tsafeSet) Remove(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Remove(value) -} - -func (ts *tsafeSet) Contains(value string) (exists bool) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Contains(value) -} - -func (ts *tsafeSet) Equals(other Set) bool { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Equals(other) -} - -func (ts *tsafeSet) Length() int { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Length() -} - -func (ts *tsafeSet) Values() (values []string) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Values() -} - -func (ts *tsafeSet) Copy() Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Copy().(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} - -func (ts *tsafeSet) Sub(other Set) Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Sub(other).(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go deleted file mode 100644 index 0dd9ca798ae..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/slice.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -// Uint64Slice implements sort interface -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go deleted file mode 100644 index 9e5d03ff645..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/urls.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "errors" - "fmt" - "net" - "net/url" - "sort" - "strings" -) - -type URLs []url.URL - -func NewURLs(strs []string) (URLs, error) { - all := make([]url.URL, len(strs)) - if len(all) == 0 { - return nil, errors.New("no valid URLs given") - } - for i, in := range strs { - in = strings.TrimSpace(in) - u, err := url.Parse(in) - if err != nil { - return nil, err - } - if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { - return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) - } - if _, _, err := net.SplitHostPort(u.Host); err != nil { - return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) - } - if u.Path != "" { - return nil, fmt.Errorf("URL must not contain a path: %s", in) - } - all[i] = *u - } - us := URLs(all) - us.Sort() - - return us, nil -} - -func MustNewURLs(strs []string) URLs { - urls, err := NewURLs(strs) - if err != nil { - panic(err) - } - return urls -} - -func (us URLs) String() string { - return strings.Join(us.StringSlice(), ",") -} - -func (us *URLs) Sort() { - sort.Sort(us) -} -func (us URLs) Len() int { return len(us) } -func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } -func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } - -func (us URLs) StringSlice() []string { - out := make([]string, len(us)) - for i := range us { - out[i] = us[i].String() - } - - return out -} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go deleted file mode 100644 index 47690cc381a..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "sort" - "strings" -) - -// URLsMap is a map from a name to its URLs. -type URLsMap map[string]URLs - -// NewURLsMap returns a URLsMap instantiated from the given string, -// which consists of discovery-formatted names-to-URLs, like: -// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 -func NewURLsMap(s string) (URLsMap, error) { - m := parse(s) - - cl := URLsMap{} - for name, urls := range m { - us, err := NewURLs(urls) - if err != nil { - return nil, err - } - cl[name] = us - } - return cl, nil -} - -// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The -// string values in the map can be multiple values separated by the sep string. -func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { - var err error - um := URLsMap{} - for k, v := range m { - um[k], err = NewURLs(strings.Split(v, sep)) - if err != nil { - return nil, err - } - } - return um, nil -} - -// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. -func (c URLsMap) String() string { - var pairs []string - for name, urls := range c { - for _, url := range urls { - pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) - } - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -// URLs returns a list of all URLs. -// The returned list is sorted in ascending lexicographical order. -func (c URLsMap) URLs() []string { - var urls []string - for _, us := range c { - for _, u := range us { - urls = append(urls, u.String()) - } - } - sort.Strings(urls) - return urls -} - -// Len returns the size of URLsMap. -func (c URLsMap) Len() int { - return len(c) -} - -// parse parses the given string and returns a map listing the values specified for each key. -func parse(s string) map[string][]string { - m := make(map[string][]string) - for s != "" { - key := s - if i := strings.IndexAny(key, ","); i >= 0 { - key, s = key[:i], key[i+1:] - } else { - s = "" - } - if key == "" { - continue - } - value := "" - if i := strings.Index(key, "="); i >= 0 { - key, value = key[:i], key[i+1:] - } - m[key] = append(m[key], value) - } - return m -} diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait.go b/vendor/github.com/coreos/etcd/pkg/wait/wait.go deleted file mode 100644 index 0f31eeb9790..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/wait/wait.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
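[editor's note] The URLsMap helpers removed in the urlsmap.go hunk above parse a discovery-formatted list of name=URL pairs into per-member URL sets. A short usage sketch; the member names and addresses are placeholders in the same format as the doc comment above:

    package example

    import (
        "fmt"

        "github.com/coreos/etcd/pkg/types"
    )

    // parseCluster parses a discovery string into a URLsMap and prints the
    // member count plus the canonical (name-sorted) string form.
    func parseCluster() (types.URLsMap, error) {
        m, err := types.NewURLsMap("infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380")
        if err != nil {
            return nil, err
        }
        fmt.Println(m.Len(), m.String()) // 2 members, pairs sorted by name
        return m, nil
    }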
- -// Package wait provides utility functions for polling, listening using Go -// channel. -package wait - -import ( - "log" - "sync" -) - -type Wait interface { - Register(id uint64) <-chan interface{} - Trigger(id uint64, x interface{}) - IsRegistered(id uint64) bool -} - -type List struct { - l sync.Mutex - m map[uint64]chan interface{} -} - -func New() *List { - return &List{m: make(map[uint64]chan interface{})} -} - -func (w *List) Register(id uint64) <-chan interface{} { - w.l.Lock() - defer w.l.Unlock() - ch := w.m[id] - if ch == nil { - ch = make(chan interface{}, 1) - w.m[id] = ch - } else { - log.Panicf("dup id %x", id) - } - return ch -} - -func (w *List) Trigger(id uint64, x interface{}) { - w.l.Lock() - ch := w.m[id] - delete(w.m, id) - w.l.Unlock() - if ch != nil { - ch <- x - close(ch) - } -} - -func (w *List) IsRegistered(id uint64) bool { - w.l.Lock() - defer w.l.Unlock() - _, ok := w.m[id] - return ok -} - -type waitWithResponse struct { - ch <-chan interface{} -} - -func NewWithResponse(ch <-chan interface{}) Wait { - return &waitWithResponse{ch: ch} -} - -func (w *waitWithResponse) Register(id uint64) <-chan interface{} { - return w.ch -} -func (w *waitWithResponse) Trigger(id uint64, x interface{}) {} -func (w *waitWithResponse) IsRegistered(id uint64) bool { - panic("waitWithResponse.IsRegistered() shouldn't be called") -} diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go b/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go deleted file mode 100644 index 297e48a47d7..00000000000 --- a/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wait - -import "sync" - -type WaitTime interface { - // Wait returns a chan that waits on the given logical deadline. - // The chan will be triggered when Trigger is called with a - // deadline that is later than the one it is waiting for. - Wait(deadline uint64) <-chan struct{} - // Trigger triggers all the waiting chans with an earlier logical deadline. 
- Trigger(deadline uint64) -} - -var closec chan struct{} - -func init() { closec = make(chan struct{}); close(closec) } - -type timeList struct { - l sync.Mutex - lastTriggerDeadline uint64 - m map[uint64]chan struct{} -} - -func NewTimeList() *timeList { - return &timeList{m: make(map[uint64]chan struct{})} -} - -func (tl *timeList) Wait(deadline uint64) <-chan struct{} { - tl.l.Lock() - defer tl.l.Unlock() - if tl.lastTriggerDeadline >= deadline { - return closec - } - ch := tl.m[deadline] - if ch == nil { - ch = make(chan struct{}) - tl.m[deadline] = ch - } - return ch -} - -func (tl *timeList) Trigger(deadline uint64) { - tl.l.Lock() - defer tl.l.Unlock() - tl.lastTriggerDeadline = deadline - for t, ch := range tl.m { - if t <= deadline { - delete(tl.m, t) - close(ch) - } - } -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/auth.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/auth.go deleted file mode 100644 index c1b75e36dbd..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/auth.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "golang.org/x/net/context" - - "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type AuthProxy struct { - client *clientv3.Client -} - -func NewAuthProxy(c *clientv3.Client) pb.AuthServer { - return &AuthProxy{client: c} -} - -func (ap *AuthProxy) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).AuthEnable(ctx, r) -} - -func (ap *AuthProxy) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).AuthDisable(ctx, r) -} - -func (ap *AuthProxy) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).Authenticate(ctx, r) -} - -func (ap *AuthProxy) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).RoleAdd(ctx, r) -} - -func (ap *AuthProxy) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).RoleDelete(ctx, r) -} - -func (ap *AuthProxy) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).RoleGet(ctx, r) -} - -func (ap *AuthProxy) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).RoleList(ctx, r) -} - -func (ap *AuthProxy) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) 
(*pb.AuthRoleRevokePermissionResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).RoleRevokePermission(ctx, r) -} - -func (ap *AuthProxy) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).RoleGrantPermission(ctx, r) -} - -func (ap *AuthProxy) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).UserAdd(ctx, r) -} - -func (ap *AuthProxy) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).UserDelete(ctx, r) -} - -func (ap *AuthProxy) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).UserGet(ctx, r) -} - -func (ap *AuthProxy) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).UserList(ctx, r) -} - -func (ap *AuthProxy) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).UserGrantRole(ctx, r) -} - -func (ap *AuthProxy) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).UserRevokeRole(ctx, r) -} - -func (ap *AuthProxy) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - conn := ap.client.ActiveConnection() - return pb.NewAuthClient(conn).UserChangePassword(ctx, r) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go deleted file mode 100644 index 155bbf90022..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
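[editor's note] The AuthProxy removed in the auth.go hunk above is a pure pass-through: every RPC is forwarded to the backing clientv3 connection. A sketch of wiring it into a gRPC server; pb.RegisterAuthServer is assumed to be the usual protoc-generated registration helper and does not appear in the diff itself:

    package example

    import (
        "google.golang.org/grpc"

        "github.com/coreos/etcd/clientv3"
        pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
        "github.com/coreos/etcd/proxy/grpcproxy"
    )

    // registerAuthProxy exposes the pass-through AuthProxy on a gRPC server,
    // forwarding all auth RPCs to the cluster behind the clientv3.Client.
    func registerAuthProxy(s *grpc.Server, c *clientv3.Client) {
        pb.RegisterAuthServer(s, grpcproxy.NewAuthProxy(c))
    }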
- -package cache - -import ( - "errors" - "sync" - "time" - - "github.com/karlseguin/ccache" - - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/adt" -) - -var ( - DefaultMaxEntries = 2048 - ErrCompacted = rpctypes.ErrGRPCCompacted -) - -const defaultHistoricTTL = time.Hour -const defaultCurrentTTL = time.Minute - -type Cache interface { - Add(req *pb.RangeRequest, resp *pb.RangeResponse) - Get(req *pb.RangeRequest) (*pb.RangeResponse, error) - Compact(revision int64) - Invalidate(key []byte, endkey []byte) - Close() -} - -// keyFunc returns the key of an request, which is used to look up in the cache for it's caching response. -func keyFunc(req *pb.RangeRequest) string { - // TODO: use marshalTo to reduce allocation - b, err := req.Marshal() - if err != nil { - panic(err) - } - return string(b) -} - -func NewCache(maxCacheEntries int) Cache { - return &cache{ - lru: ccache.New(ccache.Configure().MaxSize(int64(maxCacheEntries))), - compactedRev: -1, - } -} - -func (c *cache) Close() { c.lru.Stop() } - -// cache implements Cache -type cache struct { - mu sync.RWMutex - lru *ccache.Cache - - // a reverse index for cache invalidation - cachedRanges adt.IntervalTree - - compactedRev int64 -} - -// Add adds the response of a request to the cache if its revision is larger than the compacted revision of the cache. -func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) { - key := keyFunc(req) - - c.mu.Lock() - defer c.mu.Unlock() - - if req.Revision > c.compactedRev { - if req.Revision == 0 { - c.lru.Set(key, resp, defaultCurrentTTL) - } else { - c.lru.Set(key, resp, defaultHistoricTTL) - } - } - // we do not need to invalidate a request with a revision specified. - // so we do not need to add it into the reverse index. - if req.Revision != 0 { - return - } - - var ( - iv *adt.IntervalValue - ivl adt.Interval - ) - if len(req.RangeEnd) != 0 { - ivl = adt.NewStringAffineInterval(string(req.Key), string(req.RangeEnd)) - } else { - ivl = adt.NewStringAffinePoint(string(req.Key)) - } - - iv = c.cachedRanges.Find(ivl) - - if iv == nil { - c.cachedRanges.Insert(ivl, []string{key}) - } else { - iv.Val = append(iv.Val.([]string), key) - } -} - -// Get looks up the caching response for a given request. -// Get is also responsible for lazy eviction when accessing compacted entries. -func (c *cache) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) { - key := keyFunc(req) - - c.mu.RLock() - defer c.mu.RUnlock() - - if req.Revision < c.compactedRev { - c.lru.Delete(key) - return nil, ErrCompacted - } - - if item := c.lru.Get(key); item != nil { - return item.Value().(*pb.RangeResponse), nil - } - return nil, errors.New("not exist") -} - -// Invalidate invalidates the cache entries that intersecting with the given range from key to endkey. -func (c *cache) Invalidate(key, endkey []byte) { - c.mu.Lock() - defer c.mu.Unlock() - - var ( - ivs []*adt.IntervalValue - ivl adt.Interval - ) - if len(endkey) == 0 { - ivl = adt.NewStringAffinePoint(string(key)) - } else { - ivl = adt.NewStringAffineInterval(string(key), string(endkey)) - } - - ivs = c.cachedRanges.Stab(ivl) - for _, iv := range ivs { - keys := iv.Val.([]string) - for _, key := range keys { - c.lru.Delete(key) - } - } - // delete after removing all keys since it is destructive to 'ivs' - c.cachedRanges.Delete(ivl) -} - -// Compact invalidate all caching response before the given rev. -// Replace with the invalidation is lazy. 
The actual removal happens when the entries is accessed. -func (c *cache) Compact(revision int64) { - c.mu.Lock() - defer c.mu.Unlock() - - if revision > c.compactedRev { - c.compactedRev = revision - } -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go deleted file mode 100644 index 8a2fa16c124..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/cluster.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - - "golang.org/x/net/context" -) - -type clusterProxy struct { - client *clientv3.Client -} - -func NewClusterProxy(c *clientv3.Client) pb.ClusterServer { - return &clusterProxy{ - client: c, - } -} - -func (cp *clusterProxy) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { - conn := cp.client.ActiveConnection() - return pb.NewClusterClient(conn).MemberAdd(ctx, r) -} - -func (cp *clusterProxy) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - conn := cp.client.ActiveConnection() - return pb.NewClusterClient(conn).MemberRemove(ctx, r) -} - -func (cp *clusterProxy) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { - conn := cp.client.ActiveConnection() - return pb.NewClusterClient(conn).MemberUpdate(ctx, r) -} - -func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - conn := cp.client.ActiveConnection() - return pb.NewClusterClient(conn).MemberList(ctx, r) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/doc.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/doc.go deleted file mode 100644 index fc022e3c57e..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package grpcproxy is an OSI level 7 proxy for etcd v3 API requests. 
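
The cache removed in store.go above keys each entry by the marshaled RangeRequest, keeps revision-0 reads for a short TTL and historical reads for a longer one, and maintains an interval index so writes can invalidate every cached range that covers the written key. A small stand-alone sketch of that Cache interface follows; the key and response values are made up for illustration, and no proxy or etcd server is involved.

    package main

    import (
        "fmt"

        pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
        "github.com/coreos/etcd/proxy/grpcproxy/cache"
    )

    func main() {
        c := cache.NewCache(cache.DefaultMaxEntries)
        defer c.Close()

        // Identical serializable Range requests marshal to the same key,
        // so the second lookup is served from the cache.
        req := &pb.RangeRequest{Key: []byte("foo"), Serializable: true}
        c.Add(req, &pb.RangeResponse{Count: 1})

        if resp, err := c.Get(req); err == nil {
            fmt.Println("cache hit, count =", resp.Count)
        }

        // A write to "foo" invalidates the cached range that covers it.
        c.Invalidate([]byte("foo"), nil)
        if _, err := c.Get(req); err != nil {
            fmt.Println("cache miss after invalidation:", err)
        }
    }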
-package grpcproxy diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go deleted file mode 100644 index 36885135797..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/proxy/grpcproxy/cache" - - "golang.org/x/net/context" -) - -type kvProxy struct { - kv clientv3.KV - cache cache.Cache -} - -func NewKvProxy(c *clientv3.Client) (pb.KVServer, <-chan struct{}) { - kv := &kvProxy{ - kv: c.KV, - cache: cache.NewCache(cache.DefaultMaxEntries), - } - donec := make(chan struct{}) - go func() { - defer close(donec) - <-c.Ctx().Done() - kv.cache.Close() - }() - return kv, donec -} - -func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if r.Serializable { - resp, err := p.cache.Get(r) - switch err { - case nil: - cacheHits.Inc() - return resp, nil - case cache.ErrCompacted: - cacheHits.Inc() - return nil, err - } - } - cachedMisses.Inc() - - resp, err := p.kv.Do(ctx, RangeRequestToOp(r)) - if err != nil { - return nil, err - } - - // cache linearizable as serializable - req := *r - req.Serializable = true - gresp := (*pb.RangeResponse)(resp.Get()) - p.cache.Add(&req, gresp) - - return gresp, nil -} - -func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - p.cache.Invalidate(r.Key, nil) - - resp, err := p.kv.Do(ctx, PutRequestToOp(r)) - return (*pb.PutResponse)(resp.Put()), err -} - -func (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - p.cache.Invalidate(r.Key, r.RangeEnd) - - resp, err := p.kv.Do(ctx, DelRequestToOp(r)) - return (*pb.DeleteRangeResponse)(resp.Del()), err -} - -func (p *kvProxy) txnToCache(reqs []*pb.RequestOp, resps []*pb.ResponseOp) { - for i := range resps { - switch tv := resps[i].Response.(type) { - case *pb.ResponseOp_ResponsePut: - p.cache.Invalidate(reqs[i].GetRequestPut().Key, nil) - case *pb.ResponseOp_ResponseDeleteRange: - rdr := reqs[i].GetRequestDeleteRange() - p.cache.Invalidate(rdr.Key, rdr.RangeEnd) - case *pb.ResponseOp_ResponseRange: - req := *(reqs[i].GetRequestRange()) - req.Serializable = true - p.cache.Add(&req, tv.ResponseRange) - } - } -} - -func (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - txn := p.kv.Txn(ctx) - cmps := make([]clientv3.Cmp, len(r.Compare)) - thenops := make([]clientv3.Op, len(r.Success)) - elseops := make([]clientv3.Op, len(r.Failure)) - - for i := range r.Compare { - cmps[i] = (clientv3.Cmp)(*r.Compare[i]) - } - - for i := range r.Success { - thenops[i] = requestOpToOp(r.Success[i]) - } - - for i := range r.Failure { - elseops[i] = requestOpToOp(r.Failure[i]) - } - - resp, err := 
txn.If(cmps...).Then(thenops...).Else(elseops...).Commit() - - if err != nil { - return nil, err - } - // txn may claim an outdated key is updated; be safe and invalidate - for _, cmp := range r.Compare { - p.cache.Invalidate(cmp.Key, nil) - } - // update any fetched keys - if resp.Succeeded { - p.txnToCache(r.Success, resp.Responses) - } else { - p.txnToCache(r.Failure, resp.Responses) - } - - return (*pb.TxnResponse)(resp), nil -} - -func (p *kvProxy) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { - var opts []clientv3.CompactOption - if r.Physical { - opts = append(opts, clientv3.WithCompactPhysical()) - } - - resp, err := p.kv.Compact(ctx, r.Revision, opts...) - if err == nil { - p.cache.Compact(r.Revision) - } - - return (*pb.CompactionResponse)(resp), err -} - -func requestOpToOp(union *pb.RequestOp) clientv3.Op { - switch tv := union.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange != nil { - return RangeRequestToOp(tv.RequestRange) - } - case *pb.RequestOp_RequestPut: - if tv.RequestPut != nil { - return PutRequestToOp(tv.RequestPut) - } - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange != nil { - return DelRequestToOp(tv.RequestDeleteRange) - } - } - panic("unknown request") -} - -func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op { - opts := []clientv3.OpOption{} - if len(r.RangeEnd) != 0 { - opts = append(opts, clientv3.WithRange(string(r.RangeEnd))) - } - opts = append(opts, clientv3.WithRev(r.Revision)) - opts = append(opts, clientv3.WithLimit(r.Limit)) - opts = append(opts, clientv3.WithSort( - clientv3.SortTarget(r.SortTarget), - clientv3.SortOrder(r.SortOrder)), - ) - opts = append(opts, clientv3.WithMaxCreateRev(r.MaxCreateRevision)) - opts = append(opts, clientv3.WithMinCreateRev(r.MinCreateRevision)) - opts = append(opts, clientv3.WithMaxModRev(r.MaxModRevision)) - opts = append(opts, clientv3.WithMinModRev(r.MinModRevision)) - - if r.Serializable { - opts = append(opts, clientv3.WithSerializable()) - } - - return clientv3.OpGet(string(r.Key), opts...) -} - -func PutRequestToOp(r *pb.PutRequest) clientv3.Op { - opts := []clientv3.OpOption{} - opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease))) - - return clientv3.OpPut(string(r.Key), string(r.Value), opts...) -} - -func DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op { - opts := []clientv3.OpOption{} - if len(r.RangeEnd) != 0 { - opts = append(opts, clientv3.WithRange(string(r.RangeEnd))) - } - if r.PrevKv { - opts = append(opts, clientv3.WithPrevKV()) - } - return clientv3.OpDelete(string(r.Key), opts...) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go deleted file mode 100644 index 7880b18109d..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/kv_client_adapter.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
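
kv.go above caches linearizable reads under their serializable form and invalidates the affected ranges on Put, DeleteRange, and Txn; kv_client_adapter.go, removed just below, wraps the resulting pb.KVServer so it can be driven in-process as a pb.KVClient. The sketch below combines the two pieces removed here; it assumes a reachable etcd endpoint (the address is illustrative) and is not code from this repository.

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/etcd/clientv3"
        pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
        "github.com/coreos/etcd/proxy/grpcproxy"

        "golang.org/x/net/context"
    )

    func main() {
        // A running etcd member at this address is assumed.
        cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
        if err != nil {
            log.Fatal(err)
        }
        defer cli.Close()

        // kv.go: the proxy fronts the client's KV API with the range cache.
        kvServer, _ := grpcproxy.NewKvProxy(cli)
        // kv_client_adapter.go: drive the pb.KVServer in-process as a pb.KVClient.
        kvClient := grpcproxy.KvServerToKvClient(kvServer)

        // Serializable reads are looked up in (and added to) the cache;
        // a later Put on the same key would invalidate the cached range.
        req := &pb.RangeRequest{Key: []byte("foo"), Serializable: true}
        resp, err := kvClient.Range(context.Background(), req)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("count:", resp.Count)
    }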
- -package grpcproxy - -import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -type kvs2kvc struct{ kvs pb.KVServer } - -func KvServerToKvClient(kvs pb.KVServer) pb.KVClient { - return &kvs2kvc{kvs} -} - -func (s *kvs2kvc) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (*pb.RangeResponse, error) { - return s.kvs.Range(ctx, in) -} - -func (s *kvs2kvc) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (*pb.PutResponse, error) { - return s.kvs.Put(ctx, in) -} - -func (s *kvs2kvc) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (*pb.DeleteRangeResponse, error) { - return s.kvs.DeleteRange(ctx, in) -} - -func (s *kvs2kvc) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (*pb.TxnResponse, error) { - return s.kvs.Txn(ctx, in) -} - -func (s *kvs2kvc) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (*pb.CompactionResponse, error) { - return s.kvs.Compact(ctx, in) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go deleted file mode 100644 index 4f870220b79..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/lease.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "golang.org/x/net/context" - - "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type leaseProxy struct { - client *clientv3.Client -} - -func NewLeaseProxy(c *clientv3.Client) pb.LeaseServer { - return &leaseProxy{ - client: c, - } -} - -func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - conn := lp.client.ActiveConnection() - return pb.NewLeaseClient(conn).LeaseGrant(ctx, cr) -} - -func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - conn := lp.client.ActiveConnection() - return pb.NewLeaseClient(conn).LeaseRevoke(ctx, rr) -} - -func (lp *leaseProxy) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { - conn := lp.client.ActiveConnection() - return pb.NewLeaseClient(conn).LeaseTimeToLive(ctx, rr) -} - -func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { - conn := lp.client.ActiveConnection() - ctx, cancel := context.WithCancel(stream.Context()) - lc, err := pb.NewLeaseClient(conn).LeaseKeepAlive(ctx) - if err != nil { - cancel() - return err - } - - go func() { - // Cancel the context attached to lc to unblock lc.Recv when - // this routine returns on error. - defer cancel() - - for { - // stream.Recv will be unblock when the loop in the parent routine - // returns on error. 
- rr, err := stream.Recv() - if err != nil { - return - } - err = lc.Send(rr) - if err != nil { - return - } - } - }() - - for { - rr, err := lc.Recv() - if err != nil { - return err - } - err = stream.Send(rr) - if err != nil { - return err - } - } -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go deleted file mode 100644 index 209dc94a712..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "golang.org/x/net/context" - - "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type maintenanceProxy struct { - client *clientv3.Client -} - -func NewMaintenanceProxy(c *clientv3.Client) pb.MaintenanceServer { - return &maintenanceProxy{ - client: c, - } -} - -func (mp *maintenanceProxy) Defragment(ctx context.Context, dr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - conn := mp.client.ActiveConnection() - return pb.NewMaintenanceClient(conn).Defragment(ctx, dr) -} - -func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenance_SnapshotServer) error { - conn := mp.client.ActiveConnection() - ctx, cancel := context.WithCancel(stream.Context()) - defer cancel() - - sc, err := pb.NewMaintenanceClient(conn).Snapshot(ctx, sr) - if err != nil { - return err - } - - for { - rr, err := sc.Recv() - if err != nil { - return err - } - err = stream.Send(rr) - if err != nil { - return err - } - } -} - -func (mp *maintenanceProxy) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { - conn := mp.client.ActiveConnection() - return pb.NewMaintenanceClient(conn).Hash(ctx, r) -} - -func (mp *maintenanceProxy) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { - conn := mp.client.ActiveConnection() - return pb.NewMaintenanceClient(conn).Alarm(ctx, r) -} - -func (mp *maintenanceProxy) Status(ctx context.Context, r *pb.StatusRequest) (*pb.StatusResponse, error) { - conn := mp.client.ActiveConnection() - return pb.NewMaintenanceClient(conn).Status(ctx, r) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go deleted file mode 100644 index f4a1d4c8de4..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/metrics.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import "github.com/prometheus/client_golang/prometheus" - -var ( - watchersCoalescing = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "grpc_proxy", - Name: "watchers_coalescing_total", - Help: "Total number of current watchers coalescing", - }) - eventsCoalescing = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "grpc_proxy", - Name: "events_coalescing_total", - Help: "Total number of events coalescing", - }) - cacheHits = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "grpc_proxy", - Name: "cache_hits_total", - Help: "Total number of cache hits", - }) - cachedMisses = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "grpc_proxy", - Name: "cache_misses_total", - Help: "Total number of cache misses", - }) -) - -func init() { - prometheus.MustRegister(watchersCoalescing) - prometheus.MustRegister(eventsCoalescing) - prometheus.MustRegister(cacheHits) - prometheus.MustRegister(cachedMisses) -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go deleted file mode 100644 index 42d196ca2ca..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "sync" - - "golang.org/x/net/context" - "golang.org/x/time/rate" - "google.golang.org/grpc/metadata" - - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver/api/v3rpc" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -type watchProxy struct { - cw clientv3.Watcher - ctx context.Context - - ranges *watchRanges - - // retryLimiter controls the create watch retry rate on lost leaders. - retryLimiter *rate.Limiter - - // mu protects leaderc updates. - mu sync.RWMutex - leaderc chan struct{} - - // wg waits until all outstanding watch servers quit. 
- wg sync.WaitGroup -} - -const ( - lostLeaderKey = "__lostleader" // watched to detect leader loss - retryPerSecond = 10 -) - -func NewWatchProxy(c *clientv3.Client) (pb.WatchServer, <-chan struct{}) { - wp := &watchProxy{ - cw: c.Watcher, - ctx: clientv3.WithRequireLeader(c.Ctx()), - retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond), - leaderc: make(chan struct{}), - } - wp.ranges = newWatchRanges(wp) - ch := make(chan struct{}) - go func() { - defer close(ch) - // a new streams without opening any watchers won't catch - // a lost leader event, so have a special watch to monitor it - rev := int64((uint64(1) << 63) - 2) - for wp.ctx.Err() == nil { - wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev)) - for range wch { - } - wp.mu.Lock() - close(wp.leaderc) - wp.leaderc = make(chan struct{}) - wp.mu.Unlock() - wp.retryLimiter.Wait(wp.ctx) - } - wp.mu.Lock() - <-wp.ctx.Done() - wp.mu.Unlock() - wp.wg.Wait() - wp.ranges.stop() - }() - return wp, ch -} - -func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { - wp.mu.Lock() - select { - case <-wp.ctx.Done(): - wp.mu.Unlock() - return - default: - wp.wg.Add(1) - } - wp.mu.Unlock() - - ctx, cancel := context.WithCancel(stream.Context()) - wps := &watchProxyStream{ - ranges: wp.ranges, - watchers: make(map[int64]*watcher), - stream: stream, - watchCh: make(chan *pb.WatchResponse, 1024), - ctx: ctx, - cancel: cancel, - } - - var leaderc <-chan struct{} - if md, ok := metadata.FromContext(stream.Context()); ok { - v := md[rpctypes.MetadataRequireLeaderKey] - if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { - leaderc = wp.lostLeaderNotify() - } - } - - // post to stopc => terminate server stream; can't use a waitgroup - // since all goroutines will only terminate after Watch() exits. - stopc := make(chan struct{}, 3) - go func() { - defer func() { stopc <- struct{}{} }() - wps.recvLoop() - }() - go func() { - defer func() { stopc <- struct{}{} }() - wps.sendLoop() - }() - // tear down watch if leader goes down or entire watch proxy is terminated - go func() { - defer func() { stopc <- struct{}{} }() - select { - case <-leaderc: - case <-ctx.Done(): - case <-wp.ctx.Done(): - } - }() - - <-stopc - cancel() - - // recv/send may only shutdown after function exits; - // goroutine notifies proxy that stream is through - go func() { - <-stopc - <-stopc - wps.close() - wp.wg.Done() - }() - - select { - case <-leaderc: - return rpctypes.ErrNoLeader - default: - return wps.ctx.Err() - } -} - -func (wp *watchProxy) lostLeaderNotify() <-chan struct{} { - wp.mu.RLock() - defer wp.mu.RUnlock() - return wp.leaderc -} - -// watchProxyStream forwards etcd watch events to a proxied client stream. -type watchProxyStream struct { - ranges *watchRanges - - // mu protects watchers and nextWatcherID - mu sync.Mutex - // watchers receive events from watch broadcast. - watchers map[int64]*watcher - // nextWatcherID is the id to assign the next watcher on this stream. - nextWatcherID int64 - - stream pb.Watch_WatchServer - - // watchCh receives watch responses from the watchers. 
- watchCh chan *pb.WatchResponse - - ctx context.Context - cancel context.CancelFunc -} - -func (wps *watchProxyStream) close() { - var wg sync.WaitGroup - wps.cancel() - wps.mu.Lock() - wg.Add(len(wps.watchers)) - for _, wpsw := range wps.watchers { - go func(w *watcher) { - wps.ranges.delete(w) - wg.Done() - }(wpsw) - } - wps.watchers = nil - wps.mu.Unlock() - - wg.Wait() - - close(wps.watchCh) -} - -func (wps *watchProxyStream) recvLoop() error { - for { - req, err := wps.stream.Recv() - if err != nil { - return err - } - switch uv := req.RequestUnion.(type) { - case *pb.WatchRequest_CreateRequest: - cr := uv.CreateRequest - w := &watcher{ - wr: watchRange{string(cr.Key), string(cr.RangeEnd)}, - id: wps.nextWatcherID, - wps: wps, - - nextrev: cr.StartRevision, - progress: cr.ProgressNotify, - prevKV: cr.PrevKv, - filters: v3rpc.FiltersFromRequest(cr), - } - if !w.wr.valid() { - w.post(&pb.WatchResponse{WatchId: -1, Created: true, Canceled: true}) - continue - } - wps.nextWatcherID++ - w.nextrev = cr.StartRevision - wps.watchers[w.id] = w - wps.ranges.add(w) - case *pb.WatchRequest_CancelRequest: - wps.delete(uv.CancelRequest.WatchId) - default: - panic("not implemented") - } - } -} - -func (wps *watchProxyStream) sendLoop() { - for { - select { - case wresp, ok := <-wps.watchCh: - if !ok { - return - } - if err := wps.stream.Send(wresp); err != nil { - return - } - case <-wps.ctx.Done(): - return - } - } -} - -func (wps *watchProxyStream) delete(id int64) { - wps.mu.Lock() - defer wps.mu.Unlock() - - w, ok := wps.watchers[id] - if !ok { - return - } - wps.ranges.delete(w) - delete(wps.watchers, id) - resp := &pb.WatchResponse{ - Header: &w.lastHeader, - WatchId: id, - Canceled: true, - } - wps.watchCh <- resp -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go deleted file mode 100644 index 5529fb5a2bc..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcast.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "sync" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" -) - -// watchBroadcast broadcasts a server watcher to many client watchers. -type watchBroadcast struct { - // cancel stops the underlying etcd server watcher and closes ch. - cancel context.CancelFunc - donec chan struct{} - - // mu protects rev and receivers. - mu sync.RWMutex - // nextrev is the minimum expected next revision of the watcher on ch. - nextrev int64 - // receivers contains all the client-side watchers to serve. 
- receivers map[*watcher]struct{} - // responses counts the number of responses - responses int -} - -func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) *watchBroadcast { - cctx, cancel := context.WithCancel(wp.ctx) - wb := &watchBroadcast{ - cancel: cancel, - nextrev: w.nextrev, - receivers: make(map[*watcher]struct{}), - donec: make(chan struct{}), - } - wb.add(w) - go func() { - defer close(wb.donec) - // loop because leader loss will close channel - for cctx.Err() == nil { - opts := []clientv3.OpOption{ - clientv3.WithRange(w.wr.end), - clientv3.WithProgressNotify(), - clientv3.WithRev(wb.nextrev), - clientv3.WithPrevKV(), - } - // The create notification should be the first response; - // if the watch is recreated following leader loss, it - // shouldn't post a second create response to the client. - if wb.responses == 0 { - opts = append(opts, clientv3.WithCreatedNotify()) - } - wch := wp.cw.Watch(cctx, w.wr.key, opts...) - - for wr := range wch { - wb.bcast(wr) - update(wb) - } - wp.retryLimiter.Wait(cctx) - } - }() - return wb -} - -func (wb *watchBroadcast) bcast(wr clientv3.WatchResponse) { - wb.mu.Lock() - defer wb.mu.Unlock() - // watchers start on the given revision, if any; ignore header rev on create - if wb.responses > 0 || wb.nextrev == 0 { - wb.nextrev = wr.Header.Revision + 1 - } - wb.responses++ - for r := range wb.receivers { - r.send(wr) - } - if len(wb.receivers) > 0 { - eventsCoalescing.Add(float64(len(wb.receivers) - 1)) - } -} - -// add puts a watcher into receiving a broadcast if its revision at least -// meets the broadcast revision. Returns true if added. -func (wb *watchBroadcast) add(w *watcher) bool { - wb.mu.Lock() - defer wb.mu.Unlock() - if wb.nextrev > w.nextrev || (wb.nextrev == 0 && w.nextrev != 0) { - // wb is too far ahead, w will miss events - // or wb is being established with a current watcher - return false - } - if wb.responses == 0 { - // Newly created; create event will be sent by etcd. - wb.receivers[w] = struct{}{} - return true - } - // already sent by etcd; emulate create event - ok := w.post(&pb.WatchResponse{ - Header: &pb.ResponseHeader{ - // todo: fill in ClusterId - // todo: fill in MemberId: - Revision: w.nextrev, - // todo: fill in RaftTerm: - }, - WatchId: w.id, - Created: true, - }) - if !ok { - return false - } - wb.receivers[w] = struct{}{} - watchersCoalescing.Inc() - - return true -} -func (wb *watchBroadcast) delete(w *watcher) { - wb.mu.Lock() - defer wb.mu.Unlock() - if _, ok := wb.receivers[w]; !ok { - panic("deleting missing watcher from broadcast") - } - delete(wb.receivers, w) - if len(wb.receivers) > 0 { - // do not dec the only left watcher for coalescing. - watchersCoalescing.Dec() - } -} - -func (wb *watchBroadcast) size() int { - wb.mu.RLock() - defer wb.mu.RUnlock() - return len(wb.receivers) -} - -func (wb *watchBroadcast) empty() bool { return wb.size() == 0 } - -func (wb *watchBroadcast) stop() { - if !wb.empty() { - // do not dec the only left watcher for coalescing. 
- watchersCoalescing.Sub(float64(wb.size() - 1)) - } - - wb.cancel() - <-wb.donec -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcasts.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcasts.go deleted file mode 100644 index 8fe9e5f512e..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_broadcasts.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "sync" -) - -type watchBroadcasts struct { - wp *watchProxy - - // mu protects bcasts and watchers from the coalesce loop. - mu sync.Mutex - bcasts map[*watchBroadcast]struct{} - watchers map[*watcher]*watchBroadcast - - updatec chan *watchBroadcast - donec chan struct{} -} - -// maxCoalesceRecievers prevents a popular watchBroadcast from being coalseced. -const maxCoalesceReceivers = 5 - -func newWatchBroadcasts(wp *watchProxy) *watchBroadcasts { - wbs := &watchBroadcasts{ - wp: wp, - bcasts: make(map[*watchBroadcast]struct{}), - watchers: make(map[*watcher]*watchBroadcast), - updatec: make(chan *watchBroadcast, 1), - donec: make(chan struct{}), - } - go func() { - defer close(wbs.donec) - for wb := range wbs.updatec { - wbs.coalesce(wb) - } - }() - return wbs -} - -func (wbs *watchBroadcasts) coalesce(wb *watchBroadcast) { - if wb.size() >= maxCoalesceReceivers { - return - } - wbs.mu.Lock() - for wbswb := range wbs.bcasts { - if wbswb == wb { - continue - } - wb.mu.Lock() - wbswb.mu.Lock() - // 1. check if wbswb is behind wb so it won't skip any events in wb - // 2. ensure wbswb started; nextrev == 0 may mean wbswb is waiting - // for a current watcher and expects a create event from the server. - if wb.nextrev >= wbswb.nextrev && wbswb.responses > 0 { - for w := range wb.receivers { - wbswb.receivers[w] = struct{}{} - wbs.watchers[w] = wbswb - } - wb.receivers = nil - } - wbswb.mu.Unlock() - wb.mu.Unlock() - if wb.empty() { - delete(wbs.bcasts, wb) - wb.stop() - break - } - } - wbs.mu.Unlock() -} - -func (wbs *watchBroadcasts) add(w *watcher) { - wbs.mu.Lock() - defer wbs.mu.Unlock() - // find fitting bcast - for wb := range wbs.bcasts { - if wb.add(w) { - wbs.watchers[w] = wb - return - } - } - // no fit; create a bcast - wb := newWatchBroadcast(wbs.wp, w, wbs.update) - wbs.watchers[w] = wb - wbs.bcasts[wb] = struct{}{} -} - -// delete removes a watcher and returns the number of remaining watchers. 
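
watch_broadcast.go and watch_broadcasts.go above coalesce client watchers that cover the same range onto a single upstream etcd watcher, fanning each response out to every receiver and capping coalescing at maxCoalesceReceivers per broadcast. The sketch below shows only the client-visible behaviour, using the watch proxy together with the stream adapter removed further below (watch_client_adapter.go); the endpoint is an assumption and a running etcd member is required.

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/etcd/clientv3"
        pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
        "github.com/coreos/etcd/proxy/grpcproxy"

        "golang.org/x/net/context"
    )

    func main() {
        cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
        if err != nil {
            log.Fatal(err)
        }
        defer cli.Close()

        wserver, _ := grpcproxy.NewWatchProxy(cli)
        wclient := grpcproxy.WatchServerToWatchClient(wserver)
        stream, err := wclient.Watch(context.Background())
        if err != nil {
            log.Fatal(err)
        }

        // Two watches on the same key from this stream land in the same
        // watchBroadcast and share a single upstream etcd watcher.
        for i := 0; i < 2; i++ {
            err = stream.Send(&pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
                CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")},
            }})
            if err != nil {
                log.Fatal(err)
            }
        }

        for {
            wresp, err := stream.Recv()
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("watch %d: created=%v, %d events\n", wresp.WatchId, wresp.Created, len(wresp.Events))
        }
    }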
-func (wbs *watchBroadcasts) delete(w *watcher) int { - wbs.mu.Lock() - defer wbs.mu.Unlock() - - wb, ok := wbs.watchers[w] - if !ok { - panic("deleting missing watcher from broadcasts") - } - delete(wbs.watchers, w) - wb.delete(w) - if wb.empty() { - delete(wbs.bcasts, wb) - wb.stop() - } - return len(wbs.bcasts) -} - -func (wbs *watchBroadcasts) stop() { - wbs.mu.Lock() - for wb := range wbs.bcasts { - wb.stop() - } - wbs.bcasts = nil - close(wbs.updatec) - wbs.mu.Unlock() - <-wbs.donec -} - -func (wbs *watchBroadcasts) update(wb *watchBroadcast) { - select { - case wbs.updatec <- wb: - default: - } -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go deleted file mode 100644 index 283c2ed07fa..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_client_adapter.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "errors" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -var errAlreadySentHeader = errors.New("grpcproxy: already send header") - -type ws2wc struct{ wserv pb.WatchServer } - -func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient { - return &ws2wc{wserv} -} - -func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) { - // ch1 is buffered so server can send error on close - ch1, ch2 := make(chan interface{}, 1), make(chan interface{}) - headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1) - - cctx, ccancel := context.WithCancel(ctx) - cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel} - wclient := &ws2wcClientStream{chanClientStream{headerc, trailerc, cli}} - - sctx, scancel := context.WithCancel(ctx) - srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel} - wserver := &ws2wcServerStream{chanServerStream{headerc, trailerc, srv, nil}} - go func() { - if err := s.wserv.Watch(wserver); err != nil { - select { - case srv.sendc <- err: - case <-sctx.Done(): - case <-cctx.Done(): - } - } - scancel() - ccancel() - }() - return wclient, nil -} - -// ws2wcClientStream implements Watch_WatchClient -type ws2wcClientStream struct{ chanClientStream } - -// ws2wcServerStream implements Watch_WatchServer -type ws2wcServerStream struct{ chanServerStream } - -func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error { - return s.SendMsg(wr) -} -func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, err - } - return v.(*pb.WatchResponse), nil -} - -func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error { - return s.SendMsg(wr) -} -func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) { - var v interface{} - if err := s.RecvMsg(&v); err != nil { - return nil, 
err - } - return v.(*pb.WatchRequest), nil -} - -// chanServerStream implements grpc.ServerStream with a chanStream -type chanServerStream struct { - headerc chan<- metadata.MD - trailerc chan<- metadata.MD - grpc.Stream - - headers []metadata.MD -} - -func (ss *chanServerStream) SendHeader(md metadata.MD) error { - if ss.headerc == nil { - return errAlreadySentHeader - } - outmd := make(map[string][]string) - for _, h := range append(ss.headers, md) { - for k, v := range h { - outmd[k] = v - } - } - select { - case ss.headerc <- outmd: - ss.headerc = nil - ss.headers = nil - return nil - case <-ss.Context().Done(): - } - return ss.Context().Err() -} - -func (ss *chanServerStream) SetHeader(md metadata.MD) error { - if ss.headerc == nil { - return errAlreadySentHeader - } - ss.headers = append(ss.headers, md) - return nil -} - -func (ss *chanServerStream) SetTrailer(md metadata.MD) { - ss.trailerc <- md -} - -// chanClientStream implements grpc.ClientStream with a chanStream -type chanClientStream struct { - headerc <-chan metadata.MD - trailerc <-chan metadata.MD - *chanStream -} - -func (cs *chanClientStream) Header() (metadata.MD, error) { - select { - case md := <-cs.headerc: - return md, nil - case <-cs.Context().Done(): - } - return nil, cs.Context().Err() -} - -func (cs *chanClientStream) Trailer() metadata.MD { - select { - case md := <-cs.trailerc: - return md - case <-cs.Context().Done(): - return nil - } -} - -func (s *chanClientStream) CloseSend() error { - close(s.chanStream.sendc) - return nil -} - -// chanStream implements grpc.Stream using channels -type chanStream struct { - recvc <-chan interface{} - sendc chan<- interface{} - ctx context.Context - cancel context.CancelFunc -} - -func (s *chanStream) Context() context.Context { return s.ctx } - -func (s *chanStream) SendMsg(m interface{}) error { - select { - case s.sendc <- m: - if err, ok := m.(error); ok { - return err - } - return nil - case <-s.ctx.Done(): - } - return s.ctx.Err() -} - -func (s *chanStream) RecvMsg(m interface{}) error { - v := m.(*interface{}) - select { - case msg, ok := <-s.recvc: - if !ok { - return grpc.ErrClientConnClosing - } - if err, ok := msg.(error); ok { - return err - } - *v = msg - return nil - case <-s.ctx.Done(): - } - return s.ctx.Err() -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_ranges.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_ranges.go deleted file mode 100644 index 31c6b5925ea..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watch_ranges.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "sync" -) - -// watchRanges tracks all open watches for the proxy. 
-type watchRanges struct { - wp *watchProxy - - mu sync.Mutex - bcasts map[watchRange]*watchBroadcasts -} - -func newWatchRanges(wp *watchProxy) *watchRanges { - return &watchRanges{ - wp: wp, - bcasts: make(map[watchRange]*watchBroadcasts), - } -} - -func (wrs *watchRanges) add(w *watcher) { - wrs.mu.Lock() - defer wrs.mu.Unlock() - - if wbs := wrs.bcasts[w.wr]; wbs != nil { - wbs.add(w) - return - } - wbs := newWatchBroadcasts(wrs.wp) - wrs.bcasts[w.wr] = wbs - wbs.add(w) -} - -func (wrs *watchRanges) delete(w *watcher) { - wrs.mu.Lock() - defer wrs.mu.Unlock() - wbs, ok := wrs.bcasts[w.wr] - if !ok { - panic("deleting missing range") - } - if wbs.delete(w) == 0 { - wbs.stop() - delete(wrs.bcasts, w.wr) - } -} - -func (wrs *watchRanges) stop() { - wrs.mu.Lock() - defer wrs.mu.Unlock() - for _, wb := range wrs.bcasts { - wb.stop() - } - wrs.bcasts = nil -} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go deleted file mode 100644 index e860a69ce81..00000000000 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/watcher.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package grpcproxy - -import ( - "time" - - "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/mvccpb" -) - -type watchRange struct { - key, end string -} - -func (wr *watchRange) valid() bool { - return len(wr.end) == 0 || wr.end > wr.key || (wr.end[0] == 0 && len(wr.end) == 1) -} - -type watcher struct { - // user configuration - - wr watchRange - filters []mvcc.FilterFunc - progress bool - prevKV bool - - // id is the id returned to the client on its watch stream. - id int64 - // nextrev is the minimum expected next event revision. - nextrev int64 - // lastHeader has the last header sent over the stream. - lastHeader pb.ResponseHeader - - // wps is the parent. - wps *watchProxyStream -} - -// send filters out repeated events by discarding revisions older -// than the last one sent over the watch channel. -func (w *watcher) send(wr clientv3.WatchResponse) { - if wr.IsProgressNotify() && !w.progress { - return - } - if w.nextrev > wr.Header.Revision && len(wr.Events) > 0 { - return - } - if w.nextrev == 0 { - // current watch; expect updates following this revision - w.nextrev = wr.Header.Revision + 1 - } - - events := make([]*mvccpb.Event, 0, len(wr.Events)) - - var lastRev int64 - for i := range wr.Events { - ev := (*mvccpb.Event)(wr.Events[i]) - if ev.Kv.ModRevision < w.nextrev { - continue - } else { - // We cannot update w.rev here. - // txn can have multiple events with the same rev. - // If w.nextrev updates here, it would skip events in the same txn. 
- lastRev = ev.Kv.ModRevision - } - - filtered := false - for _, filter := range w.filters { - if filter(*ev) { - filtered = true - break - } - } - if filtered { - continue - } - - if !w.prevKV { - evCopy := *ev - evCopy.PrevKv = nil - ev = &evCopy - } - events = append(events, ev) - } - - if lastRev >= w.nextrev { - w.nextrev = lastRev + 1 - } - - // all events are filtered out? - if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 { - return - } - - w.lastHeader = wr.Header - w.post(&pb.WatchResponse{ - Header: &wr.Header, - Created: wr.Created, - WatchId: w.id, - Events: events, - }) -} - -// post puts a watch response on the watcher's proxy stream channel -func (w *watcher) post(wr *pb.WatchResponse) bool { - select { - case w.wps.watchCh <- wr: - case <-time.After(50 * time.Millisecond): - w.wps.cancel() - return false - } - return true -} diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/github.com/coreos/etcd/raft/doc.go deleted file mode 100644 index b55c591ff5d..00000000000 --- a/vendor/github.com/coreos/etcd/raft/doc.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package raft sends and receives messages in the Protocol Buffer format -defined in the raftpb package. - -Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. -The state machine is kept in sync through the use of a replicated log. -For more details on Raft, see "In Search of an Understandable Consensus Algorithm" -(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. - -A simple example application, _raftexample_, is also available to help illustrate -how to use this package in practice: -https://github.com/coreos/etcd/tree/master/contrib/raftexample - -Usage - -The primary object in raft is a Node. You either start a Node from scratch -using raft.StartNode or start a Node from some initial state using raft.RestartNode. - -To start a node from scratch: - - storage := raft.NewMemoryStorage() - c := &Config{ - ID: 0x01, - ElectionTick: 10, - HeartbeatTick: 1, - Storage: storage, - MaxSizePerMsg: 4096, - MaxInflightMsgs: 256, - } - n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) - -To restart a node from previous state: - - storage := raft.NewMemoryStorage() - - // recover the in-memory storage from persistent - // snapshot, state and entries. - storage.ApplySnapshot(snapshot) - storage.SetHardState(state) - storage.Append(entries) - - c := &Config{ - ID: 0x01, - ElectionTick: 10, - HeartbeatTick: 1, - Storage: storage, - MaxSizePerMsg: 4096, - MaxInflightMsgs: 256, - } - - // restart raft without peer information. - // peer information is already included in the storage. - n := raft.RestartNode(c) - -Now that you are holding onto a Node you have a few responsibilities: - -First, you must read from the Node.Ready() channel and process the updates -it contains. These steps may be performed in parallel, except as noted in step -2. - -1. 
Write HardState, Entries, and Snapshot to persistent storage if they are -not empty. Note that when writing an Entry with Index i, any -previously-persisted entries with Index >= i must be discarded. - -2. Send all Messages to the nodes named in the To field. It is important that -no messages be sent until the latest HardState has been persisted to disk, -and all Entries written by any previous Ready batch (Messages may be sent while -entries from the same batch are being persisted). To reduce the I/O latency, an -optimization can be applied to make leader write to disk in parallel with its -followers (as explained at section 10.2.1 in Raft thesis). If any Message has type -MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be -large). - -Note: Marshalling messages is not thread-safe; it is important that you -make sure that no new entries are persisted while marshalling. -The easiest way to achieve this is to serialise the messages directly inside -your main raft loop. - -3. Apply Snapshot (if any) and CommittedEntries to the state machine. -If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() -to apply it to the node. The configuration change may be cancelled at this point -by setting the NodeID field to zero before calling ApplyConfChange -(but ApplyConfChange must be called one way or the other, and the decision to cancel -must be based solely on the state machine and not external information such as -the observed health of the node). - -4. Call Node.Advance() to signal readiness for the next batch of updates. -This may be done at any time after step 1, although all updates must be processed -in the order they were returned by Ready. - -Second, all persisted log entries must be made available via an -implementation of the Storage interface. The provided MemoryStorage -type can be used for this (if you repopulate its state upon a -restart), or you can supply your own disk-backed implementation. - -Third, when you receive a message from another node, pass it to Node.Step: - - func recvRaftRPC(ctx context.Context, m raftpb.Message) { - n.Step(ctx, m) - } - -Finally, you need to call Node.Tick() at regular intervals (probably -via a time.Ticker). Raft has two important timeouts: heartbeat and the -election timeout. However, internally to the raft package time is -represented by an abstract "tick". - -The total state machine handling loop will look something like this: - - for { - select { - case <-s.Ticker: - n.Tick() - case rd := <-s.Node.Ready(): - saveToStorage(rd.State, rd.Entries, rd.Snapshot) - send(rd.Messages) - if !raft.IsEmptySnap(rd.Snapshot) { - processSnapshot(rd.Snapshot) - } - for _, entry := range rd.CommittedEntries { - process(entry) - if entry.Type == raftpb.EntryConfChange { - var cc raftpb.ConfChange - cc.Unmarshal(entry.Data) - s.Node.ApplyConfChange(cc) - } - } - s.Node.Advance() - case <-s.done: - return - } - } - -To propose changes to the state machine from your node take your application -data, serialize it into a byte slice and call: - - n.Propose(ctx, data) - -If the proposal is committed, data will appear in committed entries with type -raftpb.EntryNormal. There is no guarantee that a proposed command will be -committed; you may have to re-propose after a timeout. - -To add or remove node in a cluster, build ConfChange struct 'cc' and call: - - n.ProposeConfChange(ctx, cc) - -After config change is committed, some committed entry with type -raftpb.EntryConfChange will be returned. 
You must apply it to node through: - - var cc raftpb.ConfChange - cc.Unmarshal(data) - n.ApplyConfChange(cc) - -Note: An ID represents a unique node in a cluster for all time. A -given ID MUST be used only once even if the old node has been removed. -This means that for example IP addresses make poor node IDs since they -may be reused. Node IDs must be non-zero. - -Implementation notes - -This implementation is up to date with the final Raft thesis -(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our -implementation of the membership change protocol differs somewhat from -that described in chapter 4. The key invariant that membership changes -happen one node at a time is preserved, but in our implementation the -membership change takes effect when its entry is applied, not when it -is added to the log (so the entry is committed under the old -membership instead of the new). This is equivalent in terms of safety, -since the old and new configurations are guaranteed to overlap. - -To ensure that we do not attempt to commit two membership changes at -once by matching log positions (which would be unsafe since they -should have different quorum requirements), we simply disallow any -proposed membership change while any uncommitted change appears in -the leader's log. - -This approach introduces a problem when you try to remove a member -from a two-member cluster: If one of the members dies before the -other one receives the commit of the confchange entry, then the member -cannot be removed any more since the cluster cannot make progress. -For this reason it is highly recommended to use three or more nodes in -every cluster. - -MessageType - -Package raft sends and receives message in Protocol Buffer format (defined -in raftpb package). Each state (follower, candidate, leader) implements its -own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when -advancing with the given raftpb.Message. Each step is determined by its -raftpb.MessageType. Note that every step is checked by one common method -'Step' that safety-checks the terms of node and incoming message to prevent -stale log entries: - - 'MsgHup' is used for election. If a node is a follower or candidate, the - 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or - candidate has not received any heartbeat before the election timeout, it - passes 'MsgHup' to its Step method and becomes (or remains) a candidate to - start a new election. - - 'MsgBeat' is an internal type that signals the leader to send a heartbeat of - the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in - the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to - send periodic 'MsgHeartbeat' messages to its followers. - - 'MsgProp' proposes to append data to its log entries. This is a special - type to redirect proposals to leader. Therefore, send method overwrites - raftpb.Message's term with its HardState's term to avoid attaching its - local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step' - method, the leader first calls the 'appendEntry' method to append entries - to its log, and then calls 'bcastAppend' method to send those entries to - its peers. When passed to candidate, 'MsgProp' is dropped. When passed to - follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send - method. It is stored with sender's ID and later forwarded to leader by - rafthttp package. - - 'MsgApp' contains log entries to replicate. 
A leader calls bcastAppend, - which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp' - type. When 'MsgApp' is passed to candidate's Step method, candidate reverts - back to follower, because it indicates that there is a valid leader sending - 'MsgApp' messages. Candidate and follower respond to this message in - 'MsgAppResp' type. - - 'MsgAppResp' is response to log replication request('MsgApp'). When - 'MsgApp' is passed to candidate or follower's Step method, it responds by - calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft - mailbox. - - 'MsgVote' requests votes for election. When a node is a follower or - candidate and 'MsgHup' is passed to its Step method, then the node calls - 'campaign' method to campaign itself to become a leader. Once 'campaign' - method is called, the node becomes candidate and sends 'MsgVote' to peers - in cluster to request votes. When passed to leader or candidate's Step - method and the message's Term is lower than leader's or candidate's, - 'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true). - If leader or candidate receives 'MsgVote' with higher term, it will revert - back to follower. When 'MsgVote' is passed to follower, it votes for the - sender only when sender's last term is greater than MsgVote's term or - sender's last term is equal to MsgVote's term but sender's last committed - index is greater than or equal to follower's. - - 'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is - passed to candidate, the candidate calculates how many votes it has won. If - it's more than majority (quorum), it becomes leader and calls 'bcastAppend'. - If candidate receives majority of votes of denials, it reverts back to - follower. - - 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election - protocol. When Config.PreVote is true, a pre-election is carried out first - (using the same rules as a regular election), and no node increases its term - number unless the pre-election indicates that the campaigining node would win. - This minimizes disruption when a partitioned node rejoins the cluster. - - 'MsgSnap' requests to install a snapshot message. When a node has just - become a leader or the leader receives 'MsgProp' message, it calls - 'bcastAppend' method, which then calls 'sendAppend' method to each - follower. In 'sendAppend', if a leader fails to get term or entries, - the leader requests snapshot by sending 'MsgSnap' type message. - - 'MsgSnapStatus' tells the result of snapshot install message. When a - follower rejected 'MsgSnap', it indicates the snapshot request with - 'MsgSnap' had failed from network issues which causes the network layer - to fail to send out snapshots to its followers. Then leader considers - follower's progress as probe. When 'MsgSnap' were not rejected, it - indicates that the snapshot succeeded and the leader sets follower's - progress to probe and resumes its log replication. - - 'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed - to candidate and message's term is higher than candidate's, the candidate - reverts back to follower and updates its committed index from the one in - this heartbeat. And it sends the message to its mailbox. When - 'MsgHeartbeat' is passed to follower's Step method and message's term is - higher than follower's, the follower updates its leaderID with the ID - from the message. - - 'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. 
When 'MsgHeartbeatResp' - is passed to leader's Step method, the leader knows which follower - responded. And only when the leader's last committed index is greater than - follower's Match index, the leader runs 'sendAppend` method. - - 'MsgUnreachable' tells that request(message) wasn't delivered. When - 'MsgUnreachable' is passed to leader's Step method, the leader discovers - that the follower that sent this 'MsgUnreachable' is not reachable, often - indicating 'MsgApp' is lost. When follower's progress state is replicate, - the leader sets it back to probe. - -*/ -package raft diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/github.com/coreos/etcd/raft/log.go deleted file mode 100644 index c3036d3c90d..00000000000 --- a/vendor/github.com/coreos/etcd/raft/log.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "fmt" - "log" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -type raftLog struct { - // storage contains all stable entries since the last snapshot. - storage Storage - - // unstable contains all unstable entries and snapshot. - // they will be saved into storage. - unstable unstable - - // committed is the highest log position that is known to be in - // stable storage on a quorum of nodes. - committed uint64 - // applied is the highest log position that the application has - // been instructed to apply to its state machine. - // Invariant: applied <= committed - applied uint64 - - logger Logger -} - -// newLog returns log using the given storage. It recovers the log to the state -// that it just commits and applies the latest snapshot. -func newLog(storage Storage, logger Logger) *raftLog { - if storage == nil { - log.Panic("storage must not be nil") - } - log := &raftLog{ - storage: storage, - logger: logger, - } - firstIndex, err := storage.FirstIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - lastIndex, err := storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - log.unstable.offset = lastIndex + 1 - log.unstable.logger = logger - // Initialize our committed and applied pointers to the time of the last compaction. - log.committed = firstIndex - 1 - log.applied = firstIndex - 1 - - return log -} - -func (l *raftLog) String() string { - return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries)) -} - -// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise, -// it returns (last index of new entries, true). 
-func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) { - if l.matchTerm(index, logTerm) { - lastnewi = index + uint64(len(ents)) - ci := l.findConflict(ents) - switch { - case ci == 0: - case ci <= l.committed: - l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed) - default: - offset := index + 1 - l.append(ents[ci-offset:]...) - } - l.commitTo(min(committed, lastnewi)) - return lastnewi, true - } - return 0, false -} - -func (l *raftLog) append(ents ...pb.Entry) uint64 { - if len(ents) == 0 { - return l.lastIndex() - } - if after := ents[0].Index - 1; after < l.committed { - l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed) - } - l.unstable.truncateAndAppend(ents) - return l.lastIndex() -} - -// findConflict finds the index of the conflict. -// It returns the first pair of conflicting entries between the existing -// entries and the given entries, if there are any. -// If there is no conflicting entries, and the existing entries contains -// all the given entries, zero will be returned. -// If there is no conflicting entries, but the given entries contains new -// entries, the index of the first new entry will be returned. -// An entry is considered to be conflicting if it has the same index but -// a different term. -// The first entry MUST have an index equal to the argument 'from'. -// The index of the given entries MUST be continuously increasing. -func (l *raftLog) findConflict(ents []pb.Entry) uint64 { - for _, ne := range ents { - if !l.matchTerm(ne.Index, ne.Term) { - if ne.Index <= l.lastIndex() { - l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]", - ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term) - } - return ne.Index - } - } - return 0 -} - -func (l *raftLog) unstableEntries() []pb.Entry { - if len(l.unstable.entries) == 0 { - return nil - } - return l.unstable.entries -} - -// nextEnts returns all the available entries for execution. -// If applied is smaller than the index of snapshot, it returns all committed -// entries after the index of snapshot. -func (l *raftLog) nextEnts() (ents []pb.Entry) { - off := max(l.applied+1, l.firstIndex()) - if l.committed+1 > off { - ents, err := l.slice(off, l.committed+1, noLimit) - if err != nil { - l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err) - } - return ents - } - return nil -} - -// hasNextEnts returns if there is any available entries for execution. This -// is a fast check without heavy raftLog.slice() in raftLog.nextEnts(). -func (l *raftLog) hasNextEnts() bool { - off := max(l.applied+1, l.firstIndex()) - return l.committed+1 > off -} - -func (l *raftLog) snapshot() (pb.Snapshot, error) { - if l.unstable.snapshot != nil { - return *l.unstable.snapshot, nil - } - return l.storage.Snapshot() -} - -func (l *raftLog) firstIndex() uint64 { - if i, ok := l.unstable.maybeFirstIndex(); ok { - return i - } - index, err := l.storage.FirstIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - return index -} - -func (l *raftLog) lastIndex() uint64 { - if i, ok := l.unstable.maybeLastIndex(); ok { - return i - } - i, err := l.storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - return i -} - -func (l *raftLog) commitTo(tocommit uint64) { - // never decrease commit - if l.committed < tocommit { - if l.lastIndex() < tocommit { - l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. 
Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex()) - } - l.committed = tocommit - } -} - -func (l *raftLog) appliedTo(i uint64) { - if i == 0 { - return - } - if l.committed < i || i < l.applied { - l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed) - } - l.applied = i -} - -func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) } - -func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) } - -func (l *raftLog) lastTerm() uint64 { - t, err := l.term(l.lastIndex()) - if err != nil { - l.logger.Panicf("unexpected error when getting the last term (%v)", err) - } - return t -} - -func (l *raftLog) term(i uint64) (uint64, error) { - // the valid term range is [index of dummy entry, last index] - dummyIndex := l.firstIndex() - 1 - if i < dummyIndex || i > l.lastIndex() { - // TODO: return an error instead? - return 0, nil - } - - if t, ok := l.unstable.maybeTerm(i); ok { - return t, nil - } - - t, err := l.storage.Term(i) - if err == nil { - return t, nil - } - if err == ErrCompacted || err == ErrUnavailable { - return 0, err - } - panic(err) // TODO(bdarnell) -} - -func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) { - if i > l.lastIndex() { - return nil, nil - } - return l.slice(i, l.lastIndex()+1, maxsize) -} - -// allEntries returns all entries in the log. -func (l *raftLog) allEntries() []pb.Entry { - ents, err := l.entries(l.firstIndex(), noLimit) - if err == nil { - return ents - } - if err == ErrCompacted { // try again if there was a racing compaction - return l.allEntries() - } - // TODO (xiangli): handle error? - panic(err) -} - -// isUpToDate determines if the given (lastIndex,term) log is more up-to-date -// by comparing the index and term of the last entries in the existing logs. -// If the logs have last entries with different terms, then the log with the -// later term is more up-to-date. If the logs end with the same term, then -// whichever log has the larger lastIndex is more up-to-date. If the logs are -// the same, the given log is up-to-date. -func (l *raftLog) isUpToDate(lasti, term uint64) bool { - return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex()) -} - -func (l *raftLog) matchTerm(i, term uint64) bool { - t, err := l.term(i) - if err != nil { - return false - } - return t == term -} - -func (l *raftLog) maybeCommit(maxIndex, term uint64) bool { - if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term { - l.commitTo(maxIndex) - return true - } - return false -} - -func (l *raftLog) restore(s pb.Snapshot) { - l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term) - l.committed = s.Metadata.Index - l.unstable.restore(s) -} - -// slice returns a slice of log entries from lo through hi-1, inclusive. 
-func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { - err := l.mustCheckOutOfBounds(lo, hi) - if err != nil { - return nil, err - } - if lo == hi { - return nil, nil - } - var ents []pb.Entry - if lo < l.unstable.offset { - storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize) - if err == ErrCompacted { - return nil, err - } else if err == ErrUnavailable { - l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset)) - } else if err != nil { - panic(err) // TODO(bdarnell) - } - - // check if ents has reached the size limitation - if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo { - return storedEnts, nil - } - - ents = storedEnts - } - if hi > l.unstable.offset { - unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) - if len(ents) > 0 { - ents = append([]pb.Entry{}, ents...) - ents = append(ents, unstable...) - } else { - ents = unstable - } - } - return limitSize(ents, maxSize), nil -} - -// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries) -func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error { - if lo > hi { - l.logger.Panicf("invalid slice %d > %d", lo, hi) - } - fi := l.firstIndex() - if lo < fi { - return ErrCompacted - } - - length := l.lastIndex() + 1 - fi - if lo < fi || hi > fi+length { - l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex()) - } - return nil -} - -func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 { - if err == nil { - return t - } - if err == ErrCompacted { - return 0 - } - l.logger.Panicf("unexpected error (%v)", err) - return 0 -} diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go deleted file mode 100644 index 8ae301c3d8d..00000000000 --- a/vendor/github.com/coreos/etcd/raft/log_unstable.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import pb "github.com/coreos/etcd/raft/raftpb" - -// unstable.entries[i] has raft log position i+unstable.offset. -// Note that unstable.offset may be less than the highest log -// position in storage; this means that the next write to storage -// might need to truncate the log before persisting unstable.entries. -type unstable struct { - // the incoming unstable snapshot, if any. - snapshot *pb.Snapshot - // all entries that have not yet been written to storage. - entries []pb.Entry - offset uint64 - - logger Logger -} - -// maybeFirstIndex returns the index of the first possible entry in entries -// if it has a snapshot. -func (u *unstable) maybeFirstIndex() (uint64, bool) { - if u.snapshot != nil { - return u.snapshot.Metadata.Index + 1, true - } - return 0, false -} - -// maybeLastIndex returns the last index if it has at least one -// unstable entry or snapshot. 
-func (u *unstable) maybeLastIndex() (uint64, bool) { - if l := len(u.entries); l != 0 { - return u.offset + uint64(l) - 1, true - } - if u.snapshot != nil { - return u.snapshot.Metadata.Index, true - } - return 0, false -} - -// maybeTerm returns the term of the entry at index i, if there -// is any. -func (u *unstable) maybeTerm(i uint64) (uint64, bool) { - if i < u.offset { - if u.snapshot == nil { - return 0, false - } - if u.snapshot.Metadata.Index == i { - return u.snapshot.Metadata.Term, true - } - return 0, false - } - - last, ok := u.maybeLastIndex() - if !ok { - return 0, false - } - if i > last { - return 0, false - } - return u.entries[i-u.offset].Term, true -} - -func (u *unstable) stableTo(i, t uint64) { - gt, ok := u.maybeTerm(i) - if !ok { - return - } - // if i < offset, term is matched with the snapshot - // only update the unstable entries if term is matched with - // an unstable entry. - if gt == t && i >= u.offset { - u.entries = u.entries[i+1-u.offset:] - u.offset = i + 1 - } -} - -func (u *unstable) stableSnapTo(i uint64) { - if u.snapshot != nil && u.snapshot.Metadata.Index == i { - u.snapshot = nil - } -} - -func (u *unstable) restore(s pb.Snapshot) { - u.offset = s.Metadata.Index + 1 - u.entries = nil - u.snapshot = &s -} - -func (u *unstable) truncateAndAppend(ents []pb.Entry) { - after := ents[0].Index - switch { - case after == u.offset+uint64(len(u.entries)): - // after is the next index in the u.entries - // directly append - u.entries = append(u.entries, ents...) - case after <= u.offset: - u.logger.Infof("replace the unstable entries from index %d", after) - // The log is being truncated to before our current offset - // portion, so set the offset and replace the entries - u.offset = after - u.entries = ents - default: - // truncate to after and copy to u.entries - // then append - u.logger.Infof("truncate the unstable entries before index %d", after) - u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...) - u.entries = append(u.entries, ents...) - } -} - -func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry { - u.mustCheckOutOfBounds(lo, hi) - return u.entries[lo-u.offset : hi-u.offset] -} - -// u.offset <= lo <= hi <= u.offset+len(u.offset) -func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) { - if lo > hi { - u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi) - } - upper := u.offset + uint64(len(u.entries)) - if lo < u.offset || hi > upper { - u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper) - } -} diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/github.com/coreos/etcd/raft/logger.go deleted file mode 100644 index 92e55b373e1..00000000000 --- a/vendor/github.com/coreos/etcd/raft/logger.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
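The three cases in truncateAndAppend above (plain append, full replacement, and truncate-then-append) can be seen in isolation with a toy version that tracks only indexes; this is an illustration, not the package's type.

package raftexample

// toyTruncateAndAppend mirrors unstable.truncateAndAppend using bare uint64
// values that stand in for entries identified by their log index. It returns
// the new entries slice and the (possibly updated) offset.
func toyTruncateAndAppend(entries []uint64, offset uint64, incoming []uint64) ([]uint64, uint64) {
	after := incoming[0]
	switch {
	case after == offset+uint64(len(entries)):
		// incoming continues right after the current tail: append directly.
		return append(entries, incoming...), offset
	case after <= offset:
		// incoming rewrites everything held here: replace entries and offset.
		return incoming, after
	default:
		// incoming overlaps the tail: keep [offset, after), then append.
		kept := append([]uint64{}, entries[:after-offset]...)
		return append(kept, incoming...), offset
	}
}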
- -package raft - -import ( - "fmt" - "io/ioutil" - "log" - "os" -) - -type Logger interface { - Debug(v ...interface{}) - Debugf(format string, v ...interface{}) - - Error(v ...interface{}) - Errorf(format string, v ...interface{}) - - Info(v ...interface{}) - Infof(format string, v ...interface{}) - - Warning(v ...interface{}) - Warningf(format string, v ...interface{}) - - Fatal(v ...interface{}) - Fatalf(format string, v ...interface{}) - - Panic(v ...interface{}) - Panicf(format string, v ...interface{}) -} - -func SetLogger(l Logger) { raftLogger = l } - -var ( - defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)} - discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)} - raftLogger = Logger(defaultLogger) -) - -const ( - calldepth = 2 -) - -// DefaultLogger is a default implementation of the Logger interface. -type DefaultLogger struct { - *log.Logger - debug bool -} - -func (l *DefaultLogger) EnableTimestamps() { - l.SetFlags(l.Flags() | log.Ldate | log.Ltime) -} - -func (l *DefaultLogger) EnableDebug() { - l.debug = true -} - -func (l *DefaultLogger) Debug(v ...interface{}) { - if l.debug { - l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) - } -} - -func (l *DefaultLogger) Debugf(format string, v ...interface{}) { - if l.debug { - l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) - } -} - -func (l *DefaultLogger) Info(v ...interface{}) { - l.Output(calldepth, header("INFO", fmt.Sprint(v...))) -} - -func (l *DefaultLogger) Infof(format string, v ...interface{}) { - l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) -} - -func (l *DefaultLogger) Error(v ...interface{}) { - l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) -} - -func (l *DefaultLogger) Errorf(format string, v ...interface{}) { - l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) -} - -func (l *DefaultLogger) Warning(v ...interface{}) { - l.Output(calldepth, header("WARN", fmt.Sprint(v...))) -} - -func (l *DefaultLogger) Warningf(format string, v ...interface{}) { - l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) -} - -func (l *DefaultLogger) Fatal(v ...interface{}) { - l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) - os.Exit(1) -} - -func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { - l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) - os.Exit(1) -} - -func (l *DefaultLogger) Panic(v ...interface{}) { - l.Logger.Panic(v) -} - -func (l *DefaultLogger) Panicf(format string, v ...interface{}) { - l.Logger.Panicf(format, v...) -} - -func header(lvl, msg string) string { - return fmt.Sprintf("%s: %s", lvl, msg) -} diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go deleted file mode 100644 index c8410fdc77f..00000000000 --- a/vendor/github.com/coreos/etcd/raft/node.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
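Applications can route the package's logging through the hooks above. A short sketch of installing a debug-enabled DefaultLogger via SetLogger follows; the prefix string is arbitrary.

package raftexample

import (
	"log"
	"os"

	"github.com/coreos/etcd/raft"
)

// enableDebugLogging installs a DefaultLogger that writes to stderr with
// timestamps and debug output enabled, using the SetLogger hook above.
func enableDebugLogging() {
	l := &raft.DefaultLogger{Logger: log.New(os.Stderr, "raft ", log.LstdFlags)}
	l.EnableTimestamps()
	l.EnableDebug()
	raft.SetLogger(l)
}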
- -package raft - -import ( - "errors" - - pb "github.com/coreos/etcd/raft/raftpb" - "golang.org/x/net/context" -) - -type SnapshotStatus int - -const ( - SnapshotFinish SnapshotStatus = 1 - SnapshotFailure SnapshotStatus = 2 -) - -var ( - emptyState = pb.HardState{} - - // ErrStopped is returned by methods on Nodes that have been stopped. - ErrStopped = errors.New("raft: stopped") -) - -// SoftState provides state that is useful for logging and debugging. -// The state is volatile and does not need to be persisted to the WAL. -type SoftState struct { - Lead uint64 // must use atomic operations to access; keep 64-bit aligned. - RaftState StateType -} - -func (a *SoftState) equal(b *SoftState) bool { - return a.Lead == b.Lead && a.RaftState == b.RaftState -} - -// Ready encapsulates the entries and messages that are ready to read, -// be saved to stable storage, committed or sent to other peers. -// All fields in Ready are read-only. -type Ready struct { - // The current volatile state of a Node. - // SoftState will be nil if there is no update. - // It is not required to consume or store SoftState. - *SoftState - - // The current state of a Node to be saved to stable storage BEFORE - // Messages are sent. - // HardState will be equal to empty state if there is no update. - pb.HardState - - // ReadStates can be used for node to serve linearizable read requests locally - // when its applied index is greater than the index in ReadState. - // Note that the readState will be returned when raft receives msgReadIndex. - // The returned is only valid for the request that requested to read. - ReadStates []ReadState - - // Entries specifies entries to be saved to stable storage BEFORE - // Messages are sent. - Entries []pb.Entry - - // Snapshot specifies the snapshot to be saved to stable storage. - Snapshot pb.Snapshot - - // CommittedEntries specifies entries to be committed to a - // store/state-machine. These have previously been committed to stable - // store. - CommittedEntries []pb.Entry - - // Messages specifies outbound messages to be sent AFTER Entries are - // committed to stable storage. - // If it contains a MsgSnap message, the application MUST report back to raft - // when the snapshot has been received or has failed by calling ReportSnapshot. - Messages []pb.Message -} - -func isHardStateEqual(a, b pb.HardState) bool { - return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit -} - -// IsEmptyHardState returns true if the given HardState is empty. -func IsEmptyHardState(st pb.HardState) bool { - return isHardStateEqual(st, emptyState) -} - -// IsEmptySnap returns true if the given Snapshot is empty. -func IsEmptySnap(sp pb.Snapshot) bool { - return sp.Metadata.Index == 0 -} - -func (rd Ready) containsUpdates() bool { - return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) || - !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 || - len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0 -} - -// Node represents a node in a raft cluster. -type Node interface { - // Tick increments the internal logical clock for the Node by a single tick. Election - // timeouts and heartbeat timeouts are in units of ticks. - Tick() - // Campaign causes the Node to transition to candidate state and start campaigning to become leader. - Campaign(ctx context.Context) error - // Propose proposes that data be appended to the log. - Propose(ctx context.Context, data []byte) error - // ProposeConfChange proposes config change. 
- // At most one ConfChange can be in the process of going through consensus. - // Application needs to call ApplyConfChange when applying EntryConfChange type entry. - ProposeConfChange(ctx context.Context, cc pb.ConfChange) error - // Step advances the state machine using the given message. ctx.Err() will be returned, if any. - Step(ctx context.Context, msg pb.Message) error - - // Ready returns a channel that returns the current point-in-time state. - // Users of the Node must call Advance after retrieving the state returned by Ready. - // - // NOTE: No committed entries from the next Ready may be applied until all committed entries - // and snapshots from the previous one have finished. - Ready() <-chan Ready - - // Advance notifies the Node that the application has saved progress up to the last Ready. - // It prepares the node to return the next available Ready. - // - // The application should generally call Advance after it applies the entries in last Ready. - // - // However, as an optimization, the application may call Advance while it is applying the - // commands. For example. when the last Ready contains a snapshot, the application might take - // a long time to apply the snapshot data. To continue receiving Ready without blocking raft - // progress, it can call Advance before finishing applying the last ready. - Advance() - // ApplyConfChange applies config change to the local node. - // Returns an opaque ConfState protobuf which must be recorded - // in snapshots. Will never return nil; it returns a pointer only - // to match MemoryStorage.Compact. - ApplyConfChange(cc pb.ConfChange) *pb.ConfState - - // TransferLeadership attempts to transfer leadership to the given transferee. - TransferLeadership(ctx context.Context, lead, transferee uint64) - - // ReadIndex request a read state. The read state will be set in the ready. - // Read state has a read index. Once the application advances further than the read - // index, any linearizable read requests issued before the read request can be - // processed safely. The read state will have the same rctx attached. - ReadIndex(ctx context.Context, rctx []byte) error - - // Status returns the current status of the raft state machine. - Status() Status - // ReportUnreachable reports the given node is not reachable for the last send. - ReportUnreachable(id uint64) - // ReportSnapshot reports the status of the sent snapshot. - ReportSnapshot(id uint64, status SnapshotStatus) - // Stop performs any necessary termination of the Node. - Stop() -} - -type Peer struct { - ID uint64 - Context []byte -} - -// StartNode returns a new Node given configuration and a list of raft peers. -// It appends a ConfChangeAddNode entry for each given peer to the initial log. -func StartNode(c *Config, peers []Peer) Node { - r := newRaft(c) - // become the follower at term 1 and apply initial configuration - // entries of term 1 - r.becomeFollower(1, None) - for _, peer := range peers { - cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} - d, err := cc.Marshal() - if err != nil { - panic("unexpected marshal error") - } - e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d} - r.raftLog.append(e) - } - // Mark these initial entries as committed. - // TODO(bdarnell): These entries are still unstable; do we need to preserve - // the invariant that committed < unstable? 
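The Node contract above (persist before sending, apply committed entries, then Advance) is easiest to see as a single drive loop. The sketch below assumes the application supplies its own persistence, transport and apply callbacks; storage is the package's in-memory Storage implementation, error handling is elided, and only the raft and raftpb identifiers come from this package.

package raftexample

import (
	"time"

	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
)

// runNode ticks the node on a timer and services its Ready channel: persist
// HardState, Entries and Snapshot, hand Messages to the transport, apply
// CommittedEntries, and finally call Advance.
func runNode(n raft.Node, storage *raft.MemoryStorage,
	persist func(raftpb.HardState, []raftpb.Entry, raftpb.Snapshot),
	send func([]raftpb.Message),
	apply func(raftpb.Entry)) {

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			n.Tick()
		case rd := <-n.Ready():
			persist(rd.HardState, rd.Entries, rd.Snapshot)
			if !raft.IsEmptySnap(rd.Snapshot) {
				storage.ApplySnapshot(rd.Snapshot) // error handling elided
			}
			storage.Append(rd.Entries) // error handling elided
			send(rd.Messages)
			for _, e := range rd.CommittedEntries {
				apply(e)
			}
			n.Advance()
		}
	}
}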
- r.raftLog.committed = r.raftLog.lastIndex() - // Now apply them, mainly so that the application can call Campaign - // immediately after StartNode in tests. Note that these nodes will - // be added to raft twice: here and when the application's Ready - // loop calls ApplyConfChange. The calls to addNode must come after - // all calls to raftLog.append so progress.next is set after these - // bootstrapping entries (it is an error if we try to append these - // entries since they have already been committed). - // We do not set raftLog.applied so the application will be able - // to observe all conf changes via Ready.CommittedEntries. - for _, peer := range peers { - r.addNode(peer.ID) - } - - n := newNode() - n.logger = c.Logger - go n.run(r) - return &n -} - -// RestartNode is similar to StartNode but does not take a list of peers. -// The current membership of the cluster will be restored from the Storage. -// If the caller has an existing state machine, pass in the last log index that -// has been applied to it; otherwise use zero. -func RestartNode(c *Config) Node { - r := newRaft(c) - - n := newNode() - n.logger = c.Logger - go n.run(r) - return &n -} - -// node is the canonical implementation of the Node interface -type node struct { - propc chan pb.Message - recvc chan pb.Message - confc chan pb.ConfChange - confstatec chan pb.ConfState - readyc chan Ready - advancec chan struct{} - tickc chan struct{} - done chan struct{} - stop chan struct{} - status chan chan Status - - logger Logger -} - -func newNode() node { - return node{ - propc: make(chan pb.Message), - recvc: make(chan pb.Message), - confc: make(chan pb.ConfChange), - confstatec: make(chan pb.ConfState), - readyc: make(chan Ready), - advancec: make(chan struct{}), - // make tickc a buffered chan, so raft node can buffer some ticks when the node - // is busy processing raft messages. Raft node will resume process buffered - // ticks when it becomes idle. - tickc: make(chan struct{}, 128), - done: make(chan struct{}), - stop: make(chan struct{}), - status: make(chan chan Status), - } -} - -func (n *node) Stop() { - select { - case n.stop <- struct{}{}: - // Not already stopped, so trigger it - case <-n.done: - // Node has already been stopped - no need to do anything - return - } - // Block until the stop has been acknowledged by run() - <-n.done -} - -func (n *node) run(r *raft) { - var propc chan pb.Message - var readyc chan Ready - var advancec chan struct{} - var prevLastUnstablei, prevLastUnstablet uint64 - var havePrevLastUnstablei bool - var prevSnapi uint64 - var rd Ready - - lead := None - prevSoftSt := r.softState() - prevHardSt := emptyState - - for { - if advancec != nil { - readyc = nil - } else { - rd = newReady(r, prevSoftSt, prevHardSt) - if rd.containsUpdates() { - readyc = n.readyc - } else { - readyc = nil - } - } - - if lead != r.lead { - if r.hasLeader() { - if lead == None { - r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term) - } else { - r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term) - } - propc = n.propc - } else { - r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term) - propc = nil - } - lead = r.lead - } - - select { - // TODO: maybe buffer the config propose if there exists one (the way - // described in raft dissertation) - // Currently it is dropped in Step silently. 
- case m := <-propc: - m.From = r.id - r.Step(m) - case m := <-n.recvc: - // filter out response message from unknown From. - if _, ok := r.prs[m.From]; ok || !IsResponseMsg(m.Type) { - r.Step(m) // raft never returns an error - } - case cc := <-n.confc: - if cc.NodeID == None { - r.resetPendingConf() - select { - case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: - case <-n.done: - } - break - } - switch cc.Type { - case pb.ConfChangeAddNode: - r.addNode(cc.NodeID) - case pb.ConfChangeRemoveNode: - // block incoming proposal when local node is - // removed - if cc.NodeID == r.id { - propc = nil - } - r.removeNode(cc.NodeID) - case pb.ConfChangeUpdateNode: - r.resetPendingConf() - default: - panic("unexpected conf type") - } - select { - case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: - case <-n.done: - } - case <-n.tickc: - r.tick() - case readyc <- rd: - if rd.SoftState != nil { - prevSoftSt = rd.SoftState - } - if len(rd.Entries) > 0 { - prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index - prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term - havePrevLastUnstablei = true - } - if !IsEmptyHardState(rd.HardState) { - prevHardSt = rd.HardState - } - if !IsEmptySnap(rd.Snapshot) { - prevSnapi = rd.Snapshot.Metadata.Index - } - - r.msgs = nil - r.readStates = nil - advancec = n.advancec - case <-advancec: - if prevHardSt.Commit != 0 { - r.raftLog.appliedTo(prevHardSt.Commit) - } - if havePrevLastUnstablei { - r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet) - havePrevLastUnstablei = false - } - r.raftLog.stableSnapTo(prevSnapi) - advancec = nil - case c := <-n.status: - c <- getStatus(r) - case <-n.stop: - close(n.done) - return - } - } -} - -// Tick increments the internal logical clock for this Node. Election timeouts -// and heartbeat timeouts are in units of ticks. -func (n *node) Tick() { - select { - case n.tickc <- struct{}{}: - case <-n.done: - default: - n.logger.Warningf("A tick missed to fire. Node blocks too long!") - } -} - -func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) } - -func (n *node) Propose(ctx context.Context, data []byte) error { - return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) -} - -func (n *node) Step(ctx context.Context, m pb.Message) error { - // ignore unexpected local messages receiving over network - if IsLocalMsg(m.Type) { - // TODO: return an error? - return nil - } - return n.step(ctx, m) -} - -func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error { - data, err := cc.Marshal() - if err != nil { - return err - } - return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}}) -} - -// Step advances the state machine using msgs. The ctx.Err() will be returned, -// if any. 
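Proposals enter the node through the propc path handled above. The sketch below shows the two proposal calls; ctx, data and newNodeID are caller-supplied, and both calls block until the message is accepted, the context is done, or the node is stopped.

package raftexample

import (
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"golang.org/x/net/context"
)

// propose submits a normal data proposal followed by a configuration change
// that adds a new member; both are forwarded to the leader by raft.
func propose(ctx context.Context, n raft.Node, data []byte, newNodeID uint64) error {
	if err := n.Propose(ctx, data); err != nil {
		return err
	}
	cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: newNodeID}
	return n.ProposeConfChange(ctx, cc)
}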
-func (n *node) step(ctx context.Context, m pb.Message) error { - ch := n.recvc - if m.Type == pb.MsgProp { - ch = n.propc - } - - select { - case ch <- m: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-n.done: - return ErrStopped - } -} - -func (n *node) Ready() <-chan Ready { return n.readyc } - -func (n *node) Advance() { - select { - case n.advancec <- struct{}{}: - case <-n.done: - } -} - -func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { - var cs pb.ConfState - select { - case n.confc <- cc: - case <-n.done: - } - select { - case cs = <-n.confstatec: - case <-n.done: - } - return &cs -} - -func (n *node) Status() Status { - c := make(chan Status) - select { - case n.status <- c: - return <-c - case <-n.done: - return Status{} - } -} - -func (n *node) ReportUnreachable(id uint64) { - select { - case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}: - case <-n.done: - } -} - -func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) { - rej := status == SnapshotFailure - - select { - case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}: - case <-n.done: - } -} - -func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) { - select { - // manually set 'from' and 'to', so that leader can voluntarily transfers its leadership - case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}: - case <-n.done: - case <-ctx.Done(): - } -} - -func (n *node) ReadIndex(ctx context.Context, rctx []byte) error { - return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) -} - -func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { - rd := Ready{ - Entries: r.raftLog.unstableEntries(), - CommittedEntries: r.raftLog.nextEnts(), - Messages: r.msgs, - } - if softSt := r.softState(); !softSt.equal(prevSoftSt) { - rd.SoftState = softSt - } - if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) { - rd.HardState = hardSt - } - if r.raftLog.unstable.snapshot != nil { - rd.Snapshot = *r.raftLog.unstable.snapshot - } - if len(r.readStates) != 0 { - rd.ReadStates = r.readStates - } - return rd -} diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go deleted file mode 100644 index 77c7b52efe3..00000000000 --- a/vendor/github.com/coreos/etcd/raft/progress.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import "fmt" - -const ( - ProgressStateProbe ProgressStateType = iota - ProgressStateReplicate - ProgressStateSnapshot -) - -type ProgressStateType uint64 - -var prstmap = [...]string{ - "ProgressStateProbe", - "ProgressStateReplicate", - "ProgressStateSnapshot", -} - -func (st ProgressStateType) String() string { return prstmap[uint64(st)] } - -// Progress represents a follower’s progress in the view of the leader. 
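ReportUnreachable and ReportSnapshot above are the transport's way of feeding delivery results back into raft. A sketch, assuming an application-defined send function:

package raftexample

import (
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
)

// sendWithFeedback delivers one outbound message and reports the outcome:
// unreachable peers are reported so the leader falls back to probing, and
// snapshot messages are acknowledged with SnapshotFinish or SnapshotFailure.
func sendWithFeedback(n raft.Node, m raftpb.Message, send func(raftpb.Message) error) {
	err := send(m)
	if err != nil {
		n.ReportUnreachable(m.To)
	}
	if m.Type == raftpb.MsgSnap {
		status := raft.SnapshotFinish
		if err != nil {
			status = raft.SnapshotFailure
		}
		n.ReportSnapshot(m.To, status)
	}
}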
Leader maintains -// progresses of all followers, and sends entries to the follower based on its progress. -type Progress struct { - Match, Next uint64 - // State defines how the leader should interact with the follower. - // - // When in ProgressStateProbe, leader sends at most one replication message - // per heartbeat interval. It also probes actual progress of the follower. - // - // When in ProgressStateReplicate, leader optimistically increases next - // to the latest entry sent after sending replication message. This is - // an optimized state for fast replicating log entries to the follower. - // - // When in ProgressStateSnapshot, leader should have sent out snapshot - // before and stops sending any replication message. - State ProgressStateType - // Paused is used in ProgressStateProbe. - // When Paused is true, raft should pause sending replication message to this peer. - Paused bool - // PendingSnapshot is used in ProgressStateSnapshot. - // If there is a pending snapshot, the pendingSnapshot will be set to the - // index of the snapshot. If pendingSnapshot is set, the replication process of - // this Progress will be paused. raft will not resend snapshot until the pending one - // is reported to be failed. - PendingSnapshot uint64 - - // RecentActive is true if the progress is recently active. Receiving any messages - // from the corresponding follower indicates the progress is active. - // RecentActive can be reset to false after an election timeout. - RecentActive bool - - // inflights is a sliding window for the inflight messages. - // Each inflight message contains one or more log entries. - // The max number of entries per message is defined in raft config as MaxSizePerMsg. - // Thus inflight effectively limits both the number of inflight messages - // and the bandwidth each Progress can use. - // When inflights is full, no more message should be sent. - // When a leader sends out a message, the index of the last - // entry should be added to inflights. The index MUST be added - // into inflights in order. - // When a leader receives a reply, the previous inflights should - // be freed by calling inflights.freeTo with the index of the last - // received entry. - ins *inflights -} - -func (pr *Progress) resetState(state ProgressStateType) { - pr.Paused = false - pr.PendingSnapshot = 0 - pr.State = state - pr.ins.reset() -} - -func (pr *Progress) becomeProbe() { - // If the original state is ProgressStateSnapshot, progress knows that - // the pending snapshot has been sent to this peer successfully, then - // probes from pendingSnapshot + 1. - if pr.State == ProgressStateSnapshot { - pendingSnapshot := pr.PendingSnapshot - pr.resetState(ProgressStateProbe) - pr.Next = max(pr.Match+1, pendingSnapshot+1) - } else { - pr.resetState(ProgressStateProbe) - pr.Next = pr.Match + 1 - } -} - -func (pr *Progress) becomeReplicate() { - pr.resetState(ProgressStateReplicate) - pr.Next = pr.Match + 1 -} - -func (pr *Progress) becomeSnapshot(snapshoti uint64) { - pr.resetState(ProgressStateSnapshot) - pr.PendingSnapshot = snapshoti -} - -// maybeUpdate returns false if the given n index comes from an outdated message. -// Otherwise it updates the progress and returns true. 
-func (pr *Progress) maybeUpdate(n uint64) bool { - var updated bool - if pr.Match < n { - pr.Match = n - updated = true - pr.resume() - } - if pr.Next < n+1 { - pr.Next = n + 1 - } - return updated -} - -func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 } - -// maybeDecrTo returns false if the given to index comes from an out of order message. -// Otherwise it decreases the progress next index to min(rejected, last) and returns true. -func (pr *Progress) maybeDecrTo(rejected, last uint64) bool { - if pr.State == ProgressStateReplicate { - // the rejection must be stale if the progress has matched and "rejected" - // is smaller than "match". - if rejected <= pr.Match { - return false - } - // directly decrease next to match + 1 - pr.Next = pr.Match + 1 - return true - } - - // the rejection must be stale if "rejected" does not match next - 1 - if pr.Next-1 != rejected { - return false - } - - if pr.Next = min(rejected, last+1); pr.Next < 1 { - pr.Next = 1 - } - pr.resume() - return true -} - -func (pr *Progress) pause() { pr.Paused = true } -func (pr *Progress) resume() { pr.Paused = false } - -// IsPaused returns whether sending log entries to this node has been -// paused. A node may be paused because it has rejected recent -// MsgApps, is currently waiting for a snapshot, or has reached the -// MaxInflightMsgs limit. -func (pr *Progress) IsPaused() bool { - switch pr.State { - case ProgressStateProbe: - return pr.Paused - case ProgressStateReplicate: - return pr.ins.full() - case ProgressStateSnapshot: - return true - default: - panic("unexpected state") - } -} - -func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 } - -// needSnapshotAbort returns true if snapshot progress's Match -// is equal or higher than the pendingSnapshot. -func (pr *Progress) needSnapshotAbort() bool { - return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot -} - -func (pr *Progress) String() string { - return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot) -} - -type inflights struct { - // the starting index in the buffer - start int - // number of inflights in the buffer - count int - - // the size of the buffer - size int - - // buffer contains the index of the last entry - // inside one message. - buffer []uint64 -} - -func newInflights(size int) *inflights { - return &inflights{ - size: size, - } -} - -// add adds an inflight into inflights -func (in *inflights) add(inflight uint64) { - if in.full() { - panic("cannot add into a full inflights") - } - next := in.start + in.count - size := in.size - if next >= size { - next -= size - } - if next >= len(in.buffer) { - in.growBuf() - } - in.buffer[next] = inflight - in.count++ -} - -// grow the inflight buffer by doubling up to inflights.size. We grow on demand -// instead of preallocating to inflights.size to handle systems which have -// thousands of Raft groups per process. -func (in *inflights) growBuf() { - newSize := len(in.buffer) * 2 - if newSize == 0 { - newSize = 1 - } else if newSize > in.size { - newSize = in.size - } - newBuffer := make([]uint64, newSize) - copy(newBuffer, in.buffer) - in.buffer = newBuffer -} - -// freeTo frees the inflights smaller or equal to the given `to` flight. 
-func (in *inflights) freeTo(to uint64) { - if in.count == 0 || to < in.buffer[in.start] { - // out of the left side of the window - return - } - - i, idx := 0, in.start - for i = 0; i < in.count; i++ { - if to < in.buffer[idx] { // found the first large inflight - break - } - - // increase index and maybe rotate - size := in.size - if idx++; idx >= size { - idx -= size - } - } - // free i inflights and set new start index - in.count -= i - in.start = idx - if in.count == 0 { - // inflights is empty, reset the start index so that we don't grow the - // buffer unnecessarily. - in.start = 0 - } -} - -func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) } - -// full returns true if the inflights is full. -func (in *inflights) full() bool { - return in.count == in.size -} - -// resets frees all inflights. -func (in *inflights) reset() { - in.count = 0 - in.start = 0 -} diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go deleted file mode 100644 index 7be4407ee2b..00000000000 --- a/vendor/github.com/coreos/etcd/raft/raft.go +++ /dev/null @@ -1,1253 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "bytes" - "errors" - "fmt" - "math" - "math/rand" - "sort" - "strings" - "sync" - "time" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -// None is a placeholder node ID used when there is no leader. -const None uint64 = 0 -const noLimit = math.MaxUint64 - -// Possible values for StateType. -const ( - StateFollower StateType = iota - StateCandidate - StateLeader - StatePreCandidate - numStates -) - -type ReadOnlyOption int - -const ( - // ReadOnlySafe guarantees the linearizability of the read only request by - // communicating with the quorum. It is the default and suggested option. - ReadOnlySafe ReadOnlyOption = iota - // ReadOnlyLeaseBased ensures linearizability of the read only request by - // relying on the leader lease. It can be affected by clock drift. - // If the clock drift is unbounded, leader might keep the lease longer than it - // should (clock can move backward/pause without any bound). ReadIndex is not safe - // in that case. - ReadOnlyLeaseBased -) - -// Possible values for CampaignType -const ( - // campaignPreElection represents the first phase of a normal election when - // Config.PreVote is true. - campaignPreElection CampaignType = "CampaignPreElection" - // campaignElection represents a normal (time-based) election (the second phase - // of the election when Config.PreVote is true). - campaignElection CampaignType = "CampaignElection" - // campaignTransfer represents the type of leader transfer - campaignTransfer CampaignType = "CampaignTransfer" -) - -// lockedRand is a small wrapper around rand.Rand to provide -// synchronization. Only the methods needed by the code are exposed -// (e.g. Intn). 
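ReadIndex and Ready.ReadStates together implement the read-only options defined above. The sketch assumes ReadState exposes the read index and request context as Index and RequestCtx; the rctx tag itself is chosen by the caller.

package raftexample

import (
	"bytes"

	"github.com/coreos/etcd/raft"
	"golang.org/x/net/context"
)

// requestRead tags a linearizable read with rctx and asks the node for a
// read index; the matching ReadState later shows up in Ready.ReadStates.
func requestRead(ctx context.Context, n raft.Node, rctx []byte) error {
	return n.ReadIndex(ctx, rctx)
}

// matchReadState finds the ReadState for rctx. The caller may serve the read
// once its applied index has reached the returned ReadState's Index.
func matchReadState(states []raft.ReadState, rctx []byte) (raft.ReadState, bool) {
	for _, rs := range states {
		if bytes.Equal(rs.RequestCtx, rctx) {
			return rs, true
		}
	}
	return raft.ReadState{}, false
}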
-type lockedRand struct { - mu sync.Mutex - rand *rand.Rand -} - -func (r *lockedRand) Intn(n int) int { - r.mu.Lock() - v := r.rand.Intn(n) - r.mu.Unlock() - return v -} - -var globalRand = &lockedRand{ - rand: rand.New(rand.NewSource(time.Now().UnixNano())), -} - -// CampaignType represents the type of campaigning -// the reason we use the type of string instead of uint64 -// is because it's simpler to compare and fill in raft entries -type CampaignType string - -// StateType represents the role of a node in a cluster. -type StateType uint64 - -var stmap = [...]string{ - "StateFollower", - "StateCandidate", - "StateLeader", - "StatePreCandidate", -} - -func (st StateType) String() string { - return stmap[uint64(st)] -} - -// Config contains the parameters to start a raft. -type Config struct { - // ID is the identity of the local raft. ID cannot be 0. - ID uint64 - - // peers contains the IDs of all nodes (including self) in the raft cluster. It - // should only be set when starting a new raft cluster. Restarting raft from - // previous configuration will panic if peers is set. peer is private and only - // used for testing right now. - peers []uint64 - - // ElectionTick is the number of Node.Tick invocations that must pass between - // elections. That is, if a follower does not receive any message from the - // leader of current term before ElectionTick has elapsed, it will become - // candidate and start an election. ElectionTick must be greater than - // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid - // unnecessary leader switching. - ElectionTick int - // HeartbeatTick is the number of Node.Tick invocations that must pass between - // heartbeats. That is, a leader sends heartbeat messages to maintain its - // leadership every HeartbeatTick ticks. - HeartbeatTick int - - // Storage is the storage for raft. raft generates entries and states to be - // stored in storage. raft reads the persisted entries and states out of - // Storage when it needs. raft reads out the previous state and configuration - // out of storage when restarting. - Storage Storage - // Applied is the last applied index. It should only be set when restarting - // raft. raft will not return entries to the application smaller or equal to - // Applied. If Applied is unset when restarting, raft might return previous - // applied entries. This is a very application dependent configuration. - Applied uint64 - - // MaxSizePerMsg limits the max size of each append message. Smaller value - // lowers the raft recovery cost(initial probing and message lost during normal - // operation). On the other side, it might affect the throughput during normal - // replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per - // message. - MaxSizePerMsg uint64 - // MaxInflightMsgs limits the max number of in-flight append messages during - // optimistic replication phase. The application transportation layer usually - // has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid - // overflowing that sending buffer. TODO (xiangli): feedback to application to - // limit the proposal rate? - MaxInflightMsgs int - - // CheckQuorum specifies if the leader should check quorum activity. Leader - // steps down when quorum is not active for an electionTimeout. - CheckQuorum bool - - // PreVote enables the Pre-Vote algorithm described in raft thesis section - // 9.6. This prevents disruption when a node that has been partitioned away - // rejoins the cluster. 
- PreVote bool - - // ReadOnlyOption specifies how the read only request is processed. - // - // ReadOnlySafe guarantees the linearizability of the read only request by - // communicating with the quorum. It is the default and suggested option. - // - // ReadOnlyLeaseBased ensures linearizability of the read only request by - // relying on the leader lease. It can be affected by clock drift. - // If the clock drift is unbounded, leader might keep the lease longer than it - // should (clock can move backward/pause without any bound). ReadIndex is not safe - // in that case. - ReadOnlyOption ReadOnlyOption - - // Logger is the logger used for raft log. For multinode which can host - // multiple raft group, each raft group can have its own logger - Logger Logger -} - -func (c *Config) validate() error { - if c.ID == None { - return errors.New("cannot use none as id") - } - - if c.HeartbeatTick <= 0 { - return errors.New("heartbeat tick must be greater than 0") - } - - if c.ElectionTick <= c.HeartbeatTick { - return errors.New("election tick must be greater than heartbeat tick") - } - - if c.Storage == nil { - return errors.New("storage cannot be nil") - } - - if c.MaxInflightMsgs <= 0 { - return errors.New("max inflight messages must be greater than 0") - } - - if c.Logger == nil { - c.Logger = raftLogger - } - - return nil -} - -type raft struct { - id uint64 - - Term uint64 - Vote uint64 - - readStates []ReadState - - // the log - raftLog *raftLog - - maxInflight int - maxMsgSize uint64 - prs map[uint64]*Progress - - state StateType - - votes map[uint64]bool - - msgs []pb.Message - - // the leader id - lead uint64 - // leadTransferee is id of the leader transfer target when its value is not zero. - // Follow the procedure defined in raft thesis 3.10. - leadTransferee uint64 - // New configuration is ignored if there exists unapplied configuration. - pendingConf bool - - readOnly *readOnly - - // number of ticks since it reached last electionTimeout when it is leader - // or candidate. - // number of ticks since it reached last electionTimeout or received a - // valid message from current leader when it is a follower. - electionElapsed int - - // number of ticks since it reached last heartbeatTimeout. - // only leader keeps heartbeatElapsed. - heartbeatElapsed int - - checkQuorum bool - preVote bool - - heartbeatTimeout int - electionTimeout int - // randomizedElectionTimeout is a random number between - // [electiontimeout, 2 * electiontimeout - 1]. It gets reset - // when raft changes its state to follower or candidate. - randomizedElectionTimeout int - - tick func() - step stepFunc - - logger Logger -} - -func newRaft(c *Config) *raft { - if err := c.validate(); err != nil { - panic(err.Error()) - } - raftlog := newLog(c.Storage, c.Logger) - hs, cs, err := c.Storage.InitialState() - if err != nil { - panic(err) // TODO(bdarnell) - } - peers := c.peers - if len(cs.Nodes) > 0 { - if len(peers) > 0 { - // TODO(bdarnell): the peers argument is always nil except in - // tests; the argument should be removed and these tests should be - // updated to specify their nodes through a snapshot. 
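A filled-in Config and the two start paths look roughly as follows; the tick counts, message limits and peer IDs are illustrative, and MemoryStorage is the package's in-memory Storage implementation.

package raftexample

import "github.com/coreos/etcd/raft"

// bootstrapNode starts a brand-new three-node cluster via StartNode, or
// recovers an existing member from its Storage via RestartNode.
func bootstrapNode(id uint64, storage *raft.MemoryStorage, fresh bool) raft.Node {
	c := &raft.Config{
		ID:              id,
		ElectionTick:    10, // must be greater than HeartbeatTick
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
		CheckQuorum:     true,
	}
	if !fresh {
		// Membership and state are recovered from Storage.
		return raft.RestartNode(c)
	}
	return raft.StartNode(c, []raft.Peer{{ID: 1}, {ID: 2}, {ID: 3}})
}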
- panic("cannot specify both newRaft(peers) and ConfState.Nodes)") - } - peers = cs.Nodes - } - r := &raft{ - id: c.ID, - lead: None, - raftLog: raftlog, - maxMsgSize: c.MaxSizePerMsg, - maxInflight: c.MaxInflightMsgs, - prs: make(map[uint64]*Progress), - electionTimeout: c.ElectionTick, - heartbeatTimeout: c.HeartbeatTick, - logger: c.Logger, - checkQuorum: c.CheckQuorum, - preVote: c.PreVote, - readOnly: newReadOnly(c.ReadOnlyOption), - } - for _, p := range peers { - r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)} - } - if !isHardStateEqual(hs, emptyState) { - r.loadState(hs) - } - if c.Applied > 0 { - raftlog.appliedTo(c.Applied) - } - r.becomeFollower(r.Term, None) - - var nodesStrs []string - for _, n := range r.nodes() { - nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) - } - - r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]", - r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm()) - return r -} - -func (r *raft) hasLeader() bool { return r.lead != None } - -func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} } - -func (r *raft) hardState() pb.HardState { - return pb.HardState{ - Term: r.Term, - Vote: r.Vote, - Commit: r.raftLog.committed, - } -} - -func (r *raft) quorum() int { return len(r.prs)/2 + 1 } - -func (r *raft) nodes() []uint64 { - nodes := make([]uint64, 0, len(r.prs)) - for id := range r.prs { - nodes = append(nodes, id) - } - sort.Sort(uint64Slice(nodes)) - return nodes -} - -// send persists state to stable storage and then sends to its mailbox. -func (r *raft) send(m pb.Message) { - m.From = r.id - if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { - if m.Term == 0 { - // PreVote RPCs are sent at a term other than our actual term, so the code - // that sends these messages is responsible for setting the term. - panic(fmt.Sprintf("term should be set when sending %s", m.Type)) - } - } else { - if m.Term != 0 { - panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term)) - } - // do not attach term to MsgProp, MsgReadIndex - // proposals are a way to forward to the leader and - // should be treated as local message. - // MsgReadIndex is also forwarded to leader. - if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex { - m.Term = r.Term - } - } - r.msgs = append(r.msgs, m) -} - -// sendAppend sends RPC, with entries to the given peer. 
-func (r *raft) sendAppend(to uint64) { - pr := r.prs[to] - if pr.IsPaused() { - return - } - m := pb.Message{} - m.To = to - - term, errt := r.raftLog.term(pr.Next - 1) - ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) - - if errt != nil || erre != nil { // send snapshot if we failed to get term or entries - if !pr.RecentActive { - r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to) - return - } - - m.Type = pb.MsgSnap - snapshot, err := r.raftLog.snapshot() - if err != nil { - if err == ErrSnapshotTemporarilyUnavailable { - r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to) - return - } - panic(err) // TODO(bdarnell) - } - if IsEmptySnap(snapshot) { - panic("need non-empty snapshot") - } - m.Snapshot = snapshot - sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term - r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", - r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) - pr.becomeSnapshot(sindex) - r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) - } else { - m.Type = pb.MsgApp - m.Index = pr.Next - 1 - m.LogTerm = term - m.Entries = ents - m.Commit = r.raftLog.committed - if n := len(m.Entries); n != 0 { - switch pr.State { - // optimistically increase the next when in ProgressStateReplicate - case ProgressStateReplicate: - last := m.Entries[n-1].Index - pr.optimisticUpdate(last) - pr.ins.add(last) - case ProgressStateProbe: - pr.pause() - default: - r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) - } - } - } - r.send(m) -} - -// sendHeartbeat sends an empty MsgApp -func (r *raft) sendHeartbeat(to uint64, ctx []byte) { - // Attach the commit as min(to.matched, r.committed). - // When the leader sends out heartbeat message, - // the receiver(follower) might not be matched with the leader - // or it might not have all the committed entries. - // The leader MUST NOT forward the follower's commit to - // an unmatched index. - commit := min(r.prs[to].Match, r.raftLog.committed) - m := pb.Message{ - To: to, - Type: pb.MsgHeartbeat, - Commit: commit, - Context: ctx, - } - - r.send(m) -} - -// bcastAppend sends RPC, with entries to all peers that are not up-to-date -// according to the progress recorded in r.prs. -func (r *raft) bcastAppend() { - for id := range r.prs { - if id == r.id { - continue - } - r.sendAppend(id) - } -} - -// bcastHeartbeat sends RPC, without entries to all the peers. -func (r *raft) bcastHeartbeat() { - lastCtx := r.readOnly.lastPendingRequestCtx() - if len(lastCtx) == 0 { - r.bcastHeartbeatWithCtx(nil) - } else { - r.bcastHeartbeatWithCtx([]byte(lastCtx)) - } -} - -func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { - for id := range r.prs { - if id == r.id { - continue - } - r.sendHeartbeat(id, ctx) - } -} - -// maybeCommit attempts to advance the commit index. Returns true if -// the commit index changed (in which case the caller should call -// r.bcastAppend). -func (r *raft) maybeCommit() bool { - // TODO(bmizerany): optimize.. 
Currently naive - mis := make(uint64Slice, 0, len(r.prs)) - for id := range r.prs { - mis = append(mis, r.prs[id].Match) - } - sort.Sort(sort.Reverse(mis)) - mci := mis[r.quorum()-1] - return r.raftLog.maybeCommit(mci, r.Term) -} - -func (r *raft) reset(term uint64) { - if r.Term != term { - r.Term = term - r.Vote = None - } - r.lead = None - - r.electionElapsed = 0 - r.heartbeatElapsed = 0 - r.resetRandomizedElectionTimeout() - - r.abortLeaderTransfer() - - r.votes = make(map[uint64]bool) - for id := range r.prs { - r.prs[id] = &Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight)} - if id == r.id { - r.prs[id].Match = r.raftLog.lastIndex() - } - } - r.pendingConf = false - r.readOnly = newReadOnly(r.readOnly.option) -} - -func (r *raft) appendEntry(es ...pb.Entry) { - li := r.raftLog.lastIndex() - for i := range es { - es[i].Term = r.Term - es[i].Index = li + 1 + uint64(i) - } - r.raftLog.append(es...) - r.prs[r.id].maybeUpdate(r.raftLog.lastIndex()) - // Regardless of maybeCommit's return, our caller will call bcastAppend. - r.maybeCommit() -} - -// tickElection is run by followers and candidates after r.electionTimeout. -func (r *raft) tickElection() { - r.electionElapsed++ - - if r.promotable() && r.pastElectionTimeout() { - r.electionElapsed = 0 - r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) - } -} - -// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout. -func (r *raft) tickHeartbeat() { - r.heartbeatElapsed++ - r.electionElapsed++ - - if r.electionElapsed >= r.electionTimeout { - r.electionElapsed = 0 - if r.checkQuorum { - r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) - } - // If current leader cannot transfer leadership in electionTimeout, it becomes leader again. - if r.state == StateLeader && r.leadTransferee != None { - r.abortLeaderTransfer() - } - } - - if r.state != StateLeader { - return - } - - if r.heartbeatElapsed >= r.heartbeatTimeout { - r.heartbeatElapsed = 0 - r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}) - } -} - -func (r *raft) becomeFollower(term uint64, lead uint64) { - r.step = stepFollower - r.reset(term) - r.tick = r.tickElection - r.lead = lead - r.state = StateFollower - r.logger.Infof("%x became follower at term %d", r.id, r.Term) -} - -func (r *raft) becomeCandidate() { - // TODO(xiangli) remove the panic when the raft implementation is stable - if r.state == StateLeader { - panic("invalid transition [leader -> candidate]") - } - r.step = stepCandidate - r.reset(r.Term + 1) - r.tick = r.tickElection - r.Vote = r.id - r.state = StateCandidate - r.logger.Infof("%x became candidate at term %d", r.id, r.Term) -} - -func (r *raft) becomePreCandidate() { - // TODO(xiangli) remove the panic when the raft implementation is stable - if r.state == StateLeader { - panic("invalid transition [leader -> pre-candidate]") - } - // Becoming a pre-candidate changes our step functions and state, - // but doesn't change anything else. In particular it does not increase - // r.Term or change r.Vote. 
- r.step = stepCandidate - r.tick = r.tickElection - r.state = StatePreCandidate - r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term) -} - -func (r *raft) becomeLeader() { - // TODO(xiangli) remove the panic when the raft implementation is stable - if r.state == StateFollower { - panic("invalid transition [follower -> leader]") - } - r.step = stepLeader - r.reset(r.Term) - r.tick = r.tickHeartbeat - r.lead = r.id - r.state = StateLeader - ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit) - if err != nil { - r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err) - } - - nconf := numOfPendingConf(ents) - if nconf > 1 { - panic("unexpected multiple uncommitted config entry") - } - if nconf == 1 { - r.pendingConf = true - } - - r.appendEntry(pb.Entry{Data: nil}) - r.logger.Infof("%x became leader at term %d", r.id, r.Term) -} - -func (r *raft) campaign(t CampaignType) { - var term uint64 - var voteMsg pb.MessageType - if t == campaignPreElection { - r.becomePreCandidate() - voteMsg = pb.MsgPreVote - // PreVote RPCs are sent for the next term before we've incremented r.Term. - term = r.Term + 1 - } else { - r.becomeCandidate() - voteMsg = pb.MsgVote - term = r.Term - } - if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) { - // We won the election after voting for ourselves (which must mean that - // this is a single-node cluster). Advance to the next state. - if t == campaignPreElection { - r.campaign(campaignElection) - } else { - r.becomeLeader() - } - return - } - for id := range r.prs { - if id == r.id { - continue - } - r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term) - - var ctx []byte - if t == campaignTransfer { - ctx = []byte(t) - } - r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx}) - } -} - -func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) { - if v { - r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term) - } else { - r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term) - } - if _, ok := r.votes[id]; !ok { - r.votes[id] = v - } - for _, vv := range r.votes { - if vv { - granted++ - } - } - return granted -} - -func (r *raft) Step(m pb.Message) error { - // Handle the message term, which may result in our stepping down to a follower. - switch { - case m.Term == 0: - // local message - case m.Term > r.Term: - lead := m.From - if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { - force := bytes.Equal(m.Context, []byte(campaignTransfer)) - inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout - if !force && inLease { - // If a server receives a RequestVote request within the minimum election timeout - // of hearing from a current leader, it does not update its term or grant its vote - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed) - return nil - } - lead = None - } - switch { - case m.Type == pb.MsgPreVote: - // Never change our term in response to a PreVote - case m.Type == pb.MsgPreVoteResp && !m.Reject: - // We send pre-vote requests with a term in our future. 
If the - // pre-vote is granted, we will increment our term when we get a - // quorum. If it is not, the term comes from the node that - // rejected our vote so we should become a follower at the new - // term. - default: - r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]", - r.id, r.Term, m.Type, m.From, m.Term) - r.becomeFollower(m.Term, lead) - } - - case m.Term < r.Term: - if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) { - // We have received messages from a leader at a lower term. It is possible - // that these messages were simply delayed in the network, but this could - // also mean that this node has advanced its term number during a network - // partition, and it is now unable to either win an election or to rejoin - // the majority on the old term. If checkQuorum is false, this will be - // handled by incrementing term numbers in response to MsgVote with a - // higher term, but if checkQuorum is true we may not advance the term on - // MsgVote and must generate other messages to advance the term. The net - // result of these two features is to minimize the disruption caused by - // nodes that have been removed from the cluster's configuration: a - // removed node will send MsgVotes (or MsgPreVotes) which will be ignored, - // but it will not receive MsgApp or MsgHeartbeat, so it will not create - // disruptive term increases - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp}) - } else { - // ignore other cases - r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]", - r.id, r.Term, m.Type, m.From, m.Term) - } - return nil - } - - switch m.Type { - case pb.MsgHup: - if r.state != StateLeader { - ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit) - if err != nil { - r.logger.Panicf("unexpected error getting unapplied entries (%v)", err) - } - if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied { - r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n) - return nil - } - - r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term) - if r.preVote { - r.campaign(campaignPreElection) - } else { - r.campaign(campaignElection) - } - } else { - r.logger.Debugf("%x ignoring MsgHup because already leader", r.id) - } - - case pb.MsgVote, pb.MsgPreVote: - // The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should - // always equal r.Term. - if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) { - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) - r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type)}) - if m.Type == pb.MsgVote { - // Only record real votes. - r.electionElapsed = 0 - r.Vote = m.From - } - } else { - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) - r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type), Reject: true}) - } - - default: - r.step(r, m) - } - return nil -} - -type stepFunc func(r *raft, m pb.Message) - -func stepLeader(r *raft, m pb.Message) { - // These message types do not require any progress for m.From. 
- switch m.Type { - case pb.MsgBeat: - r.bcastHeartbeat() - return - case pb.MsgCheckQuorum: - if !r.checkQuorumActive() { - r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id) - r.becomeFollower(r.Term, None) - } - return - case pb.MsgProp: - if len(m.Entries) == 0 { - r.logger.Panicf("%x stepped empty MsgProp", r.id) - } - if _, ok := r.prs[r.id]; !ok { - // If we are not currently a member of the range (i.e. this node - // was removed from the configuration while serving as leader), - // drop any new proposals. - return - } - if r.leadTransferee != None { - r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee) - return - } - - for i, e := range m.Entries { - if e.Type == pb.EntryConfChange { - if r.pendingConf { - r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String()) - m.Entries[i] = pb.Entry{Type: pb.EntryNormal} - } - r.pendingConf = true - } - } - r.appendEntry(m.Entries...) - r.bcastAppend() - return - case pb.MsgReadIndex: - if r.quorum() > 1 { - if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term { - // Reject read only request when this leader has not committed any log entry at its term. - return - } - - // thinking: use an interally defined context instead of the user given context. - // We can express this in terms of the term and index instead of a user-supplied value. - // This would allow multiple reads to piggyback on the same message. - switch r.readOnly.option { - case ReadOnlySafe: - r.readOnly.addRequest(r.raftLog.committed, m) - r.bcastHeartbeatWithCtx(m.Entries[0].Data) - case ReadOnlyLeaseBased: - var ri uint64 - if r.checkQuorum { - ri = r.raftLog.committed - } - if m.From == None || m.From == r.id { // from local member - r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) - } else { - r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries}) - } - } - } else { - r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) - } - - return - } - - // All other message types require a progress for m.From (pr). - pr, prOk := r.prs[m.From] - if !prOk { - r.logger.Debugf("%x no progress available for %x", r.id, m.From) - return - } - switch m.Type { - case pb.MsgAppResp: - pr.RecentActive = true - - if m.Reject { - r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d", - r.id, m.RejectHint, m.From, m.Index) - if pr.maybeDecrTo(m.Index, m.RejectHint) { - r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr) - if pr.State == ProgressStateReplicate { - pr.becomeProbe() - } - r.sendAppend(m.From) - } - } else { - oldPaused := pr.IsPaused() - if pr.maybeUpdate(m.Index) { - switch { - case pr.State == ProgressStateProbe: - pr.becomeReplicate() - case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort(): - r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - pr.becomeProbe() - case pr.State == ProgressStateReplicate: - pr.ins.freeTo(m.Index) - } - - if r.maybeCommit() { - r.bcastAppend() - } else if oldPaused { - // update() reset the wait state on this node. If we had delayed sending - // an update before, send it now. - r.sendAppend(m.From) - } - // Transfer leadership is in progress. 
- if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() { - r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From) - r.sendTimeoutNow(m.From) - } - } - } - case pb.MsgHeartbeatResp: - pr.RecentActive = true - pr.resume() - - // free one slot for the full inflights window to allow progress. - if pr.State == ProgressStateReplicate && pr.ins.full() { - pr.ins.freeFirstOne() - } - if pr.Match < r.raftLog.lastIndex() { - r.sendAppend(m.From) - } - - if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 { - return - } - - ackCount := r.readOnly.recvAck(m) - if ackCount < r.quorum() { - return - } - - rss := r.readOnly.advance(m) - for _, rs := range rss { - req := rs.req - if req.From == None || req.From == r.id { // from local member - r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data}) - } else { - r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries}) - } - } - case pb.MsgSnapStatus: - if pr.State != ProgressStateSnapshot { - return - } - if !m.Reject { - pr.becomeProbe() - r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - } else { - pr.snapshotFailure() - pr.becomeProbe() - r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - } - // If snapshot finish, wait for the msgAppResp from the remote node before sending - // out the next msgApp. - // If snapshot failure, wait for a heartbeat interval before next try - pr.pause() - case pb.MsgUnreachable: - // During optimistic replication, if the remote becomes unreachable, - // there is huge probability that a MsgApp is lost. - if pr.State == ProgressStateReplicate { - pr.becomeProbe() - } - r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr) - case pb.MsgTransferLeader: - leadTransferee := m.From - lastLeadTransferee := r.leadTransferee - if lastLeadTransferee != None { - if lastLeadTransferee == leadTransferee { - r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x", - r.id, r.Term, leadTransferee, leadTransferee) - return - } - r.abortLeaderTransfer() - r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee) - } - if leadTransferee == r.id { - r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id) - return - } - // Transfer leadership to third party. - r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee) - // Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed. - r.electionElapsed = 0 - r.leadTransferee = leadTransferee - if pr.Match == r.raftLog.lastIndex() { - r.sendTimeoutNow(leadTransferee) - r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee) - } else { - r.sendAppend(leadTransferee) - } - } -} - -// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is -// whether they respond to MsgVoteResp or MsgPreVoteResp. -func stepCandidate(r *raft, m pb.Message) { - // Only handle vote responses corresponding to our candidacy (while in - // StateCandidate, we may get stale MsgPreVoteResp messages in this term from - // our pre-candidate state). 
- var myVoteRespType pb.MessageType - if r.state == StatePreCandidate { - myVoteRespType = pb.MsgPreVoteResp - } else { - myVoteRespType = pb.MsgVoteResp - } - switch m.Type { - case pb.MsgProp: - r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) - return - case pb.MsgApp: - r.becomeFollower(r.Term, m.From) - r.handleAppendEntries(m) - case pb.MsgHeartbeat: - r.becomeFollower(r.Term, m.From) - r.handleHeartbeat(m) - case pb.MsgSnap: - r.becomeFollower(m.Term, m.From) - r.handleSnapshot(m) - case myVoteRespType: - gr := r.poll(m.From, m.Type, !m.Reject) - r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr) - switch r.quorum() { - case gr: - if r.state == StatePreCandidate { - r.campaign(campaignElection) - } else { - r.becomeLeader() - r.bcastAppend() - } - case len(r.votes) - gr: - r.becomeFollower(r.Term, None) - } - case pb.MsgTimeoutNow: - r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From) - } -} - -func stepFollower(r *raft, m pb.Message) { - switch m.Type { - case pb.MsgProp: - if r.lead == None { - r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) - return - } - m.To = r.lead - r.send(m) - case pb.MsgApp: - r.electionElapsed = 0 - r.lead = m.From - r.handleAppendEntries(m) - case pb.MsgHeartbeat: - r.electionElapsed = 0 - r.lead = m.From - r.handleHeartbeat(m) - case pb.MsgSnap: - r.electionElapsed = 0 - r.lead = m.From - r.handleSnapshot(m) - case pb.MsgTransferLeader: - if r.lead == None { - r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term) - return - } - m.To = r.lead - r.send(m) - case pb.MsgTimeoutNow: - if r.promotable() { - r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From) - // Leadership transfers never use pre-vote even if r.preVote is true; we - // know we are not recovering from a partition so there is no need for the - // extra round trip. 
- r.campaign(campaignTransfer) - } else { - r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From) - } - case pb.MsgReadIndex: - if r.lead == None { - r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term) - return - } - m.To = r.lead - r.send(m) - case pb.MsgReadIndexResp: - if len(m.Entries) != 1 { - r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries)) - return - } - r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data}) - } -} - -func (r *raft) handleAppendEntries(m pb.Message) { - if m.Index < r.raftLog.committed { - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) - return - } - - if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok { - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex}) - } else { - r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x", - r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From) - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()}) - } -} - -func (r *raft) handleHeartbeat(m pb.Message) { - r.raftLog.commitTo(m.Commit) - r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context}) -} - -func (r *raft) handleSnapshot(m pb.Message) { - sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term - if r.restore(m.Snapshot) { - r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, sindex, sterm) - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()}) - } else { - r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, sindex, sterm) - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) - } -} - -// restore recovers the state machine from a snapshot. It restores the log and the -// configuration of state machine. -func (r *raft) restore(s pb.Snapshot) bool { - if s.Metadata.Index <= r.raftLog.committed { - return false - } - if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { - r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) - r.raftLog.commitTo(s.Metadata.Index) - return false - } - - r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) - - r.raftLog.restore(s) - r.prs = make(map[uint64]*Progress) - for _, n := range s.Metadata.ConfState.Nodes { - match, next := uint64(0), r.raftLog.lastIndex()+1 - if n == r.id { - match = next - 1 - } - r.setProgress(n, match, next) - r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.prs[n]) - } - return true -} - -// promotable indicates whether state machine can be promoted to leader, -// which is true when its own id is in progress list. 
-func (r *raft) promotable() bool { - _, ok := r.prs[r.id] - return ok -} - -func (r *raft) addNode(id uint64) { - r.pendingConf = false - if _, ok := r.prs[id]; ok { - // Ignore any redundant addNode calls (which can happen because the - // initial bootstrapping entries are applied twice). - return - } - - r.setProgress(id, 0, r.raftLog.lastIndex()+1) -} - -func (r *raft) removeNode(id uint64) { - r.delProgress(id) - r.pendingConf = false - - // do not try to commit or abort transferring if there is no nodes in the cluster. - if len(r.prs) == 0 { - return - } - - // The quorum size is now smaller, so see if any pending entries can - // be committed. - if r.maybeCommit() { - r.bcastAppend() - } - // If the removed node is the leadTransferee, then abort the leadership transferring. - if r.state == StateLeader && r.leadTransferee == id { - r.abortLeaderTransfer() - } -} - -func (r *raft) resetPendingConf() { r.pendingConf = false } - -func (r *raft) setProgress(id, match, next uint64) { - r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)} -} - -func (r *raft) delProgress(id uint64) { - delete(r.prs, id) -} - -func (r *raft) loadState(state pb.HardState) { - if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() { - r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex()) - } - r.raftLog.committed = state.Commit - r.Term = state.Term - r.Vote = state.Vote -} - -// pastElectionTimeout returns true iff r.electionElapsed is greater -// than or equal to the randomized election timeout in -// [electiontimeout, 2 * electiontimeout - 1]. -func (r *raft) pastElectionTimeout() bool { - return r.electionElapsed >= r.randomizedElectionTimeout -} - -func (r *raft) resetRandomizedElectionTimeout() { - r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout) -} - -// checkQuorumActive returns true if the quorum is active from -// the view of the local raft state machine. Otherwise, it returns -// false. -// checkQuorumActive also resets all RecentActive to false. -func (r *raft) checkQuorumActive() bool { - var act int - - for id := range r.prs { - if id == r.id { // self is always active - act++ - continue - } - - if r.prs[id].RecentActive { - act++ - } - - r.prs[id].RecentActive = false - } - - return act >= r.quorum() -} - -func (r *raft) sendTimeoutNow(to uint64) { - r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) -} - -func (r *raft) abortLeaderTransfer() { - r.leadTransferee = None -} - -func numOfPendingConf(ents []pb.Entry) int { - n := 0 - for i := range ents { - if ents[i].Type == pb.EntryConfChange { - n++ - } - } - return n -} diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go deleted file mode 100644 index 86ad3120708..00000000000 --- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ /dev/null @@ -1,1900 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: raft.proto -// DO NOT EDIT! - -/* - Package raftpb is a generated protocol buffer package. - - It is generated from these files: - raft.proto - - It has these top-level messages: - Entry - SnapshotMetadata - Snapshot - Message - HardState - ConfState - ConfChange -*/ -package raftpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type EntryType int32 - -const ( - EntryNormal EntryType = 0 - EntryConfChange EntryType = 1 -) - -var EntryType_name = map[int32]string{ - 0: "EntryNormal", - 1: "EntryConfChange", -} -var EntryType_value = map[string]int32{ - "EntryNormal": 0, - "EntryConfChange": 1, -} - -func (x EntryType) Enum() *EntryType { - p := new(EntryType) - *p = x - return p -} -func (x EntryType) String() string { - return proto.EnumName(EntryType_name, int32(x)) -} -func (x *EntryType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") - if err != nil { - return err - } - *x = EntryType(value) - return nil -} -func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } - -type MessageType int32 - -const ( - MsgHup MessageType = 0 - MsgBeat MessageType = 1 - MsgProp MessageType = 2 - MsgApp MessageType = 3 - MsgAppResp MessageType = 4 - MsgVote MessageType = 5 - MsgVoteResp MessageType = 6 - MsgSnap MessageType = 7 - MsgHeartbeat MessageType = 8 - MsgHeartbeatResp MessageType = 9 - MsgUnreachable MessageType = 10 - MsgSnapStatus MessageType = 11 - MsgCheckQuorum MessageType = 12 - MsgTransferLeader MessageType = 13 - MsgTimeoutNow MessageType = 14 - MsgReadIndex MessageType = 15 - MsgReadIndexResp MessageType = 16 - MsgPreVote MessageType = 17 - MsgPreVoteResp MessageType = 18 -) - -var MessageType_name = map[int32]string{ - 0: "MsgHup", - 1: "MsgBeat", - 2: "MsgProp", - 3: "MsgApp", - 4: "MsgAppResp", - 5: "MsgVote", - 6: "MsgVoteResp", - 7: "MsgSnap", - 8: "MsgHeartbeat", - 9: "MsgHeartbeatResp", - 10: "MsgUnreachable", - 11: "MsgSnapStatus", - 12: "MsgCheckQuorum", - 13: "MsgTransferLeader", - 14: "MsgTimeoutNow", - 15: "MsgReadIndex", - 16: "MsgReadIndexResp", - 17: "MsgPreVote", - 18: "MsgPreVoteResp", -} -var MessageType_value = map[string]int32{ - "MsgHup": 0, - "MsgBeat": 1, - "MsgProp": 2, - "MsgApp": 3, - "MsgAppResp": 4, - "MsgVote": 5, - "MsgVoteResp": 6, - "MsgSnap": 7, - "MsgHeartbeat": 8, - "MsgHeartbeatResp": 9, - "MsgUnreachable": 10, - "MsgSnapStatus": 11, - "MsgCheckQuorum": 12, - "MsgTransferLeader": 13, - "MsgTimeoutNow": 14, - "MsgReadIndex": 15, - "MsgReadIndexResp": 16, - "MsgPreVote": 17, - "MsgPreVoteResp": 18, -} - -func (x MessageType) Enum() *MessageType { - p := new(MessageType) - *p = x - return p -} -func (x MessageType) String() string { - return proto.EnumName(MessageType_name, int32(x)) -} -func (x *MessageType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") - if err != nil { - return err - } - *x = MessageType(value) - return nil -} -func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } - -type ConfChangeType int32 - -const ( - ConfChangeAddNode ConfChangeType = 0 - ConfChangeRemoveNode ConfChangeType = 1 - ConfChangeUpdateNode ConfChangeType = 2 -) - -var ConfChangeType_name = map[int32]string{ - 0: "ConfChangeAddNode", - 1: "ConfChangeRemoveNode", - 2: "ConfChangeUpdateNode", -} -var ConfChangeType_value = map[string]int32{ - "ConfChangeAddNode": 0, - "ConfChangeRemoveNode": 1, - 
"ConfChangeUpdateNode": 2, -} - -func (x ConfChangeType) Enum() *ConfChangeType { - p := new(ConfChangeType) - *p = x - return p -} -func (x ConfChangeType) String() string { - return proto.EnumName(ConfChangeType_name, int32(x)) -} -func (x *ConfChangeType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") - if err != nil { - return err - } - *x = ConfChangeType(value) - return nil -} -func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } - -type Entry struct { - Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` - Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` - Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` - Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Entry) Reset() { *m = Entry{} } -func (m *Entry) String() string { return proto.CompactTextString(m) } -func (*Entry) ProtoMessage() {} -func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } - -type SnapshotMetadata struct { - ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` - Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` - Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } -func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } -func (*SnapshotMetadata) ProtoMessage() {} -func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } - -type Snapshot struct { - Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } - -type Message struct { - Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` - To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` - From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` - Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` - LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` - Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` - Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` - Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` - Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` - Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` - RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` - Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } - -type HardState struct { - Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` - Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` - Commit uint64 `protobuf:"varint,3,opt,name=commit" 
json:"commit"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HardState) Reset() { *m = HardState{} } -func (m *HardState) String() string { return proto.CompactTextString(m) } -func (*HardState) ProtoMessage() {} -func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } - -type ConfState struct { - Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ConfState) Reset() { *m = ConfState{} } -func (m *ConfState) String() string { return proto.CompactTextString(m) } -func (*ConfState) ProtoMessage() {} -func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } - -type ConfChange struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` - NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` - Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ConfChange) Reset() { *m = ConfChange{} } -func (m *ConfChange) String() string { return proto.CompactTextString(m) } -func (*ConfChange) ProtoMessage() {} -func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } - -func init() { - proto.RegisterType((*Entry)(nil), "raftpb.Entry") - proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") - proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") - proto.RegisterType((*Message)(nil), "raftpb.Message") - proto.RegisterType((*HardState)(nil), "raftpb.HardState") - proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") - proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") - proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) - proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) - proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) -} -func (m *Entry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Entry) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Type)) - dAtA[i] = 0x10 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - dAtA[i] = 0x18 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Index)) - if m.Data != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) - n1, err := m.ConfState.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Index)) - dAtA[i] = 0x18 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Snapshot) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Data != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - dAtA[i] = 0x12 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) - n2, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Message) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Type)) - dAtA[i] = 0x10 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.To)) - dAtA[i] = 0x18 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.From)) - dAtA[i] = 0x20 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - dAtA[i] = 0x28 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) - dAtA[i] = 0x30 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Index)) - if len(m.Entries) > 0 { - for _, msg := range m.Entries { - dAtA[i] = 0x3a - i++ - i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x40 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) - dAtA[i] = 0x4a - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) - n3, err := m.Snapshot.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x50 - i++ - if m.Reject { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x58 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) - if m.Context != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) - i += copy(dAtA[i:], m.Context) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *HardState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HardState) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - dAtA[i] = 0x10 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) - dAtA[i] = 0x18 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ConfState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Nodes) > 0 { - for _, num := range m.Nodes { - dAtA[i] = 0x8 - i++ - i = encodeVarintRaft(dAtA, i, uint64(num)) - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ConfChange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return 
nil, err - } - return dAtA[:n], nil -} - -func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.ID)) - dAtA[i] = 0x10 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.Type)) - dAtA[i] = 0x18 - i++ - i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) - if m.Context != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) - i += copy(dAtA[i:], m.Context) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Raft(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Raft(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Entry) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.Type)) - n += 1 + sovRaft(uint64(m.Term)) - n += 1 + sovRaft(uint64(m.Index)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovRaft(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SnapshotMetadata) Size() (n int) { - var l int - _ = l - l = m.ConfState.Size() - n += 1 + l + sovRaft(uint64(l)) - n += 1 + sovRaft(uint64(m.Index)) - n += 1 + sovRaft(uint64(m.Term)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Snapshot) Size() (n int) { - var l int - _ = l - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovRaft(uint64(l)) - } - l = m.Metadata.Size() - n += 1 + l + sovRaft(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Message) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.Type)) - n += 1 + sovRaft(uint64(m.To)) - n += 1 + sovRaft(uint64(m.From)) - n += 1 + sovRaft(uint64(m.Term)) - n += 1 + sovRaft(uint64(m.LogTerm)) - n += 1 + sovRaft(uint64(m.Index)) - if len(m.Entries) > 0 { - for _, e := range m.Entries { - l = e.Size() - n += 1 + l + sovRaft(uint64(l)) - } - } - n += 1 + sovRaft(uint64(m.Commit)) - l = m.Snapshot.Size() - n += 1 + l + sovRaft(uint64(l)) - n += 2 - n += 1 + sovRaft(uint64(m.RejectHint)) - if m.Context != nil { - l = len(m.Context) - n += 1 + l + sovRaft(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HardState) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.Term)) - n += 1 + sovRaft(uint64(m.Vote)) - n += 1 + sovRaft(uint64(m.Commit)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConfState) Size() (n int) { - var l int - _ = l - if len(m.Nodes) > 0 { - for _, e := range m.Nodes { - n += 1 + sovRaft(uint64(e)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConfChange) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.ID)) - n += 1 + sovRaft(uint64(m.Type)) - n += 1 + 
sovRaft(uint64(m.NodeID)) - if m.Context != nil { - l = len(m.Context) - n += 1 + l + sovRaft(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRaft(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRaft(x uint64) (n int) { - return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Entry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Entry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= (EntryType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaft(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaft(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Snapshot) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaft(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Message) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Message: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= (MessageType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) - } - m.To = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.To |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - m.From = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.From |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) - } - m.LogTerm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LogTerm |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Entries = append(m.Entries, Entry{}) - if err := 
m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) - } - m.Commit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Commit |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Reject = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) - } - m.RejectHint = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RejectHint |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) - if m.Context == nil { - m.Context = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaft(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HardState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HardState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) - } - m.Vote = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Vote |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) - } - m.Commit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Commit |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaft(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Nodes = append(m.Nodes, v) - default: - iNdEx = preIndex - skippy, err := skipRaft(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfChange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= (ConfChangeType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) - } - m.NodeID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NodeID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) - if m.Context == nil { - m.Context = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaft(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRaft(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRaft - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRaft(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } - -var fileDescriptorRaft = []byte{ - // 790 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0x16, 0x29, 0xea, 0x6f, 0x28, 0xcb, 0xab, 0xb5, 0x5a, 0x2c, 0x0c, 0x43, 0x55, 0x85, 0x1e, - 0x04, 0x17, 0x76, 0x5b, 0x1d, 0x7a, 0xe8, 0xcd, 0x96, 0x0a, 0x58, 0x40, 0x65, 0xb8, 0xb2, 0xdc, - 0x43, 0x83, 0x20, 0x58, 0x8b, 0x2b, 0x4a, 0x89, 0xc9, 0x25, 0x96, 0x2b, 0xc7, 0xbe, 0x04, 0x79, - 0x80, 0x3c, 0x40, 0x2e, 0x79, 0x1f, 0x1f, 0x0d, 0xe4, 0x1e, 0xc4, 0xce, 0x8b, 0x04, 0xbb, 0x5c, - 0x4a, 0x94, 0x74, 0xdb, 0xf9, 0xbe, 0xe1, 0xcc, 0x37, 0xdf, 0xce, 0x12, 0x40, 0xd0, 0xa9, 0x3c, - 0x8e, 0x04, 0x97, 0x1c, 0x17, 0xd5, 0x39, 0xba, 0xde, 0x6f, 0xf8, 0xdc, 0xe7, 0x1a, 0xfa, 
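The generated Unmarshal and skipRaft code removed above decodes protocol-buffer base-128 varints with an explicit shift loop: each byte contributes seven low bits, and a high bit below 0x80 ends the value. The standalone Go sketch below mirrors that same loop outside the generated code; decodeVarint and its error values are illustrative names, not part of the removed file.

package main

import (
    "errors"
    "fmt"
)

var (
    errOverflow      = errors.New("varint overflows a 64-bit integer")
    errUnexpectedEOF = errors.New("unexpected end of buffer")
)

// decodeVarint mirrors the shift loop used by the generated Unmarshal
// methods above: each byte contributes 7 bits, and the high bit marks
// whether more bytes follow.
func decodeVarint(data []byte) (v uint64, n int, err error) {
    for shift := uint(0); ; shift += 7 {
        if shift >= 64 {
            return 0, 0, errOverflow
        }
        if n >= len(data) {
            return 0, 0, errUnexpectedEOF
        }
        b := data[n]
        n++
        v |= (uint64(b) & 0x7F) << shift
        if b < 0x80 {
            break
        }
    }
    return v, n, nil
}

func main() {
    // 300 encodes as 0xAC 0x02 in varint form.
    v, n, err := decodeVarint([]byte{0xAC, 0x02})
    fmt.Println(v, n, err) // 300 2 <nil>
}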
0x4d, - 0x9d, 0x12, 0xb6, 0xfd, 0x0e, 0x0a, 0x7f, 0x87, 0x52, 0xdc, 0xe3, 0x5f, 0xc1, 0x19, 0xdf, 0x47, - 0x8c, 0x58, 0x2d, 0xab, 0x53, 0xeb, 0xd6, 0x8f, 0x93, 0xaf, 0x8e, 0x35, 0xa9, 0x88, 0x53, 0xe7, - 0xe1, 0xcb, 0x4f, 0xb9, 0x91, 0x4e, 0xc2, 0x04, 0x9c, 0x31, 0x13, 0x01, 0xb1, 0x5b, 0x56, 0xc7, - 0x59, 0x32, 0x4c, 0x04, 0x78, 0x1f, 0x0a, 0x83, 0xd0, 0x63, 0x77, 0x24, 0x9f, 0xa1, 0x12, 0x08, - 0x63, 0x70, 0xfa, 0x54, 0x52, 0xe2, 0xb4, 0xac, 0x4e, 0x75, 0xa4, 0xcf, 0xed, 0xf7, 0x16, 0xa0, - 0xcb, 0x90, 0x46, 0xf1, 0x8c, 0xcb, 0x21, 0x93, 0xd4, 0xa3, 0x92, 0xe2, 0x3f, 0x01, 0x26, 0x3c, - 0x9c, 0xbe, 0x8a, 0x25, 0x95, 0x89, 0x22, 0x77, 0xa5, 0xa8, 0xc7, 0xc3, 0xe9, 0xa5, 0x22, 0x4c, - 0xf1, 0xca, 0x24, 0x05, 0x54, 0xf3, 0xb9, 0x6e, 0x9e, 0xd5, 0x95, 0x40, 0x4a, 0xb2, 0x54, 0x92, - 0xb3, 0xba, 0x34, 0xd2, 0xfe, 0x1f, 0xca, 0xa9, 0x02, 0x25, 0x51, 0x29, 0xd0, 0x3d, 0xab, 0x23, - 0x7d, 0xc6, 0x7f, 0x41, 0x39, 0x30, 0xca, 0x74, 0x61, 0xb7, 0x4b, 0x52, 0x2d, 0x9b, 0xca, 0x4d, - 0xdd, 0x65, 0x7e, 0xfb, 0x53, 0x1e, 0x4a, 0x43, 0x16, 0xc7, 0xd4, 0x67, 0xf8, 0x08, 0x1c, 0xb9, - 0x72, 0x78, 0x2f, 0xad, 0x61, 0xe8, 0xac, 0xc7, 0x2a, 0x0d, 0x37, 0xc0, 0x96, 0x7c, 0x6d, 0x12, - 0x5b, 0x72, 0x35, 0xc6, 0x54, 0xf0, 0x8d, 0x31, 0x14, 0xb2, 0x1c, 0xd0, 0xd9, 0x1c, 0x10, 0x37, - 0xa1, 0x74, 0xc3, 0x7d, 0x7d, 0x61, 0x85, 0x0c, 0x99, 0x82, 0x2b, 0xdb, 0x8a, 0xdb, 0xb6, 0x1d, - 0x41, 0x89, 0x85, 0x52, 0xcc, 0x59, 0x4c, 0x4a, 0xad, 0x7c, 0xc7, 0xed, 0xee, 0xac, 0x6d, 0x46, - 0x5a, 0xca, 0xe4, 0xe0, 0x03, 0x28, 0x4e, 0x78, 0x10, 0xcc, 0x25, 0x29, 0x67, 0x6a, 0x19, 0x0c, - 0x77, 0xa1, 0x1c, 0x1b, 0xc7, 0x48, 0x45, 0x3b, 0x89, 0x36, 0x9d, 0x4c, 0x1d, 0x4c, 0xf3, 0x54, - 0x45, 0xc1, 0x5e, 0xb3, 0x89, 0x24, 0xd0, 0xb2, 0x3a, 0xe5, 0xb4, 0x62, 0x82, 0xe1, 0x5f, 0x00, - 0x92, 0xd3, 0xd9, 0x3c, 0x94, 0xc4, 0xcd, 0xf4, 0xcc, 0xe0, 0x98, 0x40, 0x69, 0xc2, 0x43, 0xc9, - 0xee, 0x24, 0xa9, 0xea, 0x8b, 0x4d, 0xc3, 0xf6, 0x4b, 0xa8, 0x9c, 0x51, 0xe1, 0x25, 0xeb, 0x93, - 0x3a, 0x68, 0x6d, 0x39, 0x48, 0xc0, 0xb9, 0xe5, 0x92, 0xad, 0xef, 0xbb, 0x42, 0x32, 0x03, 0xe7, - 0xb7, 0x07, 0x6e, 0xff, 0x0c, 0x95, 0xe5, 0xba, 0xe2, 0x06, 0x14, 0x42, 0xee, 0xb1, 0x98, 0x58, - 0xad, 0x7c, 0xc7, 0x19, 0x25, 0x41, 0xfb, 0x83, 0x05, 0xa0, 0x72, 0x7a, 0x33, 0x1a, 0xfa, 0xfa, - 0xd6, 0x07, 0xfd, 0x35, 0x05, 0xf6, 0xa0, 0x8f, 0x7f, 0x37, 0x8f, 0xd3, 0xd6, 0xab, 0xf3, 0x63, - 0xf6, 0x29, 0x24, 0xdf, 0x6d, 0xbd, 0xd0, 0x03, 0x28, 0x9e, 0x73, 0x8f, 0x0d, 0xfa, 0xeb, 0xba, - 0x12, 0x4c, 0x19, 0xd2, 0x33, 0x86, 0x24, 0x8f, 0x31, 0x0d, 0x0f, 0xff, 0x80, 0xca, 0xf2, 0xc9, - 0xe3, 0x5d, 0x70, 0x75, 0x70, 0xce, 0x45, 0x40, 0x6f, 0x50, 0x0e, 0xef, 0xc1, 0xae, 0x06, 0x56, - 0x8d, 0x91, 0x75, 0xf8, 0xd9, 0x06, 0x37, 0xb3, 0xc4, 0x18, 0xa0, 0x38, 0x8c, 0xfd, 0xb3, 0x45, - 0x84, 0x72, 0xd8, 0x85, 0xd2, 0x30, 0xf6, 0x4f, 0x19, 0x95, 0xc8, 0x32, 0xc1, 0x85, 0xe0, 0x11, - 0xb2, 0x4d, 0xd6, 0x49, 0x14, 0xa1, 0x3c, 0xae, 0x01, 0x24, 0xe7, 0x11, 0x8b, 0x23, 0xe4, 0x98, - 0xc4, 0xff, 0xb8, 0x64, 0xa8, 0xa0, 0x44, 0x98, 0x40, 0xb3, 0x45, 0xc3, 0xaa, 0x85, 0x41, 0x25, - 0x8c, 0xa0, 0xaa, 0x9a, 0x31, 0x2a, 0xe4, 0xb5, 0xea, 0x52, 0xc6, 0x0d, 0x40, 0x59, 0x44, 0x7f, - 0x54, 0xc1, 0x18, 0x6a, 0xc3, 0xd8, 0xbf, 0x0a, 0x05, 0xa3, 0x93, 0x19, 0xbd, 0xbe, 0x61, 0x08, - 0x70, 0x1d, 0x76, 0x4c, 0x21, 0x75, 0x41, 0x8b, 0x18, 0xb9, 0x26, 0xad, 0x37, 0x63, 0x93, 0x37, - 0xff, 0x2e, 0xb8, 0x58, 0x04, 0xa8, 0x8a, 0x7f, 0x80, 0xfa, 0x30, 0xf6, 0xc7, 0x82, 0x86, 0xf1, - 0x94, 0x89, 0x7f, 0x18, 0xf5, 0x98, 0x40, 0x3b, 0xe6, 0xeb, 0xf1, 0x3c, 0x60, 0x7c, 0x21, 0xcf, - 0xf9, 0x5b, 0x54, 
0x33, 0x62, 0x46, 0x8c, 0x7a, 0xfa, 0x87, 0x87, 0x76, 0x8d, 0x98, 0x25, 0xa2, - 0xc5, 0x20, 0x33, 0xef, 0x85, 0x60, 0x7a, 0xc4, 0xba, 0xe9, 0x6a, 0x62, 0x9d, 0x83, 0x0f, 0x5f, - 0x40, 0x6d, 0xfd, 0x7a, 0x95, 0x8e, 0x15, 0x72, 0xe2, 0x79, 0xea, 0x2e, 0x51, 0x0e, 0x13, 0x68, - 0xac, 0xe0, 0x11, 0x0b, 0xf8, 0x2d, 0xd3, 0x8c, 0xb5, 0xce, 0x5c, 0x45, 0x1e, 0x95, 0x09, 0x63, - 0x9f, 0x92, 0x87, 0xa7, 0x66, 0xee, 0xf1, 0xa9, 0x99, 0x7b, 0x78, 0x6e, 0x5a, 0x8f, 0xcf, 0x4d, - 0xeb, 0xeb, 0x73, 0xd3, 0xfa, 0xf8, 0xad, 0x99, 0xfb, 0x1e, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x30, - 0x01, 0x41, 0x3a, 0x06, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/github.com/coreos/etcd/raft/rawnode.go deleted file mode 100644 index b950d5169a5..00000000000 --- a/vendor/github.com/coreos/etcd/raft/rawnode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "errors" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -// ErrStepLocalMsg is returned when try to step a local raft message -var ErrStepLocalMsg = errors.New("raft: cannot step raft local message") - -// ErrStepPeerNotFound is returned when try to step a response message -// but there is no peer found in raft.prs for that node. -var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found") - -// RawNode is a thread-unsafe Node. -// The methods of this struct correspond to the methods of Node and are described -// more fully there. -type RawNode struct { - raft *raft - prevSoftSt *SoftState - prevHardSt pb.HardState -} - -func (rn *RawNode) newReady() Ready { - return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) -} - -func (rn *RawNode) commitReady(rd Ready) { - if rd.SoftState != nil { - rn.prevSoftSt = rd.SoftState - } - if !IsEmptyHardState(rd.HardState) { - rn.prevHardSt = rd.HardState - } - if rn.prevHardSt.Commit != 0 { - // In most cases, prevHardSt and rd.HardState will be the same - // because when there are new entries to apply we just sent a - // HardState with an updated Commit value. However, on initial - // startup the two are different because we don't send a HardState - // until something changes, but we do send any un-applied but - // committed entries (and previously-committed entries may be - // incorporated into the snapshot, even if rd.CommittedEntries is - // empty). Therefore we mark all committed entries as applied - // whether they were included in rd.HardState or not. - rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit) - } - if len(rd.Entries) > 0 { - e := rd.Entries[len(rd.Entries)-1] - rn.raft.raftLog.stableTo(e.Index, e.Term) - } - if !IsEmptySnap(rd.Snapshot) { - rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) - } - if len(rd.ReadStates) != 0 { - rn.raft.readStates = nil - } -} - -// NewRawNode returns a new RawNode given configuration and a list of raft peers. 
-func NewRawNode(config *Config, peers []Peer) (*RawNode, error) { - if config.ID == 0 { - panic("config.ID must not be zero") - } - r := newRaft(config) - rn := &RawNode{ - raft: r, - } - lastIndex, err := config.Storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - // If the log is empty, this is a new RawNode (like StartNode); otherwise it's - // restoring an existing RawNode (like RestartNode). - // TODO(bdarnell): rethink RawNode initialization and whether the application needs - // to be able to tell us when it expects the RawNode to exist. - if lastIndex == 0 { - r.becomeFollower(1, None) - ents := make([]pb.Entry, len(peers)) - for i, peer := range peers { - cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} - data, err := cc.Marshal() - if err != nil { - panic("unexpected marshal error") - } - - ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} - } - r.raftLog.append(ents...) - r.raftLog.committed = uint64(len(ents)) - for _, peer := range peers { - r.addNode(peer.ID) - } - } - - // Set the initial hard and soft states after performing all initialization. - rn.prevSoftSt = r.softState() - if lastIndex == 0 { - rn.prevHardSt = emptyState - } else { - rn.prevHardSt = r.hardState() - } - - return rn, nil -} - -// Tick advances the internal logical clock by a single tick. -func (rn *RawNode) Tick() { - rn.raft.tick() -} - -// TickQuiesced advances the internal logical clock by a single tick without -// performing any other state machine processing. It allows the caller to avoid -// periodic heartbeats and elections when all of the peers in a Raft group are -// known to be at the same state. Expected usage is to periodically invoke Tick -// or TickQuiesced depending on whether the group is "active" or "quiesced". -// -// WARNING: Be very careful about using this method as it subverts the Raft -// state machine. You should probably be using Tick instead. -func (rn *RawNode) TickQuiesced() { - rn.raft.electionElapsed++ -} - -// Campaign causes this RawNode to transition to candidate state. -func (rn *RawNode) Campaign() error { - return rn.raft.Step(pb.Message{ - Type: pb.MsgHup, - }) -} - -// Propose proposes data be appended to the raft log. -func (rn *RawNode) Propose(data []byte) error { - return rn.raft.Step(pb.Message{ - Type: pb.MsgProp, - From: rn.raft.id, - Entries: []pb.Entry{ - {Data: data}, - }}) -} - -// ProposeConfChange proposes a config change. -func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error { - data, err := cc.Marshal() - if err != nil { - return err - } - return rn.raft.Step(pb.Message{ - Type: pb.MsgProp, - Entries: []pb.Entry{ - {Type: pb.EntryConfChange, Data: data}, - }, - }) -} - -// ApplyConfChange applies a config change to the local node. -func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { - if cc.NodeID == None { - rn.raft.resetPendingConf() - return &pb.ConfState{Nodes: rn.raft.nodes()} - } - switch cc.Type { - case pb.ConfChangeAddNode: - rn.raft.addNode(cc.NodeID) - case pb.ConfChangeRemoveNode: - rn.raft.removeNode(cc.NodeID) - case pb.ConfChangeUpdateNode: - rn.raft.resetPendingConf() - default: - panic("unexpected conf type") - } - return &pb.ConfState{Nodes: rn.raft.nodes()} -} - -// Step advances the state machine using the given message. 
-func (rn *RawNode) Step(m pb.Message) error { - // ignore unexpected local messages receiving over network - if IsLocalMsg(m.Type) { - return ErrStepLocalMsg - } - if _, ok := rn.raft.prs[m.From]; ok || !IsResponseMsg(m.Type) { - return rn.raft.Step(m) - } - return ErrStepPeerNotFound -} - -// Ready returns the current point-in-time state of this RawNode. -func (rn *RawNode) Ready() Ready { - rd := rn.newReady() - rn.raft.msgs = nil - return rd -} - -// HasReady called when RawNode user need to check if any Ready pending. -// Checking logic in this method should be consistent with Ready.containsUpdates(). -func (rn *RawNode) HasReady() bool { - r := rn.raft - if !r.softState().equal(rn.prevSoftSt) { - return true - } - if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) { - return true - } - if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) { - return true - } - if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() { - return true - } - if len(r.readStates) != 0 { - return true - } - return false -} - -// Advance notifies the RawNode that the application has applied and saved progress in the -// last Ready results. -func (rn *RawNode) Advance(rd Ready) { - rn.commitReady(rd) -} - -// Status returns the current status of the given group. -func (rn *RawNode) Status() *Status { - status := getStatus(rn.raft) - return &status -} - -// ReportUnreachable reports the given node is not reachable for the last send. -func (rn *RawNode) ReportUnreachable(id uint64) { - _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id}) -} - -// ReportSnapshot reports the status of the sent snapshot. -func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) { - rej := status == SnapshotFailure - - _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}) -} - -// TransferLeader tries to transfer leadership to the given transferee. -func (rn *RawNode) TransferLeader(transferee uint64) { - _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee}) -} - -// ReadIndex requests a read state. The read state will be set in ready. -// Read State has a read index. Once the application advances further than the read -// index, any linearizable read requests issued before the read request can be -// processed safely. The read state will have the same rctx attached. -func (rn *RawNode) ReadIndex(rctx []byte) { - _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}}) -} diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/github.com/coreos/etcd/raft/read_only.go deleted file mode 100644 index d0085237e36..00000000000 --- a/vendor/github.com/coreos/etcd/raft/read_only.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import pb "github.com/coreos/etcd/raft/raftpb" - -// ReadState provides state for read only query. 
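The RawNode methods removed above (Campaign, Propose, Ready, HasReady, Advance) are meant to be driven by the caller in a loop. A rough single-node sketch of that loop follows, written against the upstream github.com/coreos/etcd/raft package rather than this repository's vendor copy; the Config field values are assumptions chosen only for illustration.

package main

import (
    "fmt"

    "github.com/coreos/etcd/raft"
)

func main() {
    storage := raft.NewMemoryStorage()
    cfg := &raft.Config{
        ID:              1,
        ElectionTick:    10,
        HeartbeatTick:   1,
        Storage:         storage,
        MaxSizePerMsg:   1 << 20,
        MaxInflightMsgs: 256,
    }
    rn, err := raft.NewRawNode(cfg, []raft.Peer{{ID: 1}})
    if err != nil {
        panic(err)
    }

    // A single-voter group elects itself and can accept proposals directly.
    if err := rn.Campaign(); err != nil {
        panic(err)
    }
    if err := rn.Propose([]byte("hello")); err != nil {
        panic(err)
    }

    for rn.HasReady() {
        rd := rn.Ready()
        // Persist state and entries before acknowledging the Ready.
        if !raft.IsEmptyHardState(rd.HardState) {
            _ = storage.SetHardState(rd.HardState)
        }
        if err := storage.Append(rd.Entries); err != nil {
            panic(err)
        }
        for _, e := range rd.CommittedEntries {
            fmt.Printf("committed: %q\n", e.Data)
        }
        rn.Advance(rd)
    }
}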
-// It's caller's responsibility to call ReadIndex first before getting -// this state from ready, It's also caller's duty to differentiate if this -// state is what it requests through RequestCtx, eg. given a unique id as -// RequestCtx -type ReadState struct { - Index uint64 - RequestCtx []byte -} - -type readIndexStatus struct { - req pb.Message - index uint64 - acks map[uint64]struct{} -} - -type readOnly struct { - option ReadOnlyOption - pendingReadIndex map[string]*readIndexStatus - readIndexQueue []string -} - -func newReadOnly(option ReadOnlyOption) *readOnly { - return &readOnly{ - option: option, - pendingReadIndex: make(map[string]*readIndexStatus), - } -} - -// addRequest adds a read only reuqest into readonly struct. -// `index` is the commit index of the raft state machine when it received -// the read only request. -// `m` is the original read only request message from the local or remote node. -func (ro *readOnly) addRequest(index uint64, m pb.Message) { - ctx := string(m.Entries[0].Data) - if _, ok := ro.pendingReadIndex[ctx]; ok { - return - } - ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})} - ro.readIndexQueue = append(ro.readIndexQueue, ctx) -} - -// recvAck notifies the readonly struct that the raft state machine received -// an acknowledgment of the heartbeat that attached with the read only request -// context. -func (ro *readOnly) recvAck(m pb.Message) int { - rs, ok := ro.pendingReadIndex[string(m.Context)] - if !ok { - return 0 - } - - rs.acks[m.From] = struct{}{} - // add one to include an ack from local node - return len(rs.acks) + 1 -} - -// advance advances the read only request queue kept by the readonly struct. -// It dequeues the requests until it finds the read only request that has -// the same context as the given `m`. -func (ro *readOnly) advance(m pb.Message) []*readIndexStatus { - var ( - i int - found bool - ) - - ctx := string(m.Context) - rss := []*readIndexStatus{} - - for _, okctx := range ro.readIndexQueue { - i++ - rs, ok := ro.pendingReadIndex[okctx] - if !ok { - panic("cannot find corresponding read state from pending map") - } - rss = append(rss, rs) - if okctx == ctx { - found = true - break - } - } - - if found { - ro.readIndexQueue = ro.readIndexQueue[i:] - for _, rs := range rss { - delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data)) - } - return rss - } - - return nil -} - -// lastPendingRequestCtx returns the context of the last pending read only -// request in readonly struct. -func (ro *readOnly) lastPendingRequestCtx() string { - if len(ro.readIndexQueue) == 0 { - return "" - } - return ro.readIndexQueue[len(ro.readIndexQueue)-1] -} diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/github.com/coreos/etcd/raft/status.go deleted file mode 100644 index b690fa56b95..00000000000 --- a/vendor/github.com/coreos/etcd/raft/status.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package raft - -import ( - "fmt" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -type Status struct { - ID uint64 - - pb.HardState - SoftState - - Applied uint64 - Progress map[uint64]Progress -} - -// getStatus gets a copy of the current raft status. -func getStatus(r *raft) Status { - s := Status{ID: r.id} - s.HardState = r.hardState() - s.SoftState = *r.softState() - - s.Applied = r.raftLog.applied - - if s.RaftState == StateLeader { - s.Progress = make(map[uint64]Progress) - for id, p := range r.prs { - s.Progress[id] = *p - } - } - - return s -} - -// MarshalJSON translates the raft status into JSON. -// TODO: try to simplify this by introducing ID type into raft -func (s Status) MarshalJSON() ([]byte, error) { - j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"progress":{`, - s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState) - - if len(s.Progress) == 0 { - j += "}}" - } else { - for k, v := range s.Progress { - subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State) - j += subj - } - // remove the trailing "," - j = j[:len(j)-1] + "}}" - } - return []byte(j), nil -} - -func (s Status) String() string { - b, err := s.MarshalJSON() - if err != nil { - raftLogger.Panicf("unexpected error: %v", err) - } - return string(b) -} diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/github.com/coreos/etcd/raft/storage.go deleted file mode 100644 index 69c3a7d9033..00000000000 --- a/vendor/github.com/coreos/etcd/raft/storage.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "errors" - "sync" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -// ErrCompacted is returned by Storage.Entries/Compact when a requested -// index is unavailable because it predates the last snapshot. -var ErrCompacted = errors.New("requested index is unavailable due to compaction") - -// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested -// index is older than the existing snapshot. -var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot") - -// ErrUnavailable is returned by Storage interface when the requested log entries -// are unavailable. -var ErrUnavailable = errors.New("requested entry at index is unavailable") - -// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required -// snapshot is temporarily unavailable. -var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable") - -// Storage is an interface that may be implemented by the application -// to retrieve log entries from storage. -// -// If any Storage method returns an error, the raft instance will -// become inoperable and refuse to participate in elections; the -// application is responsible for cleanup and recovery in this case. -type Storage interface { - // InitialState returns the saved HardState and ConfState information. 
- InitialState() (pb.HardState, pb.ConfState, error) - // Entries returns a slice of log entries in the range [lo,hi). - // MaxSize limits the total size of the log entries returned, but - // Entries returns at least one entry if any. - Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) - // Term returns the term of entry i, which must be in the range - // [FirstIndex()-1, LastIndex()]. The term of the entry before - // FirstIndex is retained for matching purposes even though the - // rest of that entry may not be available. - Term(i uint64) (uint64, error) - // LastIndex returns the index of the last entry in the log. - LastIndex() (uint64, error) - // FirstIndex returns the index of the first log entry that is - // possibly available via Entries (older entries have been incorporated - // into the latest Snapshot; if storage only contains the dummy entry the - // first log entry is not available). - FirstIndex() (uint64, error) - // Snapshot returns the most recent snapshot. - // If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable, - // so raft state machine could know that Storage needs some time to prepare - // snapshot and call Snapshot later. - Snapshot() (pb.Snapshot, error) -} - -// MemoryStorage implements the Storage interface backed by an -// in-memory array. -type MemoryStorage struct { - // Protects access to all fields. Most methods of MemoryStorage are - // run on the raft goroutine, but Append() is run on an application - // goroutine. - sync.Mutex - - hardState pb.HardState - snapshot pb.Snapshot - // ents[i] has raft log position i+snapshot.Metadata.Index - ents []pb.Entry -} - -// NewMemoryStorage creates an empty MemoryStorage. -func NewMemoryStorage() *MemoryStorage { - return &MemoryStorage{ - // When starting from scratch populate the list with a dummy entry at term zero. - ents: make([]pb.Entry, 1), - } -} - -// InitialState implements the Storage interface. -func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { - return ms.hardState, ms.snapshot.Metadata.ConfState, nil -} - -// SetHardState saves the current HardState. -func (ms *MemoryStorage) SetHardState(st pb.HardState) error { - ms.Lock() - defer ms.Unlock() - ms.hardState = st - return nil -} - -// Entries implements the Storage interface. -func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { - ms.Lock() - defer ms.Unlock() - offset := ms.ents[0].Index - if lo <= offset { - return nil, ErrCompacted - } - if hi > ms.lastIndex()+1 { - raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex()) - } - // only contains dummy entries. - if len(ms.ents) == 1 { - return nil, ErrUnavailable - } - - ents := ms.ents[lo-offset : hi-offset] - return limitSize(ents, maxSize), nil -} - -// Term implements the Storage interface. -func (ms *MemoryStorage) Term(i uint64) (uint64, error) { - ms.Lock() - defer ms.Unlock() - offset := ms.ents[0].Index - if i < offset { - return 0, ErrCompacted - } - if int(i-offset) >= len(ms.ents) { - return 0, ErrUnavailable - } - return ms.ents[i-offset].Term, nil -} - -// LastIndex implements the Storage interface. -func (ms *MemoryStorage) LastIndex() (uint64, error) { - ms.Lock() - defer ms.Unlock() - return ms.lastIndex(), nil -} - -func (ms *MemoryStorage) lastIndex() uint64 { - return ms.ents[0].Index + uint64(len(ms.ents)) - 1 -} - -// FirstIndex implements the Storage interface. 
-func (ms *MemoryStorage) FirstIndex() (uint64, error) { - ms.Lock() - defer ms.Unlock() - return ms.firstIndex(), nil -} - -func (ms *MemoryStorage) firstIndex() uint64 { - return ms.ents[0].Index + 1 -} - -// Snapshot implements the Storage interface. -func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { - ms.Lock() - defer ms.Unlock() - return ms.snapshot, nil -} - -// ApplySnapshot overwrites the contents of this Storage object with -// those of the given snapshot. -func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error { - ms.Lock() - defer ms.Unlock() - - //handle check for old snapshot being applied - msIndex := ms.snapshot.Metadata.Index - snapIndex := snap.Metadata.Index - if msIndex >= snapIndex { - return ErrSnapOutOfDate - } - - ms.snapshot = snap - ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}} - return nil -} - -// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and -// can be used to reconstruct the state at that point. -// If any configuration changes have been made since the last compaction, -// the result of the last ApplyConfChange must be passed in. -func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) { - ms.Lock() - defer ms.Unlock() - if i <= ms.snapshot.Metadata.Index { - return pb.Snapshot{}, ErrSnapOutOfDate - } - - offset := ms.ents[0].Index - if i > ms.lastIndex() { - raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex()) - } - - ms.snapshot.Metadata.Index = i - ms.snapshot.Metadata.Term = ms.ents[i-offset].Term - if cs != nil { - ms.snapshot.Metadata.ConfState = *cs - } - ms.snapshot.Data = data - return ms.snapshot, nil -} - -// Compact discards all log entries prior to compactIndex. -// It is the application's responsibility to not attempt to compact an index -// greater than raftLog.applied. -func (ms *MemoryStorage) Compact(compactIndex uint64) error { - ms.Lock() - defer ms.Unlock() - offset := ms.ents[0].Index - if compactIndex <= offset { - return ErrCompacted - } - if compactIndex > ms.lastIndex() { - raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex()) - } - - i := compactIndex - offset - ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) - ents[0].Index = ms.ents[i].Index - ents[0].Term = ms.ents[i].Term - ents = append(ents, ms.ents[i+1:]...) - ms.ents = ents - return nil -} - -// Append the new entries to storage. -// TODO (xiangli): ensure the entries are continuous and -// entries[0].Index > ms.entries[0].Index -func (ms *MemoryStorage) Append(entries []pb.Entry) error { - if len(entries) == 0 { - return nil - } - - ms.Lock() - defer ms.Unlock() - - first := ms.firstIndex() - last := entries[0].Index + uint64(len(entries)) - 1 - - // shortcut if there is no new entry. - if last < first { - return nil - } - // truncate compacted entries - if first > entries[0].Index { - entries = entries[first-entries[0].Index:] - } - - offset := entries[0].Index - ms.ents[0].Index - switch { - case uint64(len(ms.ents)) > offset: - ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) - ms.ents = append(ms.ents, entries...) - case uint64(len(ms.ents)) == offset: - ms.ents = append(ms.ents, entries...) 
- default: - raftLogger.Panicf("missing log entry [last: %d, append at: %d]", - ms.lastIndex(), entries[0].Index) - } - return nil -} diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go deleted file mode 100644 index f4141fe65dd..00000000000 --- a/vendor/github.com/coreos/etcd/raft/util.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "bytes" - "fmt" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -func (st StateType) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf("%q", st.String())), nil -} - -// uint64Slice implements sort interface -type uint64Slice []uint64 - -func (p uint64Slice) Len() int { return len(p) } -func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func min(a, b uint64) uint64 { - if a > b { - return b - } - return a -} - -func max(a, b uint64) uint64 { - if a > b { - return a - } - return b -} - -func IsLocalMsg(msgt pb.MessageType) bool { - return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || - msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum -} - -func IsResponseMsg(msgt pb.MessageType) bool { - return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp -} - -// voteResponseType maps vote and prevote message types to their corresponding responses. -func voteRespMsgType(msgt pb.MessageType) pb.MessageType { - switch msgt { - case pb.MsgVote: - return pb.MsgVoteResp - case pb.MsgPreVote: - return pb.MsgPreVoteResp - default: - panic(fmt.Sprintf("not a vote message: %s", msgt)) - } -} - -// EntryFormatter can be implemented by the application to provide human-readable formatting -// of entry data. Nil is a valid EntryFormatter and will use a default format. -type EntryFormatter func([]byte) string - -// DescribeMessage returns a concise human-readable description of a -// Message for debugging. -func DescribeMessage(m pb.Message, f EntryFormatter) string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) - if m.Reject { - fmt.Fprintf(&buf, " Rejected") - if m.RejectHint != 0 { - fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint) - } - } - if m.Commit != 0 { - fmt.Fprintf(&buf, " Commit:%d", m.Commit) - } - if len(m.Entries) > 0 { - fmt.Fprintf(&buf, " Entries:[") - for i, e := range m.Entries { - if i != 0 { - buf.WriteString(", ") - } - buf.WriteString(DescribeEntry(e, f)) - } - fmt.Fprintf(&buf, "]") - } - if !IsEmptySnap(m.Snapshot) { - fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot) - } - return buf.String() -} - -// DescribeEntry returns a concise human-readable description of an -// Entry for debugging. 
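MemoryStorage, removed above, is the reference Storage implementation: a dummy entry at ents[0] marks the compaction point, and Compact/CreateSnapshot move that dummy forward. A short usage sketch against the upstream package follows; the entry indexes and terms are chosen only for illustration.

package main

import (
    "fmt"

    "github.com/coreos/etcd/raft"
    pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
    ms := raft.NewMemoryStorage()
    if err := ms.Append([]pb.Entry{
        {Index: 1, Term: 1},
        {Index: 2, Term: 1},
        {Index: 3, Term: 2},
    }); err != nil {
        panic(err)
    }

    first, _ := ms.FirstIndex()
    last, _ := ms.LastIndex()
    fmt.Println(first, last) // 1 3

    // Discard everything before index 2; index 2 becomes the new dummy entry.
    if err := ms.Compact(2); err != nil {
        panic(err)
    }
    first, _ = ms.FirstIndex()
    fmt.Println(first) // 3

    _, err := ms.Term(1)
    fmt.Println(err == raft.ErrCompacted) // true
}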
-func DescribeEntry(e pb.Entry, f EntryFormatter) string { - var formatted string - if e.Type == pb.EntryNormal && f != nil { - formatted = f(e.Data) - } else { - formatted = fmt.Sprintf("%q", e.Data) - } - return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted) -} - -func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { - if len(ents) == 0 { - return ents - } - size := ents[0].Size() - var limit int - for limit = 1; limit < len(ents); limit++ { - size += ents[limit].Size() - if uint64(size) > maxSize { - break - } - } - return ents[:limit] -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/coder.go b/vendor/github.com/coreos/etcd/rafthttp/coder.go deleted file mode 100644 index 86ede972e1f..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/coder.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import "github.com/coreos/etcd/raft/raftpb" - -type encoder interface { - // encode encodes the given message to an output stream. - encode(m *raftpb.Message) error -} - -type decoder interface { - // decode decodes the message from an input stream. - decode() (raftpb.Message, error) -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/doc.go b/vendor/github.com/coreos/etcd/rafthttp/doc.go deleted file mode 100644 index a9486a8bb66..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package rafthttp implements HTTP transportation layer for etcd/raft pkg. -package rafthttp diff --git a/vendor/github.com/coreos/etcd/rafthttp/http.go b/vendor/github.com/coreos/etcd/rafthttp/http.go deleted file mode 100644 index 55df26e9b75..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/http.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rafthttp - -import ( - "errors" - "fmt" - "io/ioutil" - "net/http" - "path" - "strings" - - pioutil "github.com/coreos/etcd/pkg/ioutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/version" - "golang.org/x/net/context" -) - -const ( - // connReadLimitByte limits the number of bytes - // a single read can read out. - // - // 64KB should be large enough for not causing - // throughput bottleneck as well as small enough - // for not causing a read timeout. - connReadLimitByte = 64 * 1024 -) - -var ( - RaftPrefix = "/raft" - ProbingPrefix = path.Join(RaftPrefix, "probing") - RaftStreamPrefix = path.Join(RaftPrefix, "stream") - RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot") - - errIncompatibleVersion = errors.New("incompatible version") - errClusterIDMismatch = errors.New("cluster ID mismatch") -) - -type peerGetter interface { - Get(id types.ID) Peer -} - -type writerToResponse interface { - WriteTo(w http.ResponseWriter) -} - -type pipelineHandler struct { - tr Transporter - r Raft - cid types.ID -} - -// newPipelineHandler returns a handler for handling raft messages -// from pipeline for RaftPrefix. -// -// The handler reads out the raft message from request body, -// and forwards it to the given raft state machine for processing. -func newPipelineHandler(tr Transporter, r Raft, cid types.ID) http.Handler { - return &pipelineHandler{ - tr: tr, - r: r, - cid: cid, - } -} - -func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - w.Header().Set("Allow", "POST") - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } - - w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) - - if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil { - http.Error(w, err.Error(), http.StatusPreconditionFailed) - return - } - - if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err != nil { - if urls := r.Header.Get("X-PeerURLs"); urls != "" { - h.tr.AddRemote(from, strings.Split(urls, ",")) - } - } - - // Limit the data size that could be read from the request body, which ensures that read from - // connection will not time out accidentally due to possible blocking in underlying implementation. - limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte) - b, err := ioutil.ReadAll(limitedr) - if err != nil { - plog.Errorf("failed to read raft message (%v)", err) - http.Error(w, "error reading raft message", http.StatusBadRequest) - recvFailures.WithLabelValues(r.RemoteAddr).Inc() - return - } - - var m raftpb.Message - if err := m.Unmarshal(b); err != nil { - plog.Errorf("failed to unmarshal raft message (%v)", err) - http.Error(w, "error unmarshaling raft message", http.StatusBadRequest) - recvFailures.WithLabelValues(r.RemoteAddr).Inc() - return - } - - receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(len(b))) - - if err := h.r.Process(context.TODO(), m); err != nil { - switch v := err.(type) { - case writerToResponse: - v.WriteTo(w) - default: - plog.Warningf("failed to process raft message (%v)", err) - http.Error(w, "error processing raft message", http.StatusInternalServerError) - w.(http.Flusher).Flush() - // disconnect the http stream - panic(err) - } - return - } - - // Write StatusNoContent header after the message has been processed by - // raft, which facilitates the client to report MsgSnap status. 
- w.WriteHeader(http.StatusNoContent) -} - -type snapshotHandler struct { - tr Transporter - r Raft - snapshotter *snap.Snapshotter - cid types.ID -} - -func newSnapshotHandler(tr Transporter, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler { - return &snapshotHandler{ - tr: tr, - r: r, - snapshotter: snapshotter, - cid: cid, - } -} - -// ServeHTTP serves HTTP request to receive and process snapshot message. -// -// If request sender dies without closing underlying TCP connection, -// the handler will keep waiting for the request body until TCP keepalive -// finds out that the connection is broken after several minutes. -// This is acceptable because -// 1. snapshot messages sent through other TCP connections could still be -// received and processed. -// 2. this case should happen rarely, so no further optimization is done. -func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - w.Header().Set("Allow", "POST") - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } - - w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) - - if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil { - http.Error(w, err.Error(), http.StatusPreconditionFailed) - return - } - - if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err != nil { - if urls := r.Header.Get("X-PeerURLs"); urls != "" { - h.tr.AddRemote(from, strings.Split(urls, ",")) - } - } - - dec := &messageDecoder{r: r.Body} - // let snapshots be very large since they can exceed 512MB for large installations - m, err := dec.decodeLimit(uint64(1 << 63)) - if err != nil { - msg := fmt.Sprintf("failed to decode raft message (%v)", err) - plog.Errorf(msg) - http.Error(w, msg, http.StatusBadRequest) - recvFailures.WithLabelValues(r.RemoteAddr).Inc() - return - } - - receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size())) - - if m.Type != raftpb.MsgSnap { - plog.Errorf("unexpected raft message type %s on snapshot path", m.Type) - http.Error(w, "wrong raft message type", http.StatusBadRequest) - return - } - - plog.Infof("receiving database snapshot [index:%d, from %s] ...", m.Snapshot.Metadata.Index, types.ID(m.From)) - // save incoming database snapshot. - n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index) - if err != nil { - msg := fmt.Sprintf("failed to save KV snapshot (%v)", err) - plog.Error(msg) - http.Error(w, msg, http.StatusInternalServerError) - return - } - receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(n)) - plog.Infof("received and saved database snapshot [index: %d, from: %s] successfully", m.Snapshot.Metadata.Index, types.ID(m.From)) - - if err := h.r.Process(context.TODO(), m); err != nil { - switch v := err.(type) { - // Process may return writerToResponse error when doing some - // additional checks before calling raft.Node.Step. - case writerToResponse: - v.WriteTo(w) - default: - msg := fmt.Sprintf("failed to process raft message (%v)", err) - plog.Warningf(msg) - http.Error(w, msg, http.StatusInternalServerError) - } - return - } - // Write StatusNoContent header after the message has been processed by - // raft, which facilitates the client to report MsgSnap status. 
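The pipeline and snapshot handlers removed above share the same HTTP contract: POST only, a cluster-ID header echoed and checked, a bounded body read, and 204 No Content once raft has processed the message. The stripped-down handler below sketches that shape with only the standard library; pipelineSketch, its fields, and the listen address are hypothetical, not the etcd implementation.

package main

import (
    "io"
    "io/ioutil"
    "log"
    "net/http"
)

type pipelineSketch struct {
    clusterID string
    process   func(body []byte) error
}

func (h *pipelineSketch) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    if r.Method != "POST" {
        w.Header().Set("Allow", "POST")
        http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
        return
    }
    w.Header().Set("X-Etcd-Cluster-ID", h.clusterID)
    if got := r.Header.Get("X-Etcd-Cluster-ID"); got != "" && got != h.clusterID {
        http.Error(w, "cluster ID mismatch", http.StatusPreconditionFailed)
        return
    }
    // Bound the read so a slow or hostile peer cannot stall the connection.
    body, err := ioutil.ReadAll(io.LimitReader(r.Body, 64*1024))
    if err != nil {
        http.Error(w, "error reading raft message", http.StatusBadRequest)
        return
    }
    if err := h.process(body); err != nil {
        http.Error(w, "error processing raft message", http.StatusInternalServerError)
        return
    }
    w.WriteHeader(http.StatusNoContent)
}

func main() {
    h := &pipelineSketch{
        clusterID: "cafe", // placeholder cluster ID
        process:   func(b []byte) error { log.Printf("received %d bytes", len(b)); return nil },
    }
    log.Fatal(http.ListenAndServe("127.0.0.1:2380", h))
}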
- w.WriteHeader(http.StatusNoContent) -} - -type streamHandler struct { - tr *Transport - peerGetter peerGetter - r Raft - id types.ID - cid types.ID -} - -func newStreamHandler(tr *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler { - return &streamHandler{ - tr: tr, - peerGetter: pg, - r: r, - id: id, - cid: cid, - } -} - -func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "GET" { - w.Header().Set("Allow", "GET") - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return - } - - w.Header().Set("X-Server-Version", version.Version) - w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) - - if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil { - http.Error(w, err.Error(), http.StatusPreconditionFailed) - return - } - - var t streamType - switch path.Dir(r.URL.Path) { - case streamTypeMsgAppV2.endpoint(): - t = streamTypeMsgAppV2 - case streamTypeMessage.endpoint(): - t = streamTypeMessage - default: - plog.Debugf("ignored unexpected streaming request path %s", r.URL.Path) - http.Error(w, "invalid path", http.StatusNotFound) - return - } - - fromStr := path.Base(r.URL.Path) - from, err := types.IDFromString(fromStr) - if err != nil { - plog.Errorf("failed to parse from %s into ID (%v)", fromStr, err) - http.Error(w, "invalid from", http.StatusNotFound) - return - } - if h.r.IsIDRemoved(uint64(from)) { - plog.Warningf("rejected the stream from peer %s since it was removed", from) - http.Error(w, "removed member", http.StatusGone) - return - } - p := h.peerGetter.Get(from) - if p == nil { - // This may happen in following cases: - // 1. user starts a remote peer that belongs to a different cluster - // with the same cluster ID. - // 2. local etcd falls behind of the cluster, and cannot recognize - // the members that joined after its current progress. - if urls := r.Header.Get("X-PeerURLs"); urls != "" { - h.tr.AddRemote(from, strings.Split(urls, ",")) - } - plog.Errorf("failed to find member %s in cluster %s", from, h.cid) - http.Error(w, "error sender not found", http.StatusNotFound) - return - } - - wto := h.id.String() - if gto := r.Header.Get("X-Raft-To"); gto != wto { - plog.Errorf("streaming request ignored (ID mismatch got %s want %s)", gto, wto) - http.Error(w, "to field mismatch", http.StatusPreconditionFailed) - return - } - - w.WriteHeader(http.StatusOK) - w.(http.Flusher).Flush() - - c := newCloseNotifier() - conn := &outgoingConn{ - t: t, - Writer: w, - Flusher: w.(http.Flusher), - Closer: c, - } - p.attachOutgoingConn(conn) - <-c.closeNotify() -} - -// checkClusterCompatibilityFromHeader checks the cluster compatibility of -// the local member from the given header. -// It checks whether the version of local member is compatible with -// the versions in the header, and whether the cluster ID of local member -// matches the one in the header. 
-func checkClusterCompatibilityFromHeader(header http.Header, cid types.ID) error { - if err := checkVersionCompability(header.Get("X-Server-From"), serverVersion(header), minClusterVersion(header)); err != nil { - plog.Errorf("request version incompatibility (%v)", err) - return errIncompatibleVersion - } - if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() { - plog.Errorf("request cluster ID mismatch (got %s want %s)", gcid, cid) - return errClusterIDMismatch - } - return nil -} - -type closeNotifier struct { - done chan struct{} -} - -func newCloseNotifier() *closeNotifier { - return &closeNotifier{ - done: make(chan struct{}), - } -} - -func (n *closeNotifier) Close() error { - close(n.done) - return nil -} - -func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done } diff --git a/vendor/github.com/coreos/etcd/rafthttp/metrics.go b/vendor/github.com/coreos/etcd/rafthttp/metrics.go deleted file mode 100644 index 320bfe72661..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/metrics.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import "github.com/prometheus/client_golang/prometheus" - -var ( - sentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "peer_sent_bytes_total", - Help: "The total number of bytes sent to peers.", - }, - []string{"To"}, - ) - - receivedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "peer_received_bytes_total", - Help: "The total number of bytes received from peers.", - }, - []string{"From"}, - ) - - sentFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "peer_sent_failures_total", - Help: "The total number of send failures from peers.", - }, - []string{"To"}, - ) - - recvFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "peer_received_failures_total", - Help: "The total number of receive failures from peers.", - }, - []string{"From"}, - ) - - rtts = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "network", - Name: "peer_round_trip_time_seconds", - Help: "Round-Trip-Time histogram between peers.", - Buckets: prometheus.ExponentialBuckets(0.0001, 2, 14), - }, - []string{"To"}, - ) -) - -func init() { - prometheus.MustRegister(sentBytes) - prometheus.MustRegister(receivedBytes) - prometheus.MustRegister(sentFailures) - prometheus.MustRegister(recvFailures) - prometheus.MustRegister(rtts) -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go b/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go deleted file mode 100644 index ef59bc8883f..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the 
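The removed metrics.go follows the standard client_golang pattern: package-level CounterVec and HistogramVec values registered in init and labeled with the peer ID. A small self-contained sketch of that pattern is below; the namespace and the sample peer ID are placeholders.

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

// One counter family, one time series per peer via the "To" label,
// mirroring the per-peer byte counters removed above.
var sentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
    Namespace: "example",
    Subsystem: "network",
    Name:      "peer_sent_bytes_total",
    Help:      "The total number of bytes sent to peers.",
}, []string{"To"})

func main() {
    prometheus.MustRegister(sentBytes)
    sentBytes.WithLabelValues("8e9e05c52164694d").Add(1024) // placeholder peer ID

    families, err := prometheus.DefaultGatherer.Gather()
    if err != nil {
        panic(err)
    }
    fmt.Println("metric families gathered:", len(families))
}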
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft/raftpb" -) - -// messageEncoder is a encoder that can encode all kinds of messages. -// It MUST be used with a paired messageDecoder. -type messageEncoder struct { - w io.Writer -} - -func (enc *messageEncoder) encode(m *raftpb.Message) error { - if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil { - return err - } - _, err := enc.w.Write(pbutil.MustMarshal(m)) - return err -} - -// messageDecoder is a decoder that can decode all kinds of messages. -type messageDecoder struct { - r io.Reader -} - -var ( - readBytesLimit uint64 = 512 * 1024 * 1024 // 512 MB - ErrExceedSizeLimit = errors.New("rafthttp: error limit exceeded") -) - -func (dec *messageDecoder) decode() (raftpb.Message, error) { - return dec.decodeLimit(readBytesLimit) -} - -func (dec *messageDecoder) decodeLimit(numBytes uint64) (raftpb.Message, error) { - var m raftpb.Message - var l uint64 - if err := binary.Read(dec.r, binary.BigEndian, &l); err != nil { - return m, err - } - if l > numBytes { - return m, ErrExceedSizeLimit - } - buf := make([]byte, int(l)) - if _, err := io.ReadFull(dec.r, buf); err != nil { - return m, err - } - return m, m.Unmarshal(buf) -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go b/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go deleted file mode 100644 index 013ffe7c736..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "encoding/binary" - "fmt" - "io" - "time" - - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" -) - -const ( - msgTypeLinkHeartbeat uint8 = 0 - msgTypeAppEntries uint8 = 1 - msgTypeApp uint8 = 2 - - msgAppV2BufSize = 1024 * 1024 -) - -// msgappv2 stream sends three types of message: linkHeartbeatMessage, -// AppEntries and MsgApp. AppEntries is the MsgApp that is sent in -// replicate state in raft, whose index and term are fully predictable. 
-// -// Data format of linkHeartbeatMessage: -// | offset | bytes | description | -// +--------+-------+-------------+ -// | 0 | 1 | \x00 | -// -// Data format of AppEntries: -// | offset | bytes | description | -// +--------+-------+-------------+ -// | 0 | 1 | \x01 | -// | 1 | 8 | length of entries | -// | 9 | 8 | length of first entry | -// | 17 | n1 | first entry | -// ... -// | x | 8 | length of k-th entry data | -// | x+8 | nk | k-th entry data | -// | x+8+nk | 8 | commit index | -// -// Data format of MsgApp: -// | offset | bytes | description | -// +--------+-------+-------------+ -// | 0 | 1 | \x02 | -// | 1 | 8 | length of encoded message | -// | 9 | n | encoded message | -type msgAppV2Encoder struct { - w io.Writer - fs *stats.FollowerStats - - term uint64 - index uint64 - buf []byte - uint64buf []byte - uint8buf []byte -} - -func newMsgAppV2Encoder(w io.Writer, fs *stats.FollowerStats) *msgAppV2Encoder { - return &msgAppV2Encoder{ - w: w, - fs: fs, - buf: make([]byte, msgAppV2BufSize), - uint64buf: make([]byte, 8), - uint8buf: make([]byte, 1), - } -} - -func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error { - start := time.Now() - switch { - case isLinkHeartbeatMessage(m): - enc.uint8buf[0] = byte(msgTypeLinkHeartbeat) - if _, err := enc.w.Write(enc.uint8buf); err != nil { - return err - } - case enc.index == m.Index && enc.term == m.LogTerm && m.LogTerm == m.Term: - enc.uint8buf[0] = byte(msgTypeAppEntries) - if _, err := enc.w.Write(enc.uint8buf); err != nil { - return err - } - // write length of entries - binary.BigEndian.PutUint64(enc.uint64buf, uint64(len(m.Entries))) - if _, err := enc.w.Write(enc.uint64buf); err != nil { - return err - } - for i := 0; i < len(m.Entries); i++ { - // write length of entry - binary.BigEndian.PutUint64(enc.uint64buf, uint64(m.Entries[i].Size())) - if _, err := enc.w.Write(enc.uint64buf); err != nil { - return err - } - if n := m.Entries[i].Size(); n < msgAppV2BufSize { - if _, err := m.Entries[i].MarshalTo(enc.buf); err != nil { - return err - } - if _, err := enc.w.Write(enc.buf[:n]); err != nil { - return err - } - } else { - if _, err := enc.w.Write(pbutil.MustMarshal(&m.Entries[i])); err != nil { - return err - } - } - enc.index++ - } - // write commit index - binary.BigEndian.PutUint64(enc.uint64buf, m.Commit) - if _, err := enc.w.Write(enc.uint64buf); err != nil { - return err - } - enc.fs.Succ(time.Since(start)) - default: - if err := binary.Write(enc.w, binary.BigEndian, msgTypeApp); err != nil { - return err - } - // write size of message - if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil { - return err - } - // write message - if _, err := enc.w.Write(pbutil.MustMarshal(m)); err != nil { - return err - } - - enc.term = m.Term - enc.index = m.Index - if l := len(m.Entries); l > 0 { - enc.index = m.Entries[l-1].Index - } - enc.fs.Succ(time.Since(start)) - } - return nil -} - -type msgAppV2Decoder struct { - r io.Reader - local, remote types.ID - - term uint64 - index uint64 - buf []byte - uint64buf []byte - uint8buf []byte -} - -func newMsgAppV2Decoder(r io.Reader, local, remote types.ID) *msgAppV2Decoder { - return &msgAppV2Decoder{ - r: r, - local: local, - remote: remote, - buf: make([]byte, msgAppV2BufSize), - uint64buf: make([]byte, 8), - uint8buf: make([]byte, 1), - } -} - -func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) { - var ( - m raftpb.Message - typ uint8 - ) - if _, err := io.ReadFull(dec.r, dec.uint8buf); err != nil { - return m, err - } - typ = uint8(dec.uint8buf[0]) - 
switch typ { - case msgTypeLinkHeartbeat: - return linkHeartbeatMessage, nil - case msgTypeAppEntries: - m = raftpb.Message{ - Type: raftpb.MsgApp, - From: uint64(dec.remote), - To: uint64(dec.local), - Term: dec.term, - LogTerm: dec.term, - Index: dec.index, - } - - // decode entries - if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil { - return m, err - } - l := binary.BigEndian.Uint64(dec.uint64buf) - m.Entries = make([]raftpb.Entry, int(l)) - for i := 0; i < int(l); i++ { - if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil { - return m, err - } - size := binary.BigEndian.Uint64(dec.uint64buf) - var buf []byte - if size < msgAppV2BufSize { - buf = dec.buf[:size] - if _, err := io.ReadFull(dec.r, buf); err != nil { - return m, err - } - } else { - buf = make([]byte, int(size)) - if _, err := io.ReadFull(dec.r, buf); err != nil { - return m, err - } - } - dec.index++ - // 1 alloc - pbutil.MustUnmarshal(&m.Entries[i], buf) - } - // decode commit index - if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil { - return m, err - } - m.Commit = binary.BigEndian.Uint64(dec.uint64buf) - case msgTypeApp: - var size uint64 - if err := binary.Read(dec.r, binary.BigEndian, &size); err != nil { - return m, err - } - buf := make([]byte, int(size)) - if _, err := io.ReadFull(dec.r, buf); err != nil { - return m, err - } - pbutil.MustUnmarshal(&m, buf) - - dec.term = m.Term - dec.index = m.Index - if l := len(m.Entries); l > 0 { - dec.index = m.Entries[l-1].Index - } - default: - return m, fmt.Errorf("failed to parse type %d in msgappv2 stream", typ) - } - return m, nil -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/peer.go b/vendor/github.com/coreos/etcd/rafthttp/peer.go deleted file mode 100644 index a82d7beed74..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/peer.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "sync" - "time" - - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" - "golang.org/x/net/context" -) - -const ( - // ConnReadTimeout and ConnWriteTimeout are the i/o timeout set on each connection rafthttp pkg creates. - // A 5 seconds timeout is good enough for recycling bad connections. Or we have to wait for - // tcp keepalive failing to detect a bad connection, which is at minutes level. - // For long term streaming connections, rafthttp pkg sends application level linkHeartbeatMessage - // to keep the connection alive. - // For short term pipeline connections, the connection MUST be killed to avoid it being - // put back to http pkg connection pool. - ConnReadTimeout = 5 * time.Second - ConnWriteTimeout = 5 * time.Second - - recvBufSize = 4096 - // maxPendingProposals holds the proposals during one leader election process. - // Generally one leader election takes at most 1 sec. 
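[Editor's sketch, not part of the patch] Both msgAppV2Encoder and msgAppV2Decoder above keep fixed uint8buf and uint64buf scratch slices so the hot path does not allocate for every type byte or length field. A small sketch of that write pattern; the type byte and entry count are made-up values.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var out bytes.Buffer
	uint8buf := make([]byte, 1)  // reused for the 1-byte message type
	uint64buf := make([]byte, 8) // reused for every length and index field

	uint8buf[0] = 1 // stands in for msgTypeAppEntries
	out.Write(uint8buf)

	binary.BigEndian.PutUint64(uint64buf, 3) // e.g. number of entries
	out.Write(uint64buf)

	fmt.Printf("% x\n", out.Bytes()) // 01 00 00 00 00 00 00 00 03
}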
It should have - // 0-2 election conflicts, and each one takes 0.5 sec. - // We assume the number of concurrent proposers is smaller than 4096. - // One client blocks on its proposal for at least 1 sec, so 4096 is enough - // to hold all proposals. - maxPendingProposals = 4096 - - streamAppV2 = "streamMsgAppV2" - streamMsg = "streamMsg" - pipelineMsg = "pipeline" - sendSnap = "sendMsgSnap" -) - -type Peer interface { - // send sends the message to the remote peer. The function is non-blocking - // and has no promise that the message will be received by the remote. - // When it fails to send message out, it will report the status to underlying - // raft. - send(m raftpb.Message) - - // sendSnap sends the merged snapshot message to the remote peer. Its behavior - // is similar to send. - sendSnap(m snap.Message) - - // update updates the urls of remote peer. - update(urls types.URLs) - - // attachOutgoingConn attaches the outgoing connection to the peer for - // stream usage. After the call, the ownership of the outgoing - // connection hands over to the peer. The peer will close the connection - // when it is no longer used. - attachOutgoingConn(conn *outgoingConn) - // activeSince returns the time that the connection with the - // peer becomes active. - activeSince() time.Time - // stop performs any necessary finalization and terminates the peer - // elegantly. - stop() -} - -// peer is the representative of a remote raft node. Local raft node sends -// messages to the remote through peer. -// Each peer has two underlying mechanisms to send out a message: stream and -// pipeline. -// A stream is a receiver initialized long-polling connection, which -// is always open to transfer messages. Besides general stream, peer also has -// a optimized stream for sending msgApp since msgApp accounts for large part -// of all messages. Only raft leader uses the optimized stream to send msgApp -// to the remote follower node. -// A pipeline is a series of http clients that send http requests to the remote. -// It is only used when the stream has not been established. -type peer struct { - // id of the remote raft peer node - id types.ID - r Raft - - status *peerStatus - - picker *urlPicker - - msgAppV2Writer *streamWriter - writer *streamWriter - pipeline *pipeline - snapSender *snapshotSender // snapshot sender to send v3 snapshot messages - msgAppV2Reader *streamReader - msgAppReader *streamReader - - recvc chan raftpb.Message - propc chan raftpb.Message - - mu sync.Mutex - paused bool - - cancel context.CancelFunc // cancel pending works in go routine created by peer. 
- stopc chan struct{} -} - -func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer { - plog.Infof("starting peer %s...", peerID) - defer plog.Infof("started peer %s", peerID) - - status := newPeerStatus(peerID) - picker := newURLPicker(urls) - errorc := transport.ErrorC - r := transport.Raft - pipeline := &pipeline{ - peerID: peerID, - tr: transport, - picker: picker, - status: status, - followerStats: fs, - raft: r, - errorc: errorc, - } - pipeline.start() - - p := &peer{ - id: peerID, - r: r, - status: status, - picker: picker, - msgAppV2Writer: startStreamWriter(peerID, status, fs, r), - writer: startStreamWriter(peerID, status, fs, r), - pipeline: pipeline, - snapSender: newSnapshotSender(transport, picker, peerID, status), - recvc: make(chan raftpb.Message, recvBufSize), - propc: make(chan raftpb.Message, maxPendingProposals), - stopc: make(chan struct{}), - } - - ctx, cancel := context.WithCancel(context.Background()) - p.cancel = cancel - go func() { - for { - select { - case mm := <-p.recvc: - if err := r.Process(ctx, mm); err != nil { - plog.Warningf("failed to process raft message (%v)", err) - } - case <-p.stopc: - return - } - } - }() - - // r.Process might block for processing proposal when there is no leader. - // Thus propc must be put into a separate routine with recvc to avoid blocking - // processing other raft messages. - go func() { - for { - select { - case mm := <-p.propc: - if err := r.Process(ctx, mm); err != nil { - plog.Warningf("failed to process raft message (%v)", err) - } - case <-p.stopc: - return - } - } - }() - - p.msgAppV2Reader = &streamReader{ - peerID: peerID, - typ: streamTypeMsgAppV2, - tr: transport, - picker: picker, - status: status, - recvc: p.recvc, - propc: p.propc, - } - p.msgAppReader = &streamReader{ - peerID: peerID, - typ: streamTypeMessage, - tr: transport, - picker: picker, - status: status, - recvc: p.recvc, - propc: p.propc, - } - p.msgAppV2Reader.start() - p.msgAppReader.start() - - return p -} - -func (p *peer) send(m raftpb.Message) { - p.mu.Lock() - paused := p.paused - p.mu.Unlock() - - if paused { - return - } - - writec, name := p.pick(m) - select { - case writec <- m: - default: - p.r.ReportUnreachable(m.To) - if isMsgSnap(m) { - p.r.ReportSnapshot(m.To, raft.SnapshotFailure) - } - if p.status.isActive() { - plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name) - } - plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name) - } -} - -func (p *peer) sendSnap(m snap.Message) { - go p.snapSender.send(m) -} - -func (p *peer) update(urls types.URLs) { - p.picker.update(urls) -} - -func (p *peer) attachOutgoingConn(conn *outgoingConn) { - var ok bool - switch conn.t { - case streamTypeMsgAppV2: - ok = p.msgAppV2Writer.attach(conn) - case streamTypeMessage: - ok = p.writer.attach(conn) - default: - plog.Panicf("unhandled stream type %s", conn.t) - } - if !ok { - conn.Close() - } -} - -func (p *peer) activeSince() time.Time { return p.status.activeSince() } - -// Pause pauses the peer. The peer will simply drops all incoming -// messages without returning an error. -func (p *peer) Pause() { - p.mu.Lock() - defer p.mu.Unlock() - p.paused = true - p.msgAppReader.pause() - p.msgAppV2Reader.pause() -} - -// Resume resumes a paused peer. 
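[Editor's sketch, not part of the patch] peer.send above (and remote.send later in this patch) never blocks the raft loop: if the chosen channel's buffer is full, the message is dropped and the peer is reported unreachable. A reduced sketch of that select/default pattern with strings standing in for raftpb.Message.

package main

import "fmt"

// send drops the message instead of blocking when the buffer is full.
func send(writec chan<- string, m string) bool {
	select {
	case writec <- m:
		return true
	default:
		return false // the caller would report the peer unreachable here
	}
}

func main() {
	c := make(chan string, 1)
	fmt.Println(send(c, "msg-1")) // true: buffered
	fmt.Println(send(c, "msg-2")) // false: buffer full, dropped
}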
-func (p *peer) Resume() { - p.mu.Lock() - defer p.mu.Unlock() - p.paused = false - p.msgAppReader.resume() - p.msgAppV2Reader.resume() -} - -func (p *peer) stop() { - plog.Infof("stopping peer %s...", p.id) - defer plog.Infof("stopped peer %s", p.id) - - close(p.stopc) - p.cancel() - p.msgAppV2Writer.stop() - p.writer.stop() - p.pipeline.stop() - p.snapSender.stop() - p.msgAppV2Reader.stop() - p.msgAppReader.stop() -} - -// pick picks a chan for sending the given message. The picked chan and the picked chan -// string name are returned. -func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) { - var ok bool - // Considering MsgSnap may have a big size, e.g., 1G, and will block - // stream for a long time, only use one of the N pipelines to send MsgSnap. - if isMsgSnap(m) { - return p.pipeline.msgc, pipelineMsg - } else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) { - return writec, streamAppV2 - } else if writec, ok = p.writer.writec(); ok { - return writec, streamMsg - } - return p.pipeline.msgc, pipelineMsg -} - -func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp } - -func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap } diff --git a/vendor/github.com/coreos/etcd/rafthttp/peer_status.go b/vendor/github.com/coreos/etcd/rafthttp/peer_status.go deleted file mode 100644 index 706144f6466..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/peer_status.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rafthttp - -import ( - "fmt" - "sync" - "time" - - "github.com/coreos/etcd/pkg/types" -) - -type failureType struct { - source string - action string -} - -type peerStatus struct { - id types.ID - mu sync.Mutex // protect variables below - active bool - since time.Time -} - -func newPeerStatus(id types.ID) *peerStatus { - return &peerStatus{ - id: id, - } -} - -func (s *peerStatus) activate() { - s.mu.Lock() - defer s.mu.Unlock() - if !s.active { - plog.Infof("peer %s became active", s.id) - s.active = true - s.since = time.Now() - } -} - -func (s *peerStatus) deactivate(failure failureType, reason string) { - s.mu.Lock() - defer s.mu.Unlock() - msg := fmt.Sprintf("failed to %s %s on %s (%s)", failure.action, s.id, failure.source, reason) - if s.active { - plog.Errorf(msg) - plog.Infof("peer %s became inactive", s.id) - s.active = false - s.since = time.Time{} - return - } - plog.Debugf(msg) -} - -func (s *peerStatus) isActive() bool { - s.mu.Lock() - defer s.mu.Unlock() - return s.active -} - -func (s *peerStatus) activeSince() time.Time { - s.mu.Lock() - defer s.mu.Unlock() - return s.since -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go deleted file mode 100644 index ccd9eb78698..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "bytes" - "errors" - "io/ioutil" - "sync" - "time" - - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/httputil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" -) - -const ( - connPerPipeline = 4 - // pipelineBufSize is the size of pipeline buffer, which helps hold the - // temporary network latency. - // The size ensures that pipeline does not drop messages when the network - // is out of work for less than 1 second in good path. 
- pipelineBufSize = 64 -) - -var errStopped = errors.New("stopped") - -type pipeline struct { - peerID types.ID - - tr *Transport - picker *urlPicker - status *peerStatus - raft Raft - errorc chan error - // deprecate when we depercate v2 API - followerStats *stats.FollowerStats - - msgc chan raftpb.Message - // wait for the handling routines - wg sync.WaitGroup - stopc chan struct{} -} - -func (p *pipeline) start() { - p.stopc = make(chan struct{}) - p.msgc = make(chan raftpb.Message, pipelineBufSize) - p.wg.Add(connPerPipeline) - for i := 0; i < connPerPipeline; i++ { - go p.handle() - } - plog.Infof("started HTTP pipelining with peer %s", p.peerID) -} - -func (p *pipeline) stop() { - close(p.stopc) - p.wg.Wait() - plog.Infof("stopped HTTP pipelining with peer %s", p.peerID) -} - -func (p *pipeline) handle() { - defer p.wg.Done() - - for { - select { - case m := <-p.msgc: - start := time.Now() - err := p.post(pbutil.MustMarshal(&m)) - end := time.Now() - - if err != nil { - p.status.deactivate(failureType{source: pipelineMsg, action: "write"}, err.Error()) - - if m.Type == raftpb.MsgApp && p.followerStats != nil { - p.followerStats.Fail() - } - p.raft.ReportUnreachable(m.To) - if isMsgSnap(m) { - p.raft.ReportSnapshot(m.To, raft.SnapshotFailure) - } - sentFailures.WithLabelValues(types.ID(m.To).String()).Inc() - continue - } - - p.status.activate() - if m.Type == raftpb.MsgApp && p.followerStats != nil { - p.followerStats.Succ(end.Sub(start)) - } - if isMsgSnap(m) { - p.raft.ReportSnapshot(m.To, raft.SnapshotFinish) - } - sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(m.Size())) - case <-p.stopc: - return - } - } -} - -// post POSTs a data payload to a url. Returns nil if the POST succeeds, -// error on any failure. -func (p *pipeline) post(data []byte) (err error) { - u := p.picker.pick() - req := createPostRequest(u, RaftPrefix, bytes.NewBuffer(data), "application/protobuf", p.tr.URLs, p.tr.ID, p.tr.ClusterID) - - done := make(chan struct{}, 1) - cancel := httputil.RequestCanceler(req) - go func() { - select { - case <-done: - case <-p.stopc: - waitSchedule() - cancel() - } - }() - - resp, err := p.tr.pipelineRt.RoundTrip(req) - done <- struct{}{} - if err != nil { - p.picker.unreachable(u) - return err - } - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - p.picker.unreachable(u) - return err - } - resp.Body.Close() - - err = checkPostResponse(resp, b, req, p.peerID) - if err != nil { - p.picker.unreachable(u) - // errMemberRemoved is a critical error since a removed member should - // always be stopped. So we use reportCriticalError to report it to errorc. - if err == errMemberRemoved { - reportCriticalError(err, p.errorc) - } - return err - } - - return nil -} - -// waitSchedule waits other goroutines to be scheduled for a while -func waitSchedule() { time.Sleep(time.Millisecond) } diff --git a/vendor/github.com/coreos/etcd/rafthttp/probing_status.go b/vendor/github.com/coreos/etcd/rafthttp/probing_status.go deleted file mode 100644 index c7a3c7ab931..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/probing_status.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
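[Editor's sketch, not part of the patch] pipeline.start above fans a single buffered msgc out to connPerPipeline goroutines, and stop closes stopc and waits on the WaitGroup. A compressed, HTTP-free sketch of that lifecycle in which workers print instead of POSTing.

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4 // mirrors connPerPipeline
	msgc := make(chan string, 64)
	stopc := make(chan struct{})
	var wg sync.WaitGroup

	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for {
				select {
				case m := <-msgc:
					fmt.Println("posted", m) // stands in for the HTTP POST in pipeline.post
				case <-stopc:
					return
				}
			}
		}()
	}

	msgc <- "MsgApp"
	msgc <- "MsgHeartbeat"
	// As in the real pipeline, anything still queued when stopc closes may be
	// dropped; stop only guarantees that the workers exit.
	close(stopc)
	wg.Wait()
}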
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "time" - - "github.com/xiang90/probing" -) - -var ( - // proberInterval must be shorter than read timeout. - // Or the connection will time-out. - proberInterval = ConnReadTimeout - time.Second - statusMonitoringInterval = 30 * time.Second - statusErrorInterval = 5 * time.Second -) - -func addPeerToProber(p probing.Prober, id string, us []string) { - hus := make([]string, len(us)) - for i := range us { - hus[i] = us[i] + ProbingPrefix - } - - p.AddHTTP(id, proberInterval, hus) - - s, err := p.Status(id) - if err != nil { - plog.Errorf("failed to add peer %s into prober", id) - } else { - go monitorProbingStatus(s, id) - } -} - -func monitorProbingStatus(s probing.Status, id string) { - // set the first interval short to log error early. - interval := statusErrorInterval - for { - select { - case <-time.After(interval): - if !s.Health() { - plog.Warningf("health check for peer %s could not connect: %v", id, s.Err()) - interval = statusErrorInterval - } else { - interval = statusMonitoringInterval - } - if s.ClockDiff() > time.Second { - plog.Warningf("the clock difference against peer %s is too high [%v > %v]", id, s.ClockDiff(), time.Second) - } - rtts.WithLabelValues(id).Observe(s.SRTT().Seconds()) - case <-s.StopNotify(): - return - } - } -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/remote.go b/vendor/github.com/coreos/etcd/rafthttp/remote.go deleted file mode 100644 index c62c818235a..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/remote.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
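[Editor's sketch, not part of the patch] monitorProbingStatus above starts with the short error interval so a broken peer is reported quickly, then backs off to the longer monitoring interval once the probe is healthy. A sketch of that back-off with scaled-down intervals and a fake health check.

package main

import (
	"fmt"
	"time"
)

func main() {
	statusError := 5 * time.Millisecond       // scaled-down statusErrorInterval
	statusMonitoring := 20 * time.Millisecond // scaled-down statusMonitoringInterval

	healthy := func(round int) bool { return round > 0 } // pretend only the first probe fails
	interval := statusError                              // start short so the first failure is logged early
	for round := 0; round < 3; round++ {
		<-time.After(interval)
		if !healthy(round) {
			interval = statusError
			fmt.Println("unhealthy: stay on the short error interval")
		} else {
			interval = statusMonitoring
			fmt.Println("healthy: back off to the monitoring interval")
		}
	}
}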
- -package rafthttp - -import ( - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" -) - -type remote struct { - id types.ID - status *peerStatus - pipeline *pipeline -} - -func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote { - picker := newURLPicker(urls) - status := newPeerStatus(id) - pipeline := &pipeline{ - peerID: id, - tr: tr, - picker: picker, - status: status, - raft: tr.Raft, - errorc: tr.ErrorC, - } - pipeline.start() - - return &remote{ - id: id, - status: status, - pipeline: pipeline, - } -} - -func (g *remote) send(m raftpb.Message) { - select { - case g.pipeline.msgc <- m: - default: - if g.status.isActive() { - plog.MergeWarningf("dropped internal raft message to %s since sending buffer is full (bad/overloaded network)", g.id) - } - plog.Debugf("dropped %s to %s since sending buffer is full", m.Type, g.id) - } -} - -func (g *remote) stop() { - g.pipeline.stop() -} - -func (g *remote) Pause() { - g.stop() -} - -func (g *remote) Resume() { - g.pipeline.start() -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go deleted file mode 100644 index 105b330728e..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/coreos/etcd/pkg/httputil" - pioutil "github.com/coreos/etcd/pkg/ioutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/snap" -) - -var ( - // timeout for reading snapshot response body - snapResponseReadTimeout = 5 * time.Second -) - -type snapshotSender struct { - from, to types.ID - cid types.ID - - tr *Transport - picker *urlPicker - status *peerStatus - r Raft - errorc chan error - - stopc chan struct{} -} - -func newSnapshotSender(tr *Transport, picker *urlPicker, to types.ID, status *peerStatus) *snapshotSender { - return &snapshotSender{ - from: tr.ID, - to: to, - cid: tr.ClusterID, - tr: tr, - picker: picker, - status: status, - r: tr.Raft, - errorc: tr.ErrorC, - stopc: make(chan struct{}), - } -} - -func (s *snapshotSender) stop() { close(s.stopc) } - -func (s *snapshotSender) send(merged snap.Message) { - m := merged.Message - - body := createSnapBody(merged) - defer body.Close() - - u := s.picker.pick() - req := createPostRequest(u, RaftSnapshotPrefix, body, "application/octet-stream", s.tr.URLs, s.from, s.cid) - - plog.Infof("start to send database snapshot [index: %d, to %s]...", m.Snapshot.Metadata.Index, types.ID(m.To)) - - err := s.post(req) - defer merged.CloseWithError(err) - if err != nil { - plog.Warningf("database snapshot [index: %d, to: %s] failed to be sent out (%v)", m.Snapshot.Metadata.Index, types.ID(m.To), err) - - // errMemberRemoved is a critical error since a removed member should - // always be stopped. 
So we use reportCriticalError to report it to errorc. - if err == errMemberRemoved { - reportCriticalError(err, s.errorc) - } - - s.picker.unreachable(u) - s.status.deactivate(failureType{source: sendSnap, action: "post"}, err.Error()) - s.r.ReportUnreachable(m.To) - // report SnapshotFailure to raft state machine. After raft state - // machine knows about it, it would pause a while and retry sending - // new snapshot message. - s.r.ReportSnapshot(m.To, raft.SnapshotFailure) - sentFailures.WithLabelValues(types.ID(m.To).String()).Inc() - return - } - s.status.activate() - s.r.ReportSnapshot(m.To, raft.SnapshotFinish) - plog.Infof("database snapshot [index: %d, to: %s] sent out successfully", m.Snapshot.Metadata.Index, types.ID(m.To)) - - sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(merged.TotalSize)) -} - -// post posts the given request. -// It returns nil when request is sent out and processed successfully. -func (s *snapshotSender) post(req *http.Request) (err error) { - cancel := httputil.RequestCanceler(req) - - type responseAndError struct { - resp *http.Response - body []byte - err error - } - result := make(chan responseAndError, 1) - - go func() { - resp, err := s.tr.pipelineRt.RoundTrip(req) - if err != nil { - result <- responseAndError{resp, nil, err} - return - } - - // close the response body when timeouts. - // prevents from reading the body forever when the other side dies right after - // successfully receives the request body. - time.AfterFunc(snapResponseReadTimeout, func() { httputil.GracefulClose(resp) }) - body, err := ioutil.ReadAll(resp.Body) - result <- responseAndError{resp, body, err} - }() - - select { - case <-s.stopc: - cancel() - return errStopped - case r := <-result: - if r.err != nil { - return r.err - } - return checkPostResponse(r.resp, r.body, req, s.to) - } -} - -func createSnapBody(merged snap.Message) io.ReadCloser { - buf := new(bytes.Buffer) - enc := &messageEncoder{w: buf} - // encode raft message - if err := enc.encode(&merged.Message); err != nil { - plog.Panicf("encode message error (%v)", err) - } - - return &pioutil.ReaderAndCloser{ - Reader: io.MultiReader(buf, merged.ReadCloser), - Closer: merged.ReadCloser, - } -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/stream.go b/vendor/github.com/coreos/etcd/rafthttp/stream.go deleted file mode 100644 index e69a44ff65a..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/stream.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
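[Editor's sketch, not part of the patch] snapshotSender.post above runs the round trip in its own goroutine and races the result against stopc, so a peer that is being stopped never waits on a slow snapshot upload. A reduced sketch of that pattern with a sleep standing in for pipelineRt.RoundTrip.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errStopped = errors.New("stopped")

func post(stopc <-chan struct{}) error {
	result := make(chan error, 1)
	go func() {
		time.Sleep(5 * time.Millisecond) // stands in for the real round trip
		result <- nil
	}()
	select {
	case <-stopc:
		return errStopped
	case err := <-result:
		return err
	}
}

func main() {
	fmt.Println(post(make(chan struct{}))) // <nil>: the request finished first
}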
- -package rafthttp - -import ( - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "path" - "strings" - "sync" - "time" - - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/httputil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/version" - "github.com/coreos/go-semver/semver" -) - -const ( - streamTypeMessage streamType = "message" - streamTypeMsgAppV2 streamType = "msgappv2" - - streamBufSize = 4096 -) - -var ( - errUnsupportedStreamType = fmt.Errorf("unsupported stream type") - - // the key is in string format "major.minor.patch" - supportedStream = map[string][]streamType{ - "2.0.0": {}, - "2.1.0": {streamTypeMsgAppV2, streamTypeMessage}, - "2.2.0": {streamTypeMsgAppV2, streamTypeMessage}, - "2.3.0": {streamTypeMsgAppV2, streamTypeMessage}, - "3.0.0": {streamTypeMsgAppV2, streamTypeMessage}, - "3.1.0": {streamTypeMsgAppV2, streamTypeMessage}, - } -) - -type streamType string - -func (t streamType) endpoint() string { - switch t { - case streamTypeMsgAppV2: - return path.Join(RaftStreamPrefix, "msgapp") - case streamTypeMessage: - return path.Join(RaftStreamPrefix, "message") - default: - plog.Panicf("unhandled stream type %v", t) - return "" - } -} - -func (t streamType) String() string { - switch t { - case streamTypeMsgAppV2: - return "stream MsgApp v2" - case streamTypeMessage: - return "stream Message" - default: - return "unknown stream" - } -} - -var ( - // linkHeartbeatMessage is a special message used as heartbeat message in - // link layer. It never conflicts with messages from raft because raft - // doesn't send out messages without From and To fields. - linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat} -) - -func isLinkHeartbeatMessage(m *raftpb.Message) bool { - return m.Type == raftpb.MsgHeartbeat && m.From == 0 && m.To == 0 -} - -type outgoingConn struct { - t streamType - io.Writer - http.Flusher - io.Closer -} - -// streamWriter writes messages to the attached outgoingConn. -type streamWriter struct { - peerID types.ID - status *peerStatus - fs *stats.FollowerStats - r Raft - - mu sync.Mutex // guard field working and closer - closer io.Closer - working bool - - msgc chan raftpb.Message - connc chan *outgoingConn - stopc chan struct{} - done chan struct{} -} - -// startStreamWriter creates a streamWrite and starts a long running go-routine that accepts -// messages and writes to the attached outgoing connection. 
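[Editor's sketch, not part of the patch] The link-layer heartbeat defined above is an ordinary MsgHeartbeat with zero From and To, which raft itself never emits, so isLinkHeartbeatMessage can key on exactly that. A small check of the sentinel, importing the same raftpb package this vendor tree ships.

package main

import (
	"fmt"

	"github.com/coreos/etcd/raft/raftpb"
)

func isLink(m *raftpb.Message) bool {
	return m.Type == raftpb.MsgHeartbeat && m.From == 0 && m.To == 0
}

func main() {
	hb := raftpb.Message{Type: raftpb.MsgHeartbeat}              // link-layer heartbeat
	peerMsg := raftpb.Message{Type: raftpb.MsgHeartbeat, From: 1, To: 2} // real raft heartbeat
	fmt.Println(isLink(&hb), isLink(&peerMsg))                   // true false
}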
-func startStreamWriter(id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter { - w := &streamWriter{ - peerID: id, - status: status, - fs: fs, - r: r, - msgc: make(chan raftpb.Message, streamBufSize), - connc: make(chan *outgoingConn), - stopc: make(chan struct{}), - done: make(chan struct{}), - } - go w.run() - return w -} - -func (cw *streamWriter) run() { - var ( - msgc chan raftpb.Message - heartbeatc <-chan time.Time - t streamType - enc encoder - flusher http.Flusher - batched int - ) - tickc := time.Tick(ConnReadTimeout / 3) - unflushed := 0 - - plog.Infof("started streaming with peer %s (writer)", cw.peerID) - - for { - select { - case <-heartbeatc: - err := enc.encode(&linkHeartbeatMessage) - unflushed += linkHeartbeatMessage.Size() - if err == nil { - flusher.Flush() - batched = 0 - sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed)) - unflushed = 0 - continue - } - - cw.status.deactivate(failureType{source: t.String(), action: "heartbeat"}, err.Error()) - - sentFailures.WithLabelValues(cw.peerID.String()).Inc() - cw.close() - plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) - heartbeatc, msgc = nil, nil - - case m := <-msgc: - err := enc.encode(&m) - if err == nil { - unflushed += m.Size() - - if len(msgc) == 0 || batched > streamBufSize/2 { - flusher.Flush() - sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed)) - unflushed = 0 - batched = 0 - } else { - batched++ - } - - continue - } - - cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error()) - cw.close() - plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) - heartbeatc, msgc = nil, nil - cw.r.ReportUnreachable(m.To) - sentFailures.WithLabelValues(cw.peerID.String()).Inc() - - case conn := <-cw.connc: - cw.mu.Lock() - closed := cw.closeUnlocked() - t = conn.t - switch conn.t { - case streamTypeMsgAppV2: - enc = newMsgAppV2Encoder(conn.Writer, cw.fs) - case streamTypeMessage: - enc = &messageEncoder{w: conn.Writer} - default: - plog.Panicf("unhandled stream type %s", conn.t) - } - flusher = conn.Flusher - unflushed = 0 - cw.status.activate() - cw.closer = conn.Closer - cw.working = true - cw.mu.Unlock() - - if closed { - plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t) - } - plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t) - heartbeatc, msgc = tickc, cw.msgc - case <-cw.stopc: - if cw.close() { - plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) - } - plog.Infof("stopped streaming with peer %s (writer)", cw.peerID) - close(cw.done) - return - } - } -} - -func (cw *streamWriter) writec() (chan<- raftpb.Message, bool) { - cw.mu.Lock() - defer cw.mu.Unlock() - return cw.msgc, cw.working -} - -func (cw *streamWriter) close() bool { - cw.mu.Lock() - defer cw.mu.Unlock() - return cw.closeUnlocked() -} - -func (cw *streamWriter) closeUnlocked() bool { - if !cw.working { - return false - } - cw.closer.Close() - if len(cw.msgc) > 0 { - cw.r.ReportUnreachable(uint64(cw.peerID)) - } - cw.msgc = make(chan raftpb.Message, streamBufSize) - cw.working = false - return true -} - -func (cw *streamWriter) attach(conn *outgoingConn) bool { - select { - case cw.connc <- conn: - return true - case <-cw.done: - return false - } -} - -func (cw *streamWriter) stop() { - close(cw.stopc) - <-cw.done -} - -// streamReader is a long-running go-routine that 
dials to the remote stream -// endpoint and reads messages from the response body returned. -type streamReader struct { - peerID types.ID - typ streamType - - tr *Transport - picker *urlPicker - status *peerStatus - recvc chan<- raftpb.Message - propc chan<- raftpb.Message - - errorc chan<- error - - mu sync.Mutex - paused bool - cancel func() - closer io.Closer - - stopc chan struct{} - done chan struct{} -} - -func (r *streamReader) start() { - r.stopc = make(chan struct{}) - r.done = make(chan struct{}) - if r.errorc == nil { - r.errorc = r.tr.ErrorC - } - - go r.run() -} - -func (cr *streamReader) run() { - t := cr.typ - plog.Infof("started streaming with peer %s (%s reader)", cr.peerID, t) - for { - rc, err := cr.dial(t) - if err != nil { - if err != errUnsupportedStreamType { - cr.status.deactivate(failureType{source: t.String(), action: "dial"}, err.Error()) - } - } else { - cr.status.activate() - plog.Infof("established a TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ) - err := cr.decodeLoop(rc, t) - plog.Warningf("lost the TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ) - switch { - // all data is read out - case err == io.EOF: - // connection is closed by the remote - case isClosedConnectionError(err): - default: - cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error()) - } - } - select { - // Wait 100ms to create a new stream, so it doesn't bring too much - // overhead when retry. - case <-time.After(100 * time.Millisecond): - case <-cr.stopc: - plog.Infof("stopped streaming with peer %s (%s reader)", cr.peerID, t) - close(cr.done) - return - } - } -} - -func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { - var dec decoder - cr.mu.Lock() - switch t { - case streamTypeMsgAppV2: - dec = newMsgAppV2Decoder(rc, cr.tr.ID, cr.peerID) - case streamTypeMessage: - dec = &messageDecoder{r: rc} - default: - plog.Panicf("unhandled stream type %s", t) - } - select { - case <-cr.stopc: - cr.mu.Unlock() - if err := rc.Close(); err != nil { - return err - } - return io.EOF - default: - cr.closer = rc - } - cr.mu.Unlock() - - for { - m, err := dec.decode() - if err != nil { - cr.mu.Lock() - cr.close() - cr.mu.Unlock() - return err - } - - receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size())) - - cr.mu.Lock() - paused := cr.paused - cr.mu.Unlock() - - if paused { - continue - } - - if isLinkHeartbeatMessage(&m) { - // raft is not interested in link layer - // heartbeat message, so we should ignore - // it. 
- continue - } - - recvc := cr.recvc - if m.Type == raftpb.MsgProp { - recvc = cr.propc - } - - select { - case recvc <- m: - default: - if cr.status.isActive() { - plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From)) - } - plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From)) - recvFailures.WithLabelValues(types.ID(m.From).String()).Inc() - } - } -} - -func (cr *streamReader) stop() { - close(cr.stopc) - cr.mu.Lock() - if cr.cancel != nil { - cr.cancel() - } - cr.close() - cr.mu.Unlock() - <-cr.done -} - -func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { - u := cr.picker.pick() - uu := u - uu.Path = path.Join(t.endpoint(), cr.tr.ID.String()) - - req, err := http.NewRequest("GET", uu.String(), nil) - if err != nil { - cr.picker.unreachable(u) - return nil, fmt.Errorf("failed to make http request to %v (%v)", u, err) - } - req.Header.Set("X-Server-From", cr.tr.ID.String()) - req.Header.Set("X-Server-Version", version.Version) - req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion) - req.Header.Set("X-Etcd-Cluster-ID", cr.tr.ClusterID.String()) - req.Header.Set("X-Raft-To", cr.peerID.String()) - - setPeerURLsHeader(req, cr.tr.URLs) - - cr.mu.Lock() - select { - case <-cr.stopc: - cr.mu.Unlock() - return nil, fmt.Errorf("stream reader is stopped") - default: - } - cr.cancel = httputil.RequestCanceler(req) - cr.mu.Unlock() - - resp, err := cr.tr.streamRt.RoundTrip(req) - if err != nil { - cr.picker.unreachable(u) - return nil, err - } - - rv := serverVersion(resp.Header) - lv := semver.Must(semver.NewVersion(version.Version)) - if compareMajorMinorVersion(rv, lv) == -1 && !checkStreamSupport(rv, t) { - httputil.GracefulClose(resp) - cr.picker.unreachable(u) - return nil, errUnsupportedStreamType - } - - switch resp.StatusCode { - case http.StatusGone: - httputil.GracefulClose(resp) - cr.picker.unreachable(u) - reportCriticalError(errMemberRemoved, cr.errorc) - return nil, errMemberRemoved - case http.StatusOK: - return resp.Body, nil - case http.StatusNotFound: - httputil.GracefulClose(resp) - cr.picker.unreachable(u) - return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID) - case http.StatusPreconditionFailed: - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - cr.picker.unreachable(u) - return nil, err - } - httputil.GracefulClose(resp) - cr.picker.unreachable(u) - - switch strings.TrimSuffix(string(b), "\n") { - case errIncompatibleVersion.Error(): - plog.Errorf("request sent was ignored by peer %s (server version incompatible)", cr.peerID) - return nil, errIncompatibleVersion - case errClusterIDMismatch.Error(): - plog.Errorf("request sent was ignored (cluster ID mismatch: peer[%s]=%s, local=%s)", - cr.peerID, resp.Header.Get("X-Etcd-Cluster-ID"), cr.tr.ClusterID) - return nil, errClusterIDMismatch - default: - return nil, fmt.Errorf("unhandled error %q when precondition failed", string(b)) - } - default: - httputil.GracefulClose(resp) - cr.picker.unreachable(u) - return nil, fmt.Errorf("unhandled http status %d", resp.StatusCode) - } -} - -func (cr *streamReader) close() { - if cr.closer != nil { - cr.closer.Close() - } - cr.closer = nil -} - -func (cr *streamReader) pause() { - cr.mu.Lock() - defer cr.mu.Unlock() - cr.paused = true -} - -func (cr *streamReader) resume() { - cr.mu.Lock() - defer cr.mu.Unlock() - cr.paused = false -} - -func isClosedConnectionError(err error) bool { - operr, ok := 
err.(*net.OpError) - return ok && operr.Err.Error() == "use of closed network connection" -} - -// checkStreamSupport checks whether the stream type is supported in the -// given version. -func checkStreamSupport(v *semver.Version, t streamType) bool { - nv := &semver.Version{Major: v.Major, Minor: v.Minor} - for _, s := range supportedStream[nv.String()] { - if s == t { - return true - } - } - return false -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/transport.go b/vendor/github.com/coreos/etcd/rafthttp/transport.go deleted file mode 100644 index 1f0b46836e6..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/transport.go +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "net/http" - "sync" - "time" - - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/logutil" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" - "github.com/coreos/pkg/capnslog" - "github.com/xiang90/probing" - "golang.org/x/net/context" -) - -var plog = logutil.NewMergeLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "rafthttp")) - -type Raft interface { - Process(ctx context.Context, m raftpb.Message) error - IsIDRemoved(id uint64) bool - ReportUnreachable(id uint64) - ReportSnapshot(id uint64, status raft.SnapshotStatus) -} - -type Transporter interface { - // Start starts the given Transporter. - // Start MUST be called before calling other functions in the interface. - Start() error - // Handler returns the HTTP handler of the transporter. - // A transporter HTTP handler handles the HTTP requests - // from remote peers. - // The handler MUST be used to handle RaftPrefix(/raft) - // endpoint. - Handler() http.Handler - // Send sends out the given messages to the remote peers. - // Each message has a To field, which is an id that maps - // to an existing peer in the transport. - // If the id cannot be found in the transport, the message - // will be ignored. - Send(m []raftpb.Message) - // SendSnapshot sends out the given snapshot message to a remote peer. - // The behavior of SendSnapshot is similar to Send. - SendSnapshot(m snap.Message) - // AddRemote adds a remote with given peer urls into the transport. - // A remote helps newly joined member to catch up the progress of cluster, - // and will not be used after that. - // It is the caller's responsibility to ensure the urls are all valid, - // or it panics. - AddRemote(id types.ID, urls []string) - // AddPeer adds a peer with given peer urls into the transport. - // It is the caller's responsibility to ensure the urls are all valid, - // or it panics. - // Peer urls are used to connect to the remote peer. - AddPeer(id types.ID, urls []string) - // RemovePeer removes the peer with given id. - RemovePeer(id types.ID) - // RemoveAllPeers removes all the existing peers in the transport. 
- RemoveAllPeers() - // UpdatePeer updates the peer urls of the peer with the given id. - // It is the caller's responsibility to ensure the urls are all valid, - // or it panics. - UpdatePeer(id types.ID, urls []string) - // ActiveSince returns the time that the connection with the peer - // of the given id becomes active. - // If the connection is active since peer was added, it returns the adding time. - // If the connection is currently inactive, it returns zero time. - ActiveSince(id types.ID) time.Time - // Stop closes the connections and stops the transporter. - Stop() -} - -// Transport implements Transporter interface. It provides the functionality -// to send raft messages to peers, and receive raft messages from peers. -// User should call Handler method to get a handler to serve requests -// received from peerURLs. -// User needs to call Start before calling other functions, and call -// Stop when the Transport is no longer used. -type Transport struct { - DialTimeout time.Duration // maximum duration before timing out dial of the request - TLSInfo transport.TLSInfo // TLS information used when creating connection - - ID types.ID // local member ID - URLs types.URLs // local peer URLs - ClusterID types.ID // raft cluster ID for request validation - Raft Raft // raft state machine, to which the Transport forwards received messages and reports status - Snapshotter *snap.Snapshotter - ServerStats *stats.ServerStats // used to record general transportation statistics - // used to record transportation statistics with followers when - // performing as leader in raft protocol - LeaderStats *stats.LeaderStats - // ErrorC is used to report detected critical errors, e.g., - // the member has been permanently removed from the cluster - // When an error is received from ErrorC, user should stop raft state - // machine and thus stop the Transport. 
- ErrorC chan error - - streamRt http.RoundTripper // roundTripper used by streams - pipelineRt http.RoundTripper // roundTripper used by pipelines - - mu sync.RWMutex // protect the remote and peer map - remotes map[types.ID]*remote // remotes map that helps newly joined member to catch up - peers map[types.ID]Peer // peers map - - prober probing.Prober -} - -func (t *Transport) Start() error { - var err error - t.streamRt, err = newStreamRoundTripper(t.TLSInfo, t.DialTimeout) - if err != nil { - return err - } - t.pipelineRt, err = NewRoundTripper(t.TLSInfo, t.DialTimeout) - if err != nil { - return err - } - t.remotes = make(map[types.ID]*remote) - t.peers = make(map[types.ID]Peer) - t.prober = probing.NewProber(t.pipelineRt) - return nil -} - -func (t *Transport) Handler() http.Handler { - pipelineHandler := newPipelineHandler(t, t.Raft, t.ClusterID) - streamHandler := newStreamHandler(t, t, t.Raft, t.ID, t.ClusterID) - snapHandler := newSnapshotHandler(t, t.Raft, t.Snapshotter, t.ClusterID) - mux := http.NewServeMux() - mux.Handle(RaftPrefix, pipelineHandler) - mux.Handle(RaftStreamPrefix+"/", streamHandler) - mux.Handle(RaftSnapshotPrefix, snapHandler) - mux.Handle(ProbingPrefix, probing.NewHandler()) - return mux -} - -func (t *Transport) Get(id types.ID) Peer { - t.mu.RLock() - defer t.mu.RUnlock() - return t.peers[id] -} - -func (t *Transport) Send(msgs []raftpb.Message) { - for _, m := range msgs { - if m.To == 0 { - // ignore intentionally dropped message - continue - } - to := types.ID(m.To) - - t.mu.RLock() - p, pok := t.peers[to] - g, rok := t.remotes[to] - t.mu.RUnlock() - - if pok { - if m.Type == raftpb.MsgApp { - t.ServerStats.SendAppendReq(m.Size()) - } - p.send(m) - continue - } - - if rok { - g.send(m) - continue - } - - plog.Debugf("ignored message %s (sent to unknown peer %s)", m.Type, to) - } -} - -func (t *Transport) Stop() { - t.mu.Lock() - defer t.mu.Unlock() - for _, r := range t.remotes { - r.stop() - } - for _, p := range t.peers { - p.stop() - } - t.prober.RemoveAll() - if tr, ok := t.streamRt.(*http.Transport); ok { - tr.CloseIdleConnections() - } - if tr, ok := t.pipelineRt.(*http.Transport); ok { - tr.CloseIdleConnections() - } - t.peers = nil - t.remotes = nil -} - -// CutPeer drops messages to the specified peer. -func (t *Transport) CutPeer(id types.ID) { - t.mu.RLock() - p, pok := t.peers[id] - g, gok := t.remotes[id] - t.mu.RUnlock() - - if pok { - p.(Pausable).Pause() - } - if gok { - g.Pause() - } -} - -// MendPeer recovers the message dropping behavior of the given peer. -func (t *Transport) MendPeer(id types.ID) { - t.mu.RLock() - p, pok := t.peers[id] - g, gok := t.remotes[id] - t.mu.RUnlock() - - if pok { - p.(Pausable).Resume() - } - if gok { - g.Resume() - } -} - -func (t *Transport) AddRemote(id types.ID, us []string) { - t.mu.Lock() - defer t.mu.Unlock() - if t.remotes == nil { - // there's no clean way to shutdown the golang http server - // (see: https://github.com/golang/go/issues/4674) before - // stopping the transport; ignore any new connections. 
- return - } - if _, ok := t.peers[id]; ok { - return - } - if _, ok := t.remotes[id]; ok { - return - } - urls, err := types.NewURLs(us) - if err != nil { - plog.Panicf("newURLs %+v should never fail: %+v", us, err) - } - t.remotes[id] = startRemote(t, urls, id) -} - -func (t *Transport) AddPeer(id types.ID, us []string) { - t.mu.Lock() - defer t.mu.Unlock() - - if t.peers == nil { - panic("transport stopped") - } - if _, ok := t.peers[id]; ok { - return - } - urls, err := types.NewURLs(us) - if err != nil { - plog.Panicf("newURLs %+v should never fail: %+v", us, err) - } - fs := t.LeaderStats.Follower(id.String()) - t.peers[id] = startPeer(t, urls, id, fs) - addPeerToProber(t.prober, id.String(), us) - - plog.Infof("added peer %s", id) -} - -func (t *Transport) RemovePeer(id types.ID) { - t.mu.Lock() - defer t.mu.Unlock() - t.removePeer(id) -} - -func (t *Transport) RemoveAllPeers() { - t.mu.Lock() - defer t.mu.Unlock() - for id := range t.peers { - t.removePeer(id) - } -} - -// the caller of this function must have the peers mutex. -func (t *Transport) removePeer(id types.ID) { - if peer, ok := t.peers[id]; ok { - peer.stop() - } else { - plog.Panicf("unexpected removal of unknown peer '%d'", id) - } - delete(t.peers, id) - delete(t.LeaderStats.Followers, id.String()) - t.prober.Remove(id.String()) - plog.Infof("removed peer %s", id) -} - -func (t *Transport) UpdatePeer(id types.ID, us []string) { - t.mu.Lock() - defer t.mu.Unlock() - // TODO: return error or just panic? - if _, ok := t.peers[id]; !ok { - return - } - urls, err := types.NewURLs(us) - if err != nil { - plog.Panicf("newURLs %+v should never fail: %+v", us, err) - } - t.peers[id].update(urls) - - t.prober.Remove(id.String()) - addPeerToProber(t.prober, id.String(), us) - plog.Infof("updated peer %s", id) -} - -func (t *Transport) ActiveSince(id types.ID) time.Time { - t.mu.Lock() - defer t.mu.Unlock() - if p, ok := t.peers[id]; ok { - return p.activeSince() - } - return time.Time{} -} - -func (t *Transport) SendSnapshot(m snap.Message) { - t.mu.Lock() - defer t.mu.Unlock() - p := t.peers[types.ID(m.To)] - if p == nil { - m.CloseWithError(errMemberNotFound) - return - } - p.sendSnap(m) -} - -// Pausable is a testing interface for pausing transport traffic. 
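[Editor's sketch, not part of the patch] Transport.Send earlier in this file takes only a read lock to look up the destination in the peers and remotes maps, then dispatches outside the critical section; peers carry full raft traffic, remotes only help a new member catch up, and unknown destinations are dropped. A reduced sketch with string channels standing in for the real peer and remote types.

package main

import (
	"fmt"
	"sync"
)

type fakeTransport struct {
	mu      sync.RWMutex
	peers   map[uint64]chan string
	remotes map[uint64]chan string
}

func (t *fakeTransport) send(to uint64, m string) {
	t.mu.RLock()
	p, pok := t.peers[to]
	g, rok := t.remotes[to]
	t.mu.RUnlock()

	switch {
	case pok:
		p <- m
	case rok:
		g <- m
	default:
		fmt.Printf("ignored message %s (sent to unknown peer %d)\n", m, to)
	}
}

func main() {
	t := &fakeTransport{
		peers:   map[uint64]chan string{1: make(chan string, 1)},
		remotes: map[uint64]chan string{},
	}
	t.send(1, "MsgApp")
	t.send(9, "MsgApp")
	fmt.Println(<-t.peers[1])
}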
-type Pausable interface { - Pause() - Resume() -} - -func (t *Transport) Pause() { - for _, p := range t.peers { - p.(Pausable).Pause() - } -} - -func (t *Transport) Resume() { - for _, p := range t.peers { - p.(Pausable).Resume() - } -} - -type nopTransporter struct{} - -func NewNopTransporter() Transporter { - return &nopTransporter{} -} - -func (s *nopTransporter) Start() error { return nil } -func (s *nopTransporter) Handler() http.Handler { return nil } -func (s *nopTransporter) Send(m []raftpb.Message) {} -func (s *nopTransporter) SendSnapshot(m snap.Message) {} -func (s *nopTransporter) AddRemote(id types.ID, us []string) {} -func (s *nopTransporter) AddPeer(id types.ID, us []string) {} -func (s *nopTransporter) RemovePeer(id types.ID) {} -func (s *nopTransporter) RemoveAllPeers() {} -func (s *nopTransporter) UpdatePeer(id types.ID, us []string) {} -func (s *nopTransporter) ActiveSince(id types.ID) time.Time { return time.Time{} } -func (s *nopTransporter) Stop() {} -func (s *nopTransporter) Pause() {} -func (s *nopTransporter) Resume() {} - -type snapTransporter struct { - nopTransporter - snapDoneC chan snap.Message - snapDir string -} - -func NewSnapTransporter(snapDir string) (Transporter, <-chan snap.Message) { - ch := make(chan snap.Message, 1) - tr := &snapTransporter{snapDoneC: ch, snapDir: snapDir} - return tr, ch -} - -func (s *snapTransporter) SendSnapshot(m snap.Message) { - ss := snap.New(s.snapDir) - ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1) - m.CloseWithError(nil) - s.snapDoneC <- m -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/urlpick.go b/vendor/github.com/coreos/etcd/rafthttp/urlpick.go deleted file mode 100644 index 61839deeb70..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/urlpick.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "net/url" - "sync" - - "github.com/coreos/etcd/pkg/types" -) - -type urlPicker struct { - mu sync.Mutex // guards urls and picked - urls types.URLs - picked int -} - -func newURLPicker(urls types.URLs) *urlPicker { - return &urlPicker{ - urls: urls, - } -} - -func (p *urlPicker) update(urls types.URLs) { - p.mu.Lock() - defer p.mu.Unlock() - p.urls = urls - p.picked = 0 -} - -func (p *urlPicker) pick() url.URL { - p.mu.Lock() - defer p.mu.Unlock() - return p.urls[p.picked] -} - -// unreachable notices the picker that the given url is unreachable, -// and it should use other possible urls. 
-func (p *urlPicker) unreachable(u url.URL) { - p.mu.Lock() - defer p.mu.Unlock() - if u == p.urls[p.picked] { - p.picked = (p.picked + 1) % len(p.urls) - } -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/util.go b/vendor/github.com/coreos/etcd/rafthttp/util.go deleted file mode 100644 index 61855c52a60..00000000000 --- a/vendor/github.com/coreos/etcd/rafthttp/util.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rafthttp - -import ( - "crypto/tls" - "encoding/binary" - "fmt" - "io" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/version" - "github.com/coreos/go-semver/semver" -) - -var ( - errMemberRemoved = fmt.Errorf("the member has been permanently removed from the cluster") - errMemberNotFound = fmt.Errorf("member not found") -) - -// NewListener returns a listener for raft message transfer between peers. -// It uses timeout listener to identify broken streams promptly. -func NewListener(u url.URL, tlscfg *tls.Config) (net.Listener, error) { - return transport.NewTimeoutListener(u.Host, u.Scheme, tlscfg, ConnReadTimeout, ConnWriteTimeout) -} - -// NewRoundTripper returns a roundTripper used to send requests -// to rafthttp listener of remote peers. -func NewRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) { - // It uses timeout transport to pair with remote timeout listeners. - // It sets no read/write timeout, because message in requests may - // take long time to write out before reading out the response. - return transport.NewTimeoutTransport(tlsInfo, dialTimeout, 0, 0) -} - -// newStreamRoundTripper returns a roundTripper used to send stream requests -// to rafthttp listener of remote peers. -// Read/write timeout is set for stream roundTripper to promptly -// find out broken status, which minimizes the number of messages -// sent on broken connection. -func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) { - return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout) -} - -func writeEntryTo(w io.Writer, ent *raftpb.Entry) error { - size := ent.Size() - if err := binary.Write(w, binary.BigEndian, uint64(size)); err != nil { - return err - } - b, err := ent.Marshal() - if err != nil { - return err - } - _, err = w.Write(b) - return err -} - -func readEntryFrom(r io.Reader, ent *raftpb.Entry) error { - var l uint64 - if err := binary.Read(r, binary.BigEndian, &l); err != nil { - return err - } - buf := make([]byte, int(l)) - if _, err := io.ReadFull(r, buf); err != nil { - return err - } - return ent.Unmarshal(buf) -} - -// createPostRequest creates a HTTP POST request that sends raft message. 
-func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request { - uu := u - uu.Path = path - req, err := http.NewRequest("POST", uu.String(), body) - if err != nil { - plog.Panicf("unexpected new request error (%v)", err) - } - req.Header.Set("Content-Type", ct) - req.Header.Set("X-Server-From", from.String()) - req.Header.Set("X-Server-Version", version.Version) - req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion) - req.Header.Set("X-Etcd-Cluster-ID", cid.String()) - setPeerURLsHeader(req, urls) - - return req -} - -// checkPostResponse checks the response of the HTTP POST request that sends -// raft message. -func checkPostResponse(resp *http.Response, body []byte, req *http.Request, to types.ID) error { - switch resp.StatusCode { - case http.StatusPreconditionFailed: - switch strings.TrimSuffix(string(body), "\n") { - case errIncompatibleVersion.Error(): - plog.Errorf("request sent was ignored by peer %s (server version incompatible)", to) - return errIncompatibleVersion - case errClusterIDMismatch.Error(): - plog.Errorf("request sent was ignored (cluster ID mismatch: remote[%s]=%s, local=%s)", - to, resp.Header.Get("X-Etcd-Cluster-ID"), req.Header.Get("X-Etcd-Cluster-ID")) - return errClusterIDMismatch - default: - return fmt.Errorf("unhandled error %q when precondition failed", string(body)) - } - case http.StatusForbidden: - return errMemberRemoved - case http.StatusNoContent: - return nil - default: - return fmt.Errorf("unexpected http status %s while posting to %q", http.StatusText(resp.StatusCode), req.URL.String()) - } -} - -// reportCriticalError reports the given error through sending it into -// the given error channel. -// If the error channel is filled up when sending error, it drops the error -// because the fact that error has happened is reported, which is -// good enough. -func reportCriticalError(err error, errc chan<- error) { - select { - case errc <- err: - default: - } -} - -// compareMajorMinorVersion returns an integer comparing two versions based on -// their major and minor version. The result will be 0 if a==b, -1 if a < b, -// and 1 if a > b. -func compareMajorMinorVersion(a, b *semver.Version) int { - na := &semver.Version{Major: a.Major, Minor: a.Minor} - nb := &semver.Version{Major: b.Major, Minor: b.Minor} - switch { - case na.LessThan(*nb): - return -1 - case nb.LessThan(*na): - return 1 - default: - return 0 - } -} - -// serverVersion returns the server version from the given header. -func serverVersion(h http.Header) *semver.Version { - verStr := h.Get("X-Server-Version") - // backward compatibility with etcd 2.0 - if verStr == "" { - verStr = "2.0.0" - } - return semver.Must(semver.NewVersion(verStr)) -} - -// serverVersion returns the min cluster version from the given header. -func minClusterVersion(h http.Header) *semver.Version { - verStr := h.Get("X-Min-Cluster-Version") - // backward compatibility with etcd 2.0 - if verStr == "" { - verStr = "2.0.0" - } - return semver.Must(semver.NewVersion(verStr)) -} - -// checkVersionCompability checks whether the given version is compatible -// with the local version. 
-func checkVersionCompability(name string, server, minCluster *semver.Version) error { - localServer := semver.Must(semver.NewVersion(version.Version)) - localMinCluster := semver.Must(semver.NewVersion(version.MinClusterVersion)) - if compareMajorMinorVersion(server, localMinCluster) == -1 { - return fmt.Errorf("remote version is too low: remote[%s]=%s, local=%s", name, server, localServer) - } - if compareMajorMinorVersion(minCluster, localServer) == 1 { - return fmt.Errorf("local version is too low: remote[%s]=%s, local=%s", name, server, localServer) - } - return nil -} - -// setPeerURLsHeader reports local urls for peer discovery -func setPeerURLsHeader(req *http.Request, urls types.URLs) { - if urls == nil { - // often not set in unit tests - return - } - peerURLs := make([]string, urls.Len()) - for i := range urls { - peerURLs[i] = urls[i].String() - } - req.Header.Set("X-PeerURLs", strings.Join(peerURLs, ",")) -} diff --git a/vendor/github.com/coreos/etcd/snap/db.go b/vendor/github.com/coreos/etcd/snap/db.go deleted file mode 100644 index ae3c743f80c..00000000000 --- a/vendor/github.com/coreos/etcd/snap/db.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package snap - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/coreos/etcd/pkg/fileutil" -) - -// SaveDBFrom saves snapshot of the database from the given reader. It -// guarantees the save operation is atomic. -func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { - f, err := ioutil.TempFile(s.dir, "tmp") - if err != nil { - return 0, err - } - var n int64 - n, err = io.Copy(f, r) - if err == nil { - err = fileutil.Fsync(f) - } - f.Close() - if err != nil { - os.Remove(f.Name()) - return n, err - } - fn := filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) - if fileutil.Exist(fn) { - os.Remove(f.Name()) - return n, nil - } - err = os.Rename(f.Name(), fn) - if err != nil { - os.Remove(f.Name()) - return n, err - } - - plog.Infof("saved database snapshot to disk [total bytes: %d]", n) - - return n, nil -} - -// DBFilePath returns the file path for the snapshot of the database with -// given id. If the snapshot does not exist, it returns error. -func (s *Snapshotter) DBFilePath(id uint64) (string, error) { - fns, err := fileutil.ReadDir(s.dir) - if err != nil { - return "", err - } - wfn := fmt.Sprintf("%016x.snap.db", id) - for _, fn := range fns { - if fn == wfn { - return filepath.Join(s.dir, fn), nil - } - } - return "", fmt.Errorf("snap: snapshot file doesn't exist") -} diff --git a/vendor/github.com/coreos/etcd/snap/message.go b/vendor/github.com/coreos/etcd/snap/message.go deleted file mode 100644 index d73713ff169..00000000000 --- a/vendor/github.com/coreos/etcd/snap/message.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package snap - -import ( - "io" - - "github.com/coreos/etcd/pkg/ioutil" - "github.com/coreos/etcd/raft/raftpb" -) - -// Message is a struct that contains a raft Message and a ReadCloser. The type -// of raft message MUST be MsgSnap, which contains the raft meta-data and an -// additional data []byte field that contains the snapshot of the actual state -// machine. -// Message contains the ReadCloser field for handling large snapshot. This avoid -// copying the entire snapshot into a byte array, which consumes a lot of memory. -// -// User of Message should close the Message after sending it. -type Message struct { - raftpb.Message - ReadCloser io.ReadCloser - TotalSize int64 - closeC chan bool -} - -func NewMessage(rs raftpb.Message, rc io.ReadCloser, rcSize int64) *Message { - return &Message{ - Message: rs, - ReadCloser: ioutil.NewExactReadCloser(rc, rcSize), - TotalSize: int64(rs.Size()) + rcSize, - closeC: make(chan bool, 1), - } -} - -// CloseNotify returns a channel that receives a single value -// when the message sent is finished. true indicates the sent -// is successful. -func (m Message) CloseNotify() <-chan bool { - return m.closeC -} - -func (m Message) CloseWithError(err error) { - if cerr := m.ReadCloser.Close(); cerr != nil { - err = cerr - } - if err == nil { - m.closeC <- true - } else { - m.closeC <- false - } -} diff --git a/vendor/github.com/coreos/etcd/snap/metrics.go b/vendor/github.com/coreos/etcd/snap/metrics.go deleted file mode 100644 index 433ef09d4ba..00000000000 --- a/vendor/github.com/coreos/etcd/snap/metrics.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package snap - -import "github.com/prometheus/client_golang/prometheus" - -var ( - // TODO: save_fsync latency? 
- saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "snap", - Name: "save_total_duration_seconds", - Help: "The total latency distributions of save called by snapshot.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd_debugging", - Subsystem: "snap", - Name: "save_marshalling_duration_seconds", - Help: "The marshalling cost distributions of save called by snapshot.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) -) - -func init() { - prometheus.MustRegister(saveDurations) - prometheus.MustRegister(marshallingDurations) -} diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go deleted file mode 100644 index 130e2277c84..00000000000 --- a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go +++ /dev/null @@ -1,353 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: snap.proto -// DO NOT EDIT! - -/* - Package snappb is a generated protocol buffer package. - - It is generated from these files: - snap.proto - - It has these top-level messages: - Snapshot -*/ -package snappb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Snapshot struct { - Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"` - Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnap, []int{0} } - -func init() { - proto.RegisterType((*Snapshot)(nil), "snappb.snapshot") -} -func (m *Snapshot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintSnap(dAtA, i, uint64(m.Crc)) - if m.Data != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintSnap(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Snap(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Snap(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintSnap(dAtA []byte, offset int, v uint64) int { - 
for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Snapshot) Size() (n int) { - var l int - _ = l - n += 1 + sovSnap(uint64(m.Crc)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovSnap(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovSnap(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozSnap(x uint64) (n int) { - return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Snapshot) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) - } - m.Crc = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Crc |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnap - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnap(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSnap - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSnap(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthSnap - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipSnap(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) } - -var fileDescriptorSnap = []byte{ - // 126 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, - 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, - 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c, - 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, - 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24, - 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1, - 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e, - 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/etcd/snap/snapshotter.go b/vendor/github.com/coreos/etcd/snap/snapshotter.go deleted file mode 100644 index 00755592129..00000000000 --- a/vendor/github.com/coreos/etcd/snap/snapshotter.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package snap stores raft nodes' states with snapshots. -package snap - -import ( - "errors" - "fmt" - "hash/crc32" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "time" - - pioutil "github.com/coreos/etcd/pkg/ioutil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap/snappb" - - "github.com/coreos/pkg/capnslog" -) - -const ( - snapSuffix = ".snap" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "snap") - - ErrNoSnapshot = errors.New("snap: no available snapshot") - ErrEmptySnapshot = errors.New("snap: empty snapshot") - ErrCRCMismatch = errors.New("snap: crc mismatch") - crcTable = crc32.MakeTable(crc32.Castagnoli) - - // A map of valid files that can be present in the snap folder. - validFiles = map[string]bool{ - "db": true, - } -) - -type Snapshotter struct { - dir string -} - -func New(dir string) *Snapshotter { - return &Snapshotter{ - dir: dir, - } -} - -func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error { - if raft.IsEmptySnap(snapshot) { - return nil - } - return s.save(&snapshot) -} - -func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error { - start := time.Now() - - fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix) - b := pbutil.MustMarshal(snapshot) - crc := crc32.Update(0, crcTable, b) - snap := snappb.Snapshot{Crc: crc, Data: b} - d, err := snap.Marshal() - if err != nil { - return err - } else { - marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second)) - } - - err = pioutil.WriteAndSyncFile(filepath.Join(s.dir, fname), d, 0666) - if err == nil { - saveDurations.Observe(float64(time.Since(start)) / float64(time.Second)) - } else { - err1 := os.Remove(filepath.Join(s.dir, fname)) - if err1 != nil { - plog.Errorf("failed to remove broken snapshot file %s", filepath.Join(s.dir, fname)) - } - } - return err -} - -func (s *Snapshotter) Load() (*raftpb.Snapshot, error) { - names, err := s.snapNames() - if err != nil { - return nil, err - } - var snap *raftpb.Snapshot - for _, name := range names { - if snap, err = loadSnap(s.dir, name); err == nil { - break - } - } - if err != nil { - return nil, ErrNoSnapshot - } - return snap, nil -} - -func loadSnap(dir, name string) (*raftpb.Snapshot, error) { - fpath := filepath.Join(dir, name) - snap, err := Read(fpath) - if err != nil { - renameBroken(fpath) - } - return snap, err -} - -// Read reads the snapshot named by snapname and returns the snapshot. 
-func Read(snapname string) (*raftpb.Snapshot, error) { - b, err := ioutil.ReadFile(snapname) - if err != nil { - plog.Errorf("cannot read file %v: %v", snapname, err) - return nil, err - } - - if len(b) == 0 { - plog.Errorf("unexpected empty snapshot") - return nil, ErrEmptySnapshot - } - - var serializedSnap snappb.Snapshot - if err = serializedSnap.Unmarshal(b); err != nil { - plog.Errorf("corrupted snapshot file %v: %v", snapname, err) - return nil, err - } - - if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 { - plog.Errorf("unexpected empty snapshot") - return nil, ErrEmptySnapshot - } - - crc := crc32.Update(0, crcTable, serializedSnap.Data) - if crc != serializedSnap.Crc { - plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname) - return nil, ErrCRCMismatch - } - - var snap raftpb.Snapshot - if err = snap.Unmarshal(serializedSnap.Data); err != nil { - plog.Errorf("corrupted snapshot file %v: %v", snapname, err) - return nil, err - } - return &snap, nil -} - -// snapNames returns the filename of the snapshots in logical time order (from newest to oldest). -// If there is no available snapshots, an ErrNoSnapshot will be returned. -func (s *Snapshotter) snapNames() ([]string, error) { - dir, err := os.Open(s.dir) - if err != nil { - return nil, err - } - defer dir.Close() - names, err := dir.Readdirnames(-1) - if err != nil { - return nil, err - } - snaps := checkSuffix(names) - if len(snaps) == 0 { - return nil, ErrNoSnapshot - } - sort.Sort(sort.Reverse(sort.StringSlice(snaps))) - return snaps, nil -} - -func checkSuffix(names []string) []string { - snaps := []string{} - for i := range names { - if strings.HasSuffix(names[i], snapSuffix) { - snaps = append(snaps, names[i]) - } else { - // If we find a file which is not a snapshot then check if it's - // a vaild file. If not throw out a warning. - if _, ok := validFiles[names[i]]; !ok { - plog.Warningf("skipped unexpected non snapshot file %v", names[i]) - } - } - } - return snaps -} - -func renameBroken(path string) { - brokenPath := path + ".broken" - if err := os.Rename(path, brokenPath); err != nil { - plog.Warningf("cannot rename broken snapshot file %v to %v: %v", path, brokenPath, err) - } -} diff --git a/vendor/github.com/coreos/etcd/store/doc.go b/vendor/github.com/coreos/etcd/store/doc.go deleted file mode 100644 index 612df927976..00000000000 --- a/vendor/github.com/coreos/etcd/store/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package store defines etcd's in-memory key/value store. -package store diff --git a/vendor/github.com/coreos/etcd/store/event.go b/vendor/github.com/coreos/etcd/store/event.go deleted file mode 100644 index efcddb0e053..00000000000 --- a/vendor/github.com/coreos/etcd/store/event.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -const ( - Get = "get" - Create = "create" - Set = "set" - Update = "update" - Delete = "delete" - CompareAndSwap = "compareAndSwap" - CompareAndDelete = "compareAndDelete" - Expire = "expire" -) - -type Event struct { - Action string `json:"action"` - Node *NodeExtern `json:"node,omitempty"` - PrevNode *NodeExtern `json:"prevNode,omitempty"` - EtcdIndex uint64 `json:"-"` - Refresh bool `json:"refresh,omitempty"` -} - -func newEvent(action string, key string, modifiedIndex, createdIndex uint64) *Event { - n := &NodeExtern{ - Key: key, - ModifiedIndex: modifiedIndex, - CreatedIndex: createdIndex, - } - - return &Event{ - Action: action, - Node: n, - } -} - -func (e *Event) IsCreated() bool { - if e.Action == Create { - return true - } - return e.Action == Set && e.PrevNode == nil -} - -func (e *Event) Index() uint64 { - return e.Node.ModifiedIndex -} - -func (e *Event) Clone() *Event { - return &Event{ - Action: e.Action, - EtcdIndex: e.EtcdIndex, - Node: e.Node.Clone(), - PrevNode: e.PrevNode.Clone(), - } -} - -func (e *Event) SetRefresh() { - e.Refresh = true -} diff --git a/vendor/github.com/coreos/etcd/store/event_history.go b/vendor/github.com/coreos/etcd/store/event_history.go deleted file mode 100644 index 235d87a2664..00000000000 --- a/vendor/github.com/coreos/etcd/store/event_history.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -import ( - "fmt" - "path" - "strings" - "sync" - - etcdErr "github.com/coreos/etcd/error" -) - -type EventHistory struct { - Queue eventQueue - StartIndex uint64 - LastIndex uint64 - rwl sync.RWMutex -} - -func newEventHistory(capacity int) *EventHistory { - return &EventHistory{ - Queue: eventQueue{ - Capacity: capacity, - Events: make([]*Event, capacity), - }, - } -} - -// addEvent function adds event into the eventHistory -func (eh *EventHistory) addEvent(e *Event) *Event { - eh.rwl.Lock() - defer eh.rwl.Unlock() - - eh.Queue.insert(e) - - eh.LastIndex = e.Index() - - eh.StartIndex = eh.Queue.Events[eh.Queue.Front].Index() - - return e -} - -// scan enumerates events from the index history and stops at the first point -// where the key matches. 
-func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, *etcdErr.Error) { - eh.rwl.RLock() - defer eh.rwl.RUnlock() - - // index should be after the event history's StartIndex - if index < eh.StartIndex { - return nil, - etcdErr.NewError(etcdErr.EcodeEventIndexCleared, - fmt.Sprintf("the requested history has been cleared [%v/%v]", - eh.StartIndex, index), 0) - } - - // the index should come before the size of the queue minus the duplicate count - if index > eh.LastIndex { // future index - return nil, nil - } - - offset := index - eh.StartIndex - i := (eh.Queue.Front + int(offset)) % eh.Queue.Capacity - - for { - e := eh.Queue.Events[i] - - if !e.Refresh { - ok := (e.Node.Key == key) - - if recursive { - // add tailing slash - nkey := path.Clean(key) - if nkey[len(nkey)-1] != '/' { - nkey = nkey + "/" - } - - ok = ok || strings.HasPrefix(e.Node.Key, nkey) - } - - if (e.Action == Delete || e.Action == Expire) && e.PrevNode != nil && e.PrevNode.Dir { - ok = ok || strings.HasPrefix(key, e.PrevNode.Key) - } - - if ok { - return e, nil - } - } - - i = (i + 1) % eh.Queue.Capacity - - if i == eh.Queue.Back { - return nil, nil - } - } -} - -// clone will be protected by a stop-world lock -// do not need to obtain internal lock -func (eh *EventHistory) clone() *EventHistory { - clonedQueue := eventQueue{ - Capacity: eh.Queue.Capacity, - Events: make([]*Event, eh.Queue.Capacity), - Size: eh.Queue.Size, - Front: eh.Queue.Front, - Back: eh.Queue.Back, - } - - copy(clonedQueue.Events, eh.Queue.Events) - return &EventHistory{ - StartIndex: eh.StartIndex, - Queue: clonedQueue, - LastIndex: eh.LastIndex, - } - -} diff --git a/vendor/github.com/coreos/etcd/store/event_queue.go b/vendor/github.com/coreos/etcd/store/event_queue.go deleted file mode 100644 index 767b835913e..00000000000 --- a/vendor/github.com/coreos/etcd/store/event_queue.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -type eventQueue struct { - Events []*Event - Size int - Front int - Back int - Capacity int -} - -func (eq *eventQueue) insert(e *Event) { - eq.Events[eq.Back] = e - eq.Back = (eq.Back + 1) % eq.Capacity - - if eq.Size == eq.Capacity { //dequeue - eq.Front = (eq.Front + 1) % eq.Capacity - } else { - eq.Size++ - } -} diff --git a/vendor/github.com/coreos/etcd/store/metrics.go b/vendor/github.com/coreos/etcd/store/metrics.go deleted file mode 100644 index 26404ba72e3..00000000000 --- a/vendor/github.com/coreos/etcd/store/metrics.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -// Set of raw Prometheus metrics. -// Labels -// * action = declared in event.go -// * outcome = Outcome -// Do not increment directly, use Report* methods. -var ( - readCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "reads_total", - Help: "Total number of reads action by (get/getRecursive), local to this member.", - }, []string{"action"}) - - writeCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "writes_total", - Help: "Total number of writes (e.g. set/compareAndDelete) seen by this member.", - }, []string{"action"}) - - readFailedCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "reads_failed_total", - Help: "Failed read actions by (get/getRecursive), local to this member.", - }, []string{"action"}) - - writeFailedCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "writes_failed_total", - Help: "Failed write actions (e.g. set/compareAndDelete), seen by this member.", - }, []string{"action"}) - - expireCounter = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "expires_total", - Help: "Total number of expired keys.", - }) - - watchRequests = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "watch_requests_total", - Help: "Total number of incoming watch requests (new or reestablished).", - }) - - watcherCount = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "store", - Name: "watchers", - Help: "Count of currently active watchers.", - }) -) - -const ( - GetRecursive = "getRecursive" -) - -func init() { - prometheus.MustRegister(readCounter) - prometheus.MustRegister(writeCounter) - prometheus.MustRegister(expireCounter) - prometheus.MustRegister(watchRequests) - prometheus.MustRegister(watcherCount) -} - -func reportReadSuccess(read_action string) { - readCounter.WithLabelValues(read_action).Inc() -} - -func reportReadFailure(read_action string) { - readCounter.WithLabelValues(read_action).Inc() - readFailedCounter.WithLabelValues(read_action).Inc() -} - -func reportWriteSuccess(write_action string) { - writeCounter.WithLabelValues(write_action).Inc() -} - -func reportWriteFailure(write_action string) { - writeCounter.WithLabelValues(write_action).Inc() - writeFailedCounter.WithLabelValues(write_action).Inc() -} - -func reportExpiredKey() { - expireCounter.Inc() -} - -func reportWatchRequest() { - watchRequests.Inc() -} - -func reportWatcherAdded() { - watcherCount.Inc() -} - -func reportWatcherRemoved() { - watcherCount.Dec() -} diff --git a/vendor/github.com/coreos/etcd/store/node.go b/vendor/github.com/coreos/etcd/store/node.go deleted file mode 100644 index 731327b08ba..00000000000 --- a/vendor/github.com/coreos/etcd/store/node.go +++ 
/dev/null @@ -1,396 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -import ( - "path" - "sort" - "time" - - etcdErr "github.com/coreos/etcd/error" - "github.com/jonboulle/clockwork" -) - -// explanations of Compare function result -const ( - CompareMatch = iota - CompareIndexNotMatch - CompareValueNotMatch - CompareNotMatch -) - -var Permanent time.Time - -// node is the basic element in the store system. -// A key-value pair will have a string value -// A directory will have a children map -type node struct { - Path string - - CreatedIndex uint64 - ModifiedIndex uint64 - - Parent *node `json:"-"` // should not encode this field! avoid circular dependency. - - ExpireTime time.Time - Value string // for key-value pair - Children map[string]*node // for directory - - // A reference to the store this node is attached to. - store *store -} - -// newKV creates a Key-Value pair -func newKV(store *store, nodePath string, value string, createdIndex uint64, parent *node, expireTime time.Time) *node { - return &node{ - Path: nodePath, - CreatedIndex: createdIndex, - ModifiedIndex: createdIndex, - Parent: parent, - store: store, - ExpireTime: expireTime, - Value: value, - } -} - -// newDir creates a directory -func newDir(store *store, nodePath string, createdIndex uint64, parent *node, expireTime time.Time) *node { - return &node{ - Path: nodePath, - CreatedIndex: createdIndex, - ModifiedIndex: createdIndex, - Parent: parent, - ExpireTime: expireTime, - Children: make(map[string]*node), - store: store, - } -} - -// IsHidden function checks if the node is a hidden node. A hidden node -// will begin with '_' -// A hidden node will not be shown via get command under a directory -// For example if we have /foo/_hidden and /foo/notHidden, get "/foo" -// will only return /foo/notHidden -func (n *node) IsHidden() bool { - _, name := path.Split(n.Path) - - return name[0] == '_' -} - -// IsPermanent function checks if the node is a permanent one. -func (n *node) IsPermanent() bool { - // we use a uninitialized time.Time to indicate the node is a - // permanent one. - // the uninitialized time.Time should equal zero. - return n.ExpireTime.IsZero() -} - -// IsDir function checks whether the node is a directory. -// If the node is a directory, the function will return true. -// Otherwise the function will return false. -func (n *node) IsDir() bool { - return n.Children != nil -} - -// Read function gets the value of the node. -// If the receiver node is not a key-value pair, a "Not A File" error will be returned. -func (n *node) Read() (string, *etcdErr.Error) { - if n.IsDir() { - return "", etcdErr.NewError(etcdErr.EcodeNotFile, "", n.store.CurrentIndex) - } - - return n.Value, nil -} - -// Write function set the value of the node to the given value. -// If the receiver node is a directory, a "Not A File" error will be returned. 
-func (n *node) Write(value string, index uint64) *etcdErr.Error { - if n.IsDir() { - return etcdErr.NewError(etcdErr.EcodeNotFile, "", n.store.CurrentIndex) - } - - n.Value = value - n.ModifiedIndex = index - - return nil -} - -func (n *node) expirationAndTTL(clock clockwork.Clock) (*time.Time, int64) { - if !n.IsPermanent() { - /* compute ttl as: - ceiling( (expireTime - timeNow) / nanosecondsPerSecond ) - which ranges from 1..n - rather than as: - ( (expireTime - timeNow) / nanosecondsPerSecond ) + 1 - which ranges 1..n+1 - */ - ttlN := n.ExpireTime.Sub(clock.Now()) - ttl := ttlN / time.Second - if (ttlN % time.Second) > 0 { - ttl++ - } - t := n.ExpireTime.UTC() - return &t, int64(ttl) - } - return nil, 0 -} - -// List function return a slice of nodes under the receiver node. -// If the receiver node is not a directory, a "Not A Directory" error will be returned. -func (n *node) List() ([]*node, *etcdErr.Error) { - if !n.IsDir() { - return nil, etcdErr.NewError(etcdErr.EcodeNotDir, "", n.store.CurrentIndex) - } - - nodes := make([]*node, len(n.Children)) - - i := 0 - for _, node := range n.Children { - nodes[i] = node - i++ - } - - return nodes, nil -} - -// GetChild function returns the child node under the directory node. -// On success, it returns the file node -func (n *node) GetChild(name string) (*node, *etcdErr.Error) { - if !n.IsDir() { - return nil, etcdErr.NewError(etcdErr.EcodeNotDir, n.Path, n.store.CurrentIndex) - } - - child, ok := n.Children[name] - - if ok { - return child, nil - } - - return nil, nil -} - -// Add function adds a node to the receiver node. -// If the receiver is not a directory, a "Not A Directory" error will be returned. -// If there is an existing node with the same name under the directory, a "Already Exist" -// error will be returned -func (n *node) Add(child *node) *etcdErr.Error { - if !n.IsDir() { - return etcdErr.NewError(etcdErr.EcodeNotDir, "", n.store.CurrentIndex) - } - - _, name := path.Split(child.Path) - - if _, ok := n.Children[name]; ok { - return etcdErr.NewError(etcdErr.EcodeNodeExist, "", n.store.CurrentIndex) - } - - n.Children[name] = child - - return nil -} - -// Remove function remove the node. 
-func (n *node) Remove(dir, recursive bool, callback func(path string)) *etcdErr.Error { - if !n.IsDir() { // key-value pair - _, name := path.Split(n.Path) - - // find its parent and remove the node from the map - if n.Parent != nil && n.Parent.Children[name] == n { - delete(n.Parent.Children, name) - } - - if callback != nil { - callback(n.Path) - } - - if !n.IsPermanent() { - n.store.ttlKeyHeap.remove(n) - } - - return nil - } - - if !dir { - // cannot delete a directory without dir set to true - return etcdErr.NewError(etcdErr.EcodeNotFile, n.Path, n.store.CurrentIndex) - } - - if len(n.Children) != 0 && !recursive { - // cannot delete a directory if it is not empty and the operation - // is not recursive - return etcdErr.NewError(etcdErr.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex) - } - - for _, child := range n.Children { // delete all children - child.Remove(true, true, callback) - } - - // delete self - _, name := path.Split(n.Path) - if n.Parent != nil && n.Parent.Children[name] == n { - delete(n.Parent.Children, name) - - if callback != nil { - callback(n.Path) - } - - if !n.IsPermanent() { - n.store.ttlKeyHeap.remove(n) - } - } - - return nil -} - -func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern { - if n.IsDir() { - node := &NodeExtern{ - Key: n.Path, - Dir: true, - ModifiedIndex: n.ModifiedIndex, - CreatedIndex: n.CreatedIndex, - } - node.Expiration, node.TTL = n.expirationAndTTL(clock) - - if !recursive { - return node - } - - children, _ := n.List() - node.Nodes = make(NodeExterns, len(children)) - - // we do not use the index in the children slice directly - // we need to skip the hidden one - i := 0 - - for _, child := range children { - - if child.IsHidden() { // get will not list hidden node - continue - } - - node.Nodes[i] = child.Repr(recursive, sorted, clock) - - i++ - } - - // eliminate hidden nodes - node.Nodes = node.Nodes[:i] - if sorted { - sort.Sort(node.Nodes) - } - - return node - } - - // since n.Value could be changed later, so we need to copy the value out - value := n.Value - node := &NodeExtern{ - Key: n.Path, - Value: &value, - ModifiedIndex: n.ModifiedIndex, - CreatedIndex: n.CreatedIndex, - } - node.Expiration, node.TTL = n.expirationAndTTL(clock) - return node -} - -func (n *node) UpdateTTL(expireTime time.Time) { - if !n.IsPermanent() { - if expireTime.IsZero() { - // from ttl to permanent - n.ExpireTime = expireTime - // remove from ttl heap - n.store.ttlKeyHeap.remove(n) - return - } - - // update ttl - n.ExpireTime = expireTime - // update ttl heap - n.store.ttlKeyHeap.update(n) - return - } - - if expireTime.IsZero() { - return - } - - // from permanent to ttl - n.ExpireTime = expireTime - // push into ttl heap - n.store.ttlKeyHeap.push(n) - return -} - -// Compare function compares node index and value with provided ones. -// second result value explains result and equals to one of Compare.. constants -func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) { - indexMatch := (prevIndex == 0 || n.ModifiedIndex == prevIndex) - valueMatch := (prevValue == "" || n.Value == prevValue) - ok = valueMatch && indexMatch - switch { - case valueMatch && indexMatch: - which = CompareMatch - case indexMatch && !valueMatch: - which = CompareValueNotMatch - case valueMatch && !indexMatch: - which = CompareIndexNotMatch - default: - which = CompareNotMatch - } - return -} - -// Clone function clone the node recursively and return the new node. 
-// If the node is a directory, it will clone all the content under this directory. -// If the node is a key-value pair, it will clone the pair. -func (n *node) Clone() *node { - if !n.IsDir() { - newkv := newKV(n.store, n.Path, n.Value, n.CreatedIndex, n.Parent, n.ExpireTime) - newkv.ModifiedIndex = n.ModifiedIndex - return newkv - } - - clone := newDir(n.store, n.Path, n.CreatedIndex, n.Parent, n.ExpireTime) - clone.ModifiedIndex = n.ModifiedIndex - - for key, child := range n.Children { - clone.Children[key] = child.Clone() - } - - return clone -} - -// recoverAndclean function help to do recovery. -// Two things need to be done: 1. recovery structure; 2. delete expired nodes -// -// If the node is a directory, it will help recover children's parent pointer and recursively -// call this function on its children. -// We check the expire last since we need to recover the whole structure first and add all the -// notifications into the event history. -func (n *node) recoverAndclean() { - if n.IsDir() { - for _, child := range n.Children { - child.Parent = n - child.store = n.store - child.recoverAndclean() - } - } - - if !n.ExpireTime.IsZero() { - n.store.ttlKeyHeap.push(n) - } -} diff --git a/vendor/github.com/coreos/etcd/store/node_extern.go b/vendor/github.com/coreos/etcd/store/node_extern.go deleted file mode 100644 index 7ba870cbe7b..00000000000 --- a/vendor/github.com/coreos/etcd/store/node_extern.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package store - -import ( - "sort" - "time" - - "github.com/jonboulle/clockwork" -) - -// NodeExtern is the external representation of the -// internal node with additional fields -// PrevValue is the previous value of the node -// TTL is time to live in second -type NodeExtern struct { - Key string `json:"key,omitempty"` - Value *string `json:"value,omitempty"` - Dir bool `json:"dir,omitempty"` - Expiration *time.Time `json:"expiration,omitempty"` - TTL int64 `json:"ttl,omitempty"` - Nodes NodeExterns `json:"nodes,omitempty"` - ModifiedIndex uint64 `json:"modifiedIndex,omitempty"` - CreatedIndex uint64 `json:"createdIndex,omitempty"` -} - -func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock clockwork.Clock) { - if n.IsDir() { // node is a directory - eNode.Dir = true - - children, _ := n.List() - eNode.Nodes = make(NodeExterns, len(children)) - - // we do not use the index in the children slice directly - // we need to skip the hidden one - i := 0 - - for _, child := range children { - if child.IsHidden() { // get will not return hidden nodes - continue - } - - eNode.Nodes[i] = child.Repr(recursive, sorted, clock) - i++ - } - - // eliminate hidden nodes - eNode.Nodes = eNode.Nodes[:i] - - if sorted { - sort.Sort(eNode.Nodes) - } - - } else { // node is a file - value, _ := n.Read() - eNode.Value = &value - } - - eNode.Expiration, eNode.TTL = n.expirationAndTTL(clock) -} - -func (eNode *NodeExtern) Clone() *NodeExtern { - if eNode == nil { - return nil - } - nn := &NodeExtern{ - Key: eNode.Key, - Dir: eNode.Dir, - TTL: eNode.TTL, - ModifiedIndex: eNode.ModifiedIndex, - CreatedIndex: eNode.CreatedIndex, - } - if eNode.Value != nil { - s := *eNode.Value - nn.Value = &s - } - if eNode.Expiration != nil { - t := *eNode.Expiration - nn.Expiration = &t - } - if eNode.Nodes != nil { - nn.Nodes = make(NodeExterns, len(eNode.Nodes)) - for i, n := range eNode.Nodes { - nn.Nodes[i] = n.Clone() - } - } - return nn -} - -type NodeExterns []*NodeExtern - -// interfaces for sorting - -func (ns NodeExterns) Len() int { - return len(ns) -} - -func (ns NodeExterns) Less(i, j int) bool { - return ns[i].Key < ns[j].Key -} - -func (ns NodeExterns) Swap(i, j int) { - ns[i], ns[j] = ns[j], ns[i] -} diff --git a/vendor/github.com/coreos/etcd/store/stats.go b/vendor/github.com/coreos/etcd/store/stats.go deleted file mode 100644 index 59b45f2b8b3..00000000000 --- a/vendor/github.com/coreos/etcd/store/stats.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package store - -import ( - "encoding/json" - "sync/atomic" -) - -const ( - SetSuccess = iota - SetFail - DeleteSuccess - DeleteFail - CreateSuccess - CreateFail - UpdateSuccess - UpdateFail - CompareAndSwapSuccess - CompareAndSwapFail - GetSuccess - GetFail - ExpireCount - CompareAndDeleteSuccess - CompareAndDeleteFail -) - -type Stats struct { - - // Number of get requests - - GetSuccess uint64 `json:"getsSuccess"` - GetFail uint64 `json:"getsFail"` - - // Number of sets requests - - SetSuccess uint64 `json:"setsSuccess"` - SetFail uint64 `json:"setsFail"` - - // Number of delete requests - - DeleteSuccess uint64 `json:"deleteSuccess"` - DeleteFail uint64 `json:"deleteFail"` - - // Number of update requests - - UpdateSuccess uint64 `json:"updateSuccess"` - UpdateFail uint64 `json:"updateFail"` - - // Number of create requests - - CreateSuccess uint64 `json:"createSuccess"` - CreateFail uint64 `json:"createFail"` - - // Number of testAndSet requests - - CompareAndSwapSuccess uint64 `json:"compareAndSwapSuccess"` - CompareAndSwapFail uint64 `json:"compareAndSwapFail"` - - // Number of compareAndDelete requests - - CompareAndDeleteSuccess uint64 `json:"compareAndDeleteSuccess"` - CompareAndDeleteFail uint64 `json:"compareAndDeleteFail"` - - ExpireCount uint64 `json:"expireCount"` - - Watchers uint64 `json:"watchers"` -} - -func newStats() *Stats { - s := new(Stats) - return s -} - -func (s *Stats) clone() *Stats { - return &Stats{ - GetSuccess: s.GetSuccess, - GetFail: s.GetFail, - SetSuccess: s.SetSuccess, - SetFail: s.SetFail, - DeleteSuccess: s.DeleteSuccess, - DeleteFail: s.DeleteFail, - UpdateSuccess: s.UpdateSuccess, - UpdateFail: s.UpdateFail, - CreateSuccess: s.CreateSuccess, - CreateFail: s.CreateFail, - CompareAndSwapSuccess: s.CompareAndSwapSuccess, - CompareAndSwapFail: s.CompareAndSwapFail, - CompareAndDeleteSuccess: s.CompareAndDeleteSuccess, - CompareAndDeleteFail: s.CompareAndDeleteFail, - ExpireCount: s.ExpireCount, - Watchers: s.Watchers, - } -} - -func (s *Stats) toJson() []byte { - b, _ := json.Marshal(s) - return b -} - -func (s *Stats) Inc(field int) { - switch field { - case SetSuccess: - atomic.AddUint64(&s.SetSuccess, 1) - case SetFail: - atomic.AddUint64(&s.SetFail, 1) - case CreateSuccess: - atomic.AddUint64(&s.CreateSuccess, 1) - case CreateFail: - atomic.AddUint64(&s.CreateFail, 1) - case DeleteSuccess: - atomic.AddUint64(&s.DeleteSuccess, 1) - case DeleteFail: - atomic.AddUint64(&s.DeleteFail, 1) - case GetSuccess: - atomic.AddUint64(&s.GetSuccess, 1) - case GetFail: - atomic.AddUint64(&s.GetFail, 1) - case UpdateSuccess: - atomic.AddUint64(&s.UpdateSuccess, 1) - case UpdateFail: - atomic.AddUint64(&s.UpdateFail, 1) - case CompareAndSwapSuccess: - atomic.AddUint64(&s.CompareAndSwapSuccess, 1) - case CompareAndSwapFail: - atomic.AddUint64(&s.CompareAndSwapFail, 1) - case CompareAndDeleteSuccess: - atomic.AddUint64(&s.CompareAndDeleteSuccess, 1) - case CompareAndDeleteFail: - atomic.AddUint64(&s.CompareAndDeleteFail, 1) - case ExpireCount: - atomic.AddUint64(&s.ExpireCount, 1) - } -} diff --git a/vendor/github.com/coreos/etcd/store/store.go b/vendor/github.com/coreos/etcd/store/store.go deleted file mode 100644 index 6c19ad4c970..00000000000 --- a/vendor/github.com/coreos/etcd/store/store.go +++ /dev/null @@ -1,788 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -import ( - "encoding/json" - "fmt" - "path" - "strconv" - "strings" - "sync" - "time" - - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/pkg/types" - "github.com/jonboulle/clockwork" -) - -// The default version to set when the store is first initialized. -const defaultVersion = 2 - -var minExpireTime time.Time - -func init() { - minExpireTime, _ = time.Parse(time.RFC3339, "2000-01-01T00:00:00Z") -} - -type Store interface { - Version() int - Index() uint64 - - Get(nodePath string, recursive, sorted bool) (*Event, error) - Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) - Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) - Create(nodePath string, dir bool, value string, unique bool, - expireOpts TTLOptionSet) (*Event, error) - CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, - value string, expireOpts TTLOptionSet) (*Event, error) - Delete(nodePath string, dir, recursive bool) (*Event, error) - CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) - - Watch(prefix string, recursive, stream bool, sinceIndex uint64) (Watcher, error) - - Save() ([]byte, error) - Recovery(state []byte) error - - Clone() Store - SaveNoCopy() ([]byte, error) - - JsonStats() []byte - DeleteExpiredKeys(cutoff time.Time) - - HasTTLKeys() bool -} - -type TTLOptionSet struct { - ExpireTime time.Time - Refresh bool -} - -type store struct { - Root *node - WatcherHub *watcherHub - CurrentIndex uint64 - Stats *Stats - CurrentVersion int - ttlKeyHeap *ttlKeyHeap // need to recovery manually - worldLock sync.RWMutex // stop the world lock - clock clockwork.Clock - readonlySet types.Set -} - -// New creates a store where the given namespaces will be created as initial directories. -func New(namespaces ...string) Store { - s := newStore(namespaces...) - s.clock = clockwork.NewRealClock() - return s -} - -func newStore(namespaces ...string) *store { - s := new(store) - s.CurrentVersion = defaultVersion - s.Root = newDir(s, "/", s.CurrentIndex, nil, Permanent) - for _, namespace := range namespaces { - s.Root.Add(newDir(s, namespace, s.CurrentIndex, s.Root, Permanent)) - } - s.Stats = newStats() - s.WatcherHub = newWatchHub(1000) - s.ttlKeyHeap = newTtlKeyHeap() - s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...) - return s -} - -// Version retrieves current version of the store. -func (s *store) Version() int { - return s.CurrentVersion -} - -// Index retrieves the current index of the store. -func (s *store) Index() uint64 { - s.worldLock.RLock() - defer s.worldLock.RUnlock() - return s.CurrentIndex -} - -// Get returns a get event. -// If recursive is true, it will return all the content under the node path. -// If sorted is true, it will sort the content by keys. 
-func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) { - var err *etcdErr.Error - - s.worldLock.RLock() - defer s.worldLock.RUnlock() - - defer func() { - if err == nil { - s.Stats.Inc(GetSuccess) - if recursive { - reportReadSuccess(GetRecursive) - } else { - reportReadSuccess(Get) - } - return - } - - s.Stats.Inc(GetFail) - if recursive { - reportReadFailure(GetRecursive) - } else { - reportReadFailure(Get) - } - }() - - n, err := s.internalGet(nodePath) - if err != nil { - return nil, err - } - - e := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex) - e.EtcdIndex = s.CurrentIndex - e.Node.loadInternalNode(n, recursive, sorted, s.clock) - - return e, nil -} - -// Create creates the node at nodePath. Create will help to create intermediate directories with no ttl. -// If the node has already existed, create will fail. -// If any node on the path is a file, create will fail. -func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) { - var err *etcdErr.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(CreateSuccess) - reportWriteSuccess(Create) - return - } - - s.Stats.Inc(CreateFail) - reportWriteFailure(Create) - }() - - e, err := s.internalCreate(nodePath, dir, value, unique, false, expireOpts.ExpireTime, Create) - if err != nil { - return nil, err - } - - e.EtcdIndex = s.CurrentIndex - s.WatcherHub.notify(e) - - return e, nil -} - -// Set creates or replace the node at nodePath. -func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) { - var err *etcdErr.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(SetSuccess) - reportWriteSuccess(Set) - return - } - - s.Stats.Inc(SetFail) - reportWriteFailure(Set) - }() - - // Get prevNode value - n, getErr := s.internalGet(nodePath) - if getErr != nil && getErr.ErrorCode != etcdErr.EcodeKeyNotFound { - err = getErr - return nil, err - } - - if expireOpts.Refresh { - if getErr != nil { - err = getErr - return nil, err - } else { - value = n.Value - } - } - - // Set new value - e, err := s.internalCreate(nodePath, dir, value, false, true, expireOpts.ExpireTime, Set) - if err != nil { - return nil, err - } - e.EtcdIndex = s.CurrentIndex - - // Put prevNode into event - if getErr == nil { - prev := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex) - prev.Node.loadInternalNode(n, false, false, s.clock) - e.PrevNode = prev.Node - } - - if !expireOpts.Refresh { - s.WatcherHub.notify(e) - } else { - e.SetRefresh() - s.WatcherHub.add(e) - } - - return e, nil -} - -// returns user-readable cause of failed comparison -func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) string { - switch which { - case CompareIndexNotMatch: - return fmt.Sprintf("[%v != %v]", prevIndex, n.ModifiedIndex) - case CompareValueNotMatch: - return fmt.Sprintf("[%v != %v]", prevValue, n.Value) - default: - return fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, n.Value, prevIndex, n.ModifiedIndex) - } -} - -func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, - value string, expireOpts TTLOptionSet) (*Event, error) { - - var err *etcdErr.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(CompareAndSwapSuccess) - reportWriteSuccess(CompareAndSwap) - return - } - - s.Stats.Inc(CompareAndSwapFail) - 
reportWriteFailure(CompareAndSwap) - }() - - nodePath = path.Clean(path.Join("/", nodePath)) - // we do not allow the user to change "/" - if s.readonlySet.Contains(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex) - } - - n, err := s.internalGet(nodePath) - if err != nil { - return nil, err - } - if n.IsDir() { // can only compare and swap file - err = etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, s.CurrentIndex) - return nil, err - } - - // If both of the prevValue and prevIndex are given, we will test both of them. - // Command will be executed, only if both of the tests are successful. - if ok, which := n.Compare(prevValue, prevIndex); !ok { - cause := getCompareFailCause(n, which, prevValue, prevIndex) - err = etcdErr.NewError(etcdErr.EcodeTestFailed, cause, s.CurrentIndex) - return nil, err - } - - if expireOpts.Refresh { - value = n.Value - } - - // update etcd index - s.CurrentIndex++ - - e := newEvent(CompareAndSwap, nodePath, s.CurrentIndex, n.CreatedIndex) - e.EtcdIndex = s.CurrentIndex - e.PrevNode = n.Repr(false, false, s.clock) - eNode := e.Node - - // if test succeed, write the value - n.Write(value, s.CurrentIndex) - n.UpdateTTL(expireOpts.ExpireTime) - - // copy the value for safety - valueCopy := value - eNode.Value = &valueCopy - eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) - - if !expireOpts.Refresh { - s.WatcherHub.notify(e) - } else { - e.SetRefresh() - s.WatcherHub.add(e) - } - - return e, nil -} - -// Delete deletes the node at the given path. -// If the node is a directory, recursive must be true to delete it. -func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) { - var err *etcdErr.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(DeleteSuccess) - reportWriteSuccess(Delete) - return - } - - s.Stats.Inc(DeleteFail) - reportWriteFailure(Delete) - }() - - nodePath = path.Clean(path.Join("/", nodePath)) - // we do not allow the user to change "/" - if s.readonlySet.Contains(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex) - } - - // recursive implies dir - if recursive { - dir = true - } - - n, err := s.internalGet(nodePath) - if err != nil { // if the node does not exist, return error - return nil, err - } - - nextIndex := s.CurrentIndex + 1 - e := newEvent(Delete, nodePath, nextIndex, n.CreatedIndex) - e.EtcdIndex = nextIndex - e.PrevNode = n.Repr(false, false, s.clock) - eNode := e.Node - - if n.IsDir() { - eNode.Dir = true - } - - callback := func(path string) { // notify function - // notify the watchers with deleted set true - s.WatcherHub.notifyWatchers(e, path, true) - } - - err = n.Remove(dir, recursive, callback) - if err != nil { - return nil, err - } - - // update etcd index - s.CurrentIndex++ - - s.WatcherHub.notify(e) - - return e, nil -} - -func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) { - var err *etcdErr.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(CompareAndDeleteSuccess) - reportWriteSuccess(CompareAndDelete) - return - } - - s.Stats.Inc(CompareAndDeleteFail) - reportWriteFailure(CompareAndDelete) - }() - - nodePath = path.Clean(path.Join("/", nodePath)) - - n, err := s.internalGet(nodePath) - if err != nil { // if the node does not exist, return error - return nil, err - } - if n.IsDir() { // can only compare and delete file - return nil, 
etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, s.CurrentIndex) - } - - // If both of the prevValue and prevIndex are given, we will test both of them. - // Command will be executed, only if both of the tests are successful. - if ok, which := n.Compare(prevValue, prevIndex); !ok { - cause := getCompareFailCause(n, which, prevValue, prevIndex) - return nil, etcdErr.NewError(etcdErr.EcodeTestFailed, cause, s.CurrentIndex) - } - - // update etcd index - s.CurrentIndex++ - - e := newEvent(CompareAndDelete, nodePath, s.CurrentIndex, n.CreatedIndex) - e.EtcdIndex = s.CurrentIndex - e.PrevNode = n.Repr(false, false, s.clock) - - callback := func(path string) { // notify function - // notify the watchers with deleted set true - s.WatcherHub.notifyWatchers(e, path, true) - } - - err = n.Remove(false, false, callback) - if err != nil { - return nil, err - } - - s.WatcherHub.notify(e) - - return e, nil -} - -func (s *store) Watch(key string, recursive, stream bool, sinceIndex uint64) (Watcher, error) { - s.worldLock.RLock() - defer s.worldLock.RUnlock() - - key = path.Clean(path.Join("/", key)) - if sinceIndex == 0 { - sinceIndex = s.CurrentIndex + 1 - } - // WatcherHub does not know about the current index, so we need to pass it in - w, err := s.WatcherHub.watch(key, recursive, stream, sinceIndex, s.CurrentIndex) - if err != nil { - return nil, err - } - - return w, nil -} - -// walk walks all the nodePath and apply the walkFunc on each directory -func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *etcdErr.Error)) (*node, *etcdErr.Error) { - components := strings.Split(nodePath, "/") - - curr := s.Root - var err *etcdErr.Error - - for i := 1; i < len(components); i++ { - if len(components[i]) == 0 { // ignore empty string - return curr, nil - } - - curr, err = walkFunc(curr, components[i]) - if err != nil { - return nil, err - } - } - - return curr, nil -} - -// Update updates the value/ttl of the node. -// If the node is a file, the value and the ttl can be updated. -// If the node is a directory, only the ttl can be updated. 
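The Update contract above, together with the Refresh branch in its body below, lets a caller extend a key's TTL without rewriting its value or waking watchers; a small sketch of that refresh pattern (the key name and durations are illustrative):

	package main

	import (
		"log"
		"time"

		"github.com/coreos/etcd/store"
	)

	func main() {
		s := store.New()

		// Create a key that expires in five seconds.
		exp := store.TTLOptionSet{ExpireTime: time.Now().Add(5 * time.Second)}
		if _, err := s.Set("/session/abc", false, "token", exp); err != nil {
			log.Fatal(err)
		}

		// Refresh only the TTL: the stored value is kept, and per the Refresh
		// branch in Update the event is added to history without notifying
		// waiting watchers.
		refresh := store.TTLOptionSet{ExpireTime: time.Now().Add(5 * time.Second), Refresh: true}
		if _, err := s.Update("/session/abc", "", refresh); err != nil {
			log.Fatal(err)
		}
	}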
-func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) { - var err *etcdErr.Error - - s.worldLock.Lock() - defer s.worldLock.Unlock() - - defer func() { - if err == nil { - s.Stats.Inc(UpdateSuccess) - reportWriteSuccess(Update) - return - } - - s.Stats.Inc(UpdateFail) - reportWriteFailure(Update) - }() - - nodePath = path.Clean(path.Join("/", nodePath)) - // we do not allow the user to change "/" - if s.readonlySet.Contains(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex) - } - - currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1 - - n, err := s.internalGet(nodePath) - if err != nil { // if the node does not exist, return error - return nil, err - } - if n.IsDir() && len(newValue) != 0 { - // if the node is a directory, we cannot update value to non-empty - return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, currIndex) - } - - if expireOpts.Refresh { - newValue = n.Value - } - - e := newEvent(Update, nodePath, nextIndex, n.CreatedIndex) - e.EtcdIndex = nextIndex - e.PrevNode = n.Repr(false, false, s.clock) - eNode := e.Node - - n.Write(newValue, nextIndex) - - if n.IsDir() { - eNode.Dir = true - } else { - // copy the value for safety - newValueCopy := newValue - eNode.Value = &newValueCopy - } - - // update ttl - n.UpdateTTL(expireOpts.ExpireTime) - - eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) - - if !expireOpts.Refresh { - s.WatcherHub.notify(e) - } else { - e.SetRefresh() - s.WatcherHub.add(e) - } - - s.CurrentIndex = nextIndex - - return e, nil -} - -func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool, - expireTime time.Time, action string) (*Event, *etcdErr.Error) { - - currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1 - - if unique { // append unique item under the node path - nodePath += "/" + fmt.Sprintf("%020s", strconv.FormatUint(nextIndex, 10)) - } - - nodePath = path.Clean(path.Join("/", nodePath)) - - // we do not allow the user to change "/" - if s.readonlySet.Contains(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", currIndex) - } - - // Assume expire times that are way in the past are - // This can occur when the time is serialized to JS - if expireTime.Before(minExpireTime) { - expireTime = Permanent - } - - dirName, nodeName := path.Split(nodePath) - - // walk through the nodePath, create dirs and get the last directory node - d, err := s.walk(dirName, s.checkDir) - - if err != nil { - s.Stats.Inc(SetFail) - reportWriteFailure(action) - err.Index = currIndex - return nil, err - } - - e := newEvent(action, nodePath, nextIndex, nextIndex) - eNode := e.Node - - n, _ := d.GetChild(nodeName) - - // force will try to replace an existing file - if n != nil { - if replace { - if n.IsDir() { - return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, currIndex) - } - e.PrevNode = n.Repr(false, false, s.clock) - - n.Remove(false, false, nil) - } else { - return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, currIndex) - } - } - - if !dir { // create file - // copy the value for safety - valueCopy := value - eNode.Value = &valueCopy - - n = newKV(s, nodePath, value, nextIndex, d, expireTime) - - } else { // create directory - eNode.Dir = true - - n = newDir(s, nodePath, nextIndex, d, expireTime) - } - - // we are sure d is a directory and does not have the children with name n.Name - d.Add(n) - - // node with TTL - if !n.IsPermanent() { - s.ttlKeyHeap.push(n) - - eNode.Expiration, 
eNode.TTL = n.expirationAndTTL(s.clock) - } - - s.CurrentIndex = nextIndex - - return e, nil -} - -// InternalGet gets the node of the given nodePath. -func (s *store) internalGet(nodePath string) (*node, *etcdErr.Error) { - nodePath = path.Clean(path.Join("/", nodePath)) - - walkFunc := func(parent *node, name string) (*node, *etcdErr.Error) { - - if !parent.IsDir() { - err := etcdErr.NewError(etcdErr.EcodeNotDir, parent.Path, s.CurrentIndex) - return nil, err - } - - child, ok := parent.Children[name] - if ok { - return child, nil - } - - return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex) - } - - f, err := s.walk(nodePath, walkFunc) - - if err != nil { - return nil, err - } - return f, nil -} - -// DeleteExpiredKeys will delete all expired keys -func (s *store) DeleteExpiredKeys(cutoff time.Time) { - s.worldLock.Lock() - defer s.worldLock.Unlock() - - for { - node := s.ttlKeyHeap.top() - if node == nil || node.ExpireTime.After(cutoff) { - break - } - - s.CurrentIndex++ - e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex) - e.EtcdIndex = s.CurrentIndex - e.PrevNode = node.Repr(false, false, s.clock) - - callback := func(path string) { // notify function - // notify the watchers with deleted set true - s.WatcherHub.notifyWatchers(e, path, true) - } - - s.ttlKeyHeap.pop() - node.Remove(true, true, callback) - - reportExpiredKey() - s.Stats.Inc(ExpireCount) - - s.WatcherHub.notify(e) - } - -} - -// checkDir will check whether the component is a directory under parent node. -// If it is a directory, this function will return the pointer to that node. -// If it does not exist, this function will create a new directory and return the pointer to that node. -// If it is a file, this function will return error. -func (s *store) checkDir(parent *node, dirName string) (*node, *etcdErr.Error) { - node, ok := parent.Children[dirName] - - if ok { - if node.IsDir() { - return node, nil - } - - return nil, etcdErr.NewError(etcdErr.EcodeNotDir, node.Path, s.CurrentIndex) - } - - n := newDir(s, path.Join(parent.Path, dirName), s.CurrentIndex+1, parent, Permanent) - - parent.Children[dirName] = n - - return n, nil -} - -// Save saves the static state of the store system. -// It will not be able to save the state of watchers. -// It will not save the parent field of the node. Or there will -// be cyclic dependencies issue for the json package. -func (s *store) Save() ([]byte, error) { - b, err := json.Marshal(s.Clone()) - if err != nil { - return nil, err - } - - return b, nil -} - -func (s *store) SaveNoCopy() ([]byte, error) { - b, err := json.Marshal(s) - if err != nil { - return nil, err - } - - return b, nil -} - -func (s *store) Clone() Store { - s.worldLock.Lock() - - clonedStore := newStore() - clonedStore.CurrentIndex = s.CurrentIndex - clonedStore.Root = s.Root.Clone() - clonedStore.WatcherHub = s.WatcherHub.clone() - clonedStore.Stats = s.Stats.clone() - clonedStore.CurrentVersion = s.CurrentVersion - - s.worldLock.Unlock() - return clonedStore -} - -// Recovery recovers the store system from a static state -// It needs to recover the parent field of the nodes. -// It needs to delete the expired nodes since the saved time and also -// needs to create monitoring go routines. 
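Save and Recovery, shown above and below, round-trip the whole tree through JSON while dropping watchers and parent pointers; a brief sketch of that round trip under the same assumptions as the earlier snippets (vendored import path, illustrative keys):

	package main

	import (
		"fmt"
		"log"

		"github.com/coreos/etcd/store"
	)

	func main() {
		src := store.New()
		if _, err := src.Set("/backup/demo", false, "v1", store.TTLOptionSet{}); err != nil {
			log.Fatal(err)
		}

		// Save marshals a clone of the store; watchers and node parent
		// pointers are deliberately left out of the snapshot.
		state, err := src.Save()
		if err != nil {
			log.Fatal(err)
		}

		// Recovery rebuilds a store from that state, re-linking parents and
		// rebuilding the TTL heap.
		dst := store.New()
		if err := dst.Recovery(state); err != nil {
			log.Fatal(err)
		}

		ev, err := dst.Get("/backup/demo", false, false)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(*ev.Node.Value) // v1
	}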
-func (s *store) Recovery(state []byte) error { - s.worldLock.Lock() - defer s.worldLock.Unlock() - err := json.Unmarshal(state, s) - - if err != nil { - return err - } - - s.ttlKeyHeap = newTtlKeyHeap() - - s.Root.recoverAndclean() - return nil -} - -func (s *store) JsonStats() []byte { - s.Stats.Watchers = uint64(s.WatcherHub.count) - return s.Stats.toJson() -} - -func (s *store) HasTTLKeys() bool { - s.worldLock.RLock() - defer s.worldLock.RUnlock() - return s.ttlKeyHeap.Len() != 0 -} diff --git a/vendor/github.com/coreos/etcd/store/ttl_key_heap.go b/vendor/github.com/coreos/etcd/store/ttl_key_heap.go deleted file mode 100644 index 21ae9b7c699..00000000000 --- a/vendor/github.com/coreos/etcd/store/ttl_key_heap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -import ( - "container/heap" -) - -// An TTLKeyHeap is a min-heap of TTLKeys order by expiration time -type ttlKeyHeap struct { - array []*node - keyMap map[*node]int -} - -func newTtlKeyHeap() *ttlKeyHeap { - h := &ttlKeyHeap{keyMap: make(map[*node]int)} - heap.Init(h) - return h -} - -func (h ttlKeyHeap) Len() int { - return len(h.array) -} - -func (h ttlKeyHeap) Less(i, j int) bool { - return h.array[i].ExpireTime.Before(h.array[j].ExpireTime) -} - -func (h ttlKeyHeap) Swap(i, j int) { - // swap node - h.array[i], h.array[j] = h.array[j], h.array[i] - - // update map - h.keyMap[h.array[i]] = i - h.keyMap[h.array[j]] = j -} - -func (h *ttlKeyHeap) Push(x interface{}) { - n, _ := x.(*node) - h.keyMap[n] = len(h.array) - h.array = append(h.array, n) -} - -func (h *ttlKeyHeap) Pop() interface{} { - old := h.array - n := len(old) - x := old[n-1] - // Set slice element to nil, so GC can recycle the node. - // This is due to golang GC doesn't support partial recycling: - // https://github.com/golang/go/issues/9618 - old[n-1] = nil - h.array = old[0 : n-1] - delete(h.keyMap, x) - return x -} - -func (h *ttlKeyHeap) top() *node { - if h.Len() != 0 { - return h.array[0] - } - return nil -} - -func (h *ttlKeyHeap) pop() *node { - x := heap.Pop(h) - n, _ := x.(*node) - return n -} - -func (h *ttlKeyHeap) push(x interface{}) { - heap.Push(h, x) -} - -func (h *ttlKeyHeap) update(n *node) { - index, ok := h.keyMap[n] - if ok { - heap.Remove(h, index) - heap.Push(h, n) - } -} - -func (h *ttlKeyHeap) remove(n *node) { - index, ok := h.keyMap[n] - if ok { - heap.Remove(h, index) - } -} diff --git a/vendor/github.com/coreos/etcd/store/watcher.go b/vendor/github.com/coreos/etcd/store/watcher.go deleted file mode 100644 index a236ec77703..00000000000 --- a/vendor/github.com/coreos/etcd/store/watcher.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -type Watcher interface { - EventChan() chan *Event - StartIndex() uint64 // The EtcdIndex at which the Watcher was created - Remove() -} - -type watcher struct { - eventChan chan *Event - stream bool - recursive bool - sinceIndex uint64 - startIndex uint64 - hub *watcherHub - removed bool - remove func() -} - -func (w *watcher) EventChan() chan *Event { - return w.eventChan -} - -func (w *watcher) StartIndex() uint64 { - return w.startIndex -} - -// notify function notifies the watcher. If the watcher interests in the given path, -// the function will return true. -func (w *watcher) notify(e *Event, originalPath bool, deleted bool) bool { - // watcher is interested the path in three cases and under one condition - // the condition is that the event happens after the watcher's sinceIndex - - // 1. the path at which the event happens is the path the watcher is watching at. - // For example if the watcher is watching at "/foo" and the event happens at "/foo", - // the watcher must be interested in that event. - - // 2. the watcher is a recursive watcher, it interests in the event happens after - // its watching path. For example if watcher A watches at "/foo" and it is a recursive - // one, it will interest in the event happens at "/foo/bar". - - // 3. when we delete a directory, we need to force notify all the watchers who watches - // at the file we need to delete. - // For example a watcher is watching at "/foo/bar". And we deletes "/foo". The watcher - // should get notified even if "/foo" is not the path it is watching. - if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex { - // We cannot block here if the eventChan capacity is full, otherwise - // etcd will hang. eventChan capacity is full when the rate of - // notifications are higher than our send rate. - // If this happens, we close the channel. - select { - case w.eventChan <- e: - default: - // We have missed a notification. Remove the watcher. - // Removing the watcher also closes the eventChan. - w.remove() - } - return true - } - return false -} - -// Remove removes the watcher from watcherHub -// The actual remove function is guaranteed to only be executed once -func (w *watcher) Remove() { - w.hub.mutex.Lock() - defer w.hub.mutex.Unlock() - - close(w.eventChan) - if w.remove != nil { - w.remove() - } -} - -// nopWatcher is a watcher that receives nothing, always blocking. -type nopWatcher struct{} - -func NewNopWatcher() Watcher { return &nopWatcher{} } -func (w *nopWatcher) EventChan() chan *Event { return nil } -func (w *nopWatcher) StartIndex() uint64 { return 0 } -func (w *nopWatcher) Remove() {} diff --git a/vendor/github.com/coreos/etcd/store/watcher_hub.go b/vendor/github.com/coreos/etcd/store/watcher_hub.go deleted file mode 100644 index 6dd63f3c541..00000000000 --- a/vendor/github.com/coreos/etcd/store/watcher_hub.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package store - -import ( - "container/list" - "path" - "strings" - "sync" - "sync/atomic" - - etcdErr "github.com/coreos/etcd/error" -) - -// A watcherHub contains all subscribed watchers -// watchers is a map with watched path as key and watcher as value -// EventHistory keeps the old events for watcherHub. It is used to help -// watcher to get a continuous event history. Or a watcher might miss the -// event happens between the end of the first watch command and the start -// of the second command. -type watcherHub struct { - // count must be the first element to keep 64-bit alignment for atomic - // access - - count int64 // current number of watchers. - - mutex sync.Mutex - watchers map[string]*list.List - EventHistory *EventHistory -} - -// newWatchHub creates a watcherHub. The capacity determines how many events we will -// keep in the eventHistory. -// Typically, we only need to keep a small size of history[smaller than 20K]. -// Ideally, it should smaller than 20K/s[max throughput] * 2 * 50ms[RTT] = 2000 -func newWatchHub(capacity int) *watcherHub { - return &watcherHub{ - watchers: make(map[string]*list.List), - EventHistory: newEventHistory(capacity), - } -} - -// Watch function returns a Watcher. -// If recursive is true, the first change after index under key will be sent to the event channel of the watcher. -// If recursive is false, the first change after index at key will be sent to the event channel of the watcher. -// If index is zero, watch will start from the current index + 1. -func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *etcdErr.Error) { - reportWatchRequest() - event, err := wh.EventHistory.scan(key, recursive, index) - - if err != nil { - err.Index = storeIndex - return nil, err - } - - w := &watcher{ - eventChan: make(chan *Event, 100), // use a buffered channel - recursive: recursive, - stream: stream, - sinceIndex: index, - startIndex: storeIndex, - hub: wh, - } - - wh.mutex.Lock() - defer wh.mutex.Unlock() - // If the event exists in the known history, append the EtcdIndex and return immediately - if event != nil { - ne := event.Clone() - ne.EtcdIndex = storeIndex - w.eventChan <- ne - return w, nil - } - - l, ok := wh.watchers[key] - - var elem *list.Element - - if ok { // add the new watcher to the back of the list - elem = l.PushBack(w) - } else { // create a new list and add the new watcher - l = list.New() - elem = l.PushBack(w) - wh.watchers[key] = l - } - - w.remove = func() { - if w.removed { // avoid removing it twice - return - } - w.removed = true - l.Remove(elem) - atomic.AddInt64(&wh.count, -1) - reportWatcherRemoved() - if l.Len() == 0 { - delete(wh.watchers, key) - } - } - - atomic.AddInt64(&wh.count, 1) - reportWatcherAdded() - - return w, nil -} - -func (wh *watcherHub) add(e *Event) { - e = wh.EventHistory.addEvent(e) -} - -// notify function accepts an event and notify to the watchers. 
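From the consumer side, the hub behaviour described above looks roughly like this; a sketch that registers a recursive watcher and then writes under the watched prefix, relying on the 100-slot buffered event channel so the single-goroutine read below cannot block (names are illustrative):

	package main

	import (
		"fmt"
		"log"

		"github.com/coreos/etcd/store"
	)

	func main() {
		s := store.New()

		// Recursive, non-streaming watch on /jobs; sinceIndex 0 means
		// "start at the store's current index + 1".
		w, err := s.Watch("/jobs", true, false, 0)
		if err != nil {
			log.Fatal(err)
		}

		// The notify walk visits "/", "/jobs" and "/jobs/1", so this write
		// reaches the watcher registered at "/jobs".
		if _, err := s.Set("/jobs/1", false, "queued", store.TTLOptionSet{}); err != nil {
			log.Fatal(err)
		}

		e := <-w.EventChan()
		fmt.Println(e.Node.Key, "=", *e.Node.Value) // /jobs/1 = queued
	}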
-func (wh *watcherHub) notify(e *Event) { - e = wh.EventHistory.addEvent(e) // add event into the eventHistory - - segments := strings.Split(e.Node.Key, "/") - - currPath := "/" - - // walk through all the segments of the path and notify the watchers - // if the path is "/foo/bar", it will notify watchers with path "/", - // "/foo" and "/foo/bar" - - for _, segment := range segments { - currPath = path.Join(currPath, segment) - // notify the watchers who interests in the changes of current path - wh.notifyWatchers(e, currPath, false) - } -} - -func (wh *watcherHub) notifyWatchers(e *Event, nodePath string, deleted bool) { - wh.mutex.Lock() - defer wh.mutex.Unlock() - - l, ok := wh.watchers[nodePath] - if ok { - curr := l.Front() - - for curr != nil { - next := curr.Next() // save reference to the next one in the list - - w, _ := curr.Value.(*watcher) - - originalPath := (e.Node.Key == nodePath) - if (originalPath || !isHidden(nodePath, e.Node.Key)) && w.notify(e, originalPath, deleted) { - if !w.stream { // do not remove the stream watcher - // if we successfully notify a watcher - // we need to remove the watcher from the list - // and decrease the counter - w.removed = true - l.Remove(curr) - atomic.AddInt64(&wh.count, -1) - reportWatcherRemoved() - } - } - - curr = next // update current to the next element in the list - } - - if l.Len() == 0 { - // if we have notified all watcher in the list - // we can delete the list - delete(wh.watchers, nodePath) - } - } -} - -// clone function clones the watcherHub and return the cloned one. -// only clone the static content. do not clone the current watchers. -func (wh *watcherHub) clone() *watcherHub { - clonedHistory := wh.EventHistory.clone() - - return &watcherHub{ - EventHistory: clonedHistory, - } -} - -// isHidden checks to see if key path is considered hidden to watch path i.e. the -// last element is hidden or it's within a hidden directory -func isHidden(watchPath, keyPath string) bool { - // When deleting a directory, watchPath might be deeper than the actual keyPath - // For example, when deleting /foo we also need to notify watchers on /foo/bar. - if len(watchPath) > len(keyPath) { - return false - } - // if watch path is just a "/", after path will start without "/" - // add a "/" to deal with the special case when watchPath is "/" - afterPath := path.Clean("/" + keyPath[len(watchPath):]) - return strings.Contains(afterPath, "/_") -} diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go deleted file mode 100644 index 0173d6f11d3..00000000000 --- a/vendor/github.com/coreos/etcd/version/version.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package version implements etcd version parsing and contains latest version -// information. 
-package version - -import ( - "fmt" - "strings" - - "github.com/coreos/go-semver/semver" -) - -var ( - // MinClusterVersion is the min cluster version this etcd binary is compatible with. - MinClusterVersion = "3.0.0" - Version = "3.1.10" - APIVersion = "unknown" - - // Git SHA Value will be set during build - GitSHA = "Not provided (use ./build instead of go build)" -) - -func init() { - ver, err := semver.NewVersion(Version) - if err == nil { - APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) - } -} - -type Versions struct { - Server string `json:"etcdserver"` - Cluster string `json:"etcdcluster"` - // TODO: raft state machine version -} - -// Cluster only keeps the major.minor. -func Cluster(v string) string { - vs := strings.Split(v, ".") - if len(vs) <= 2 { - return v - } - return fmt.Sprintf("%s.%s", vs[0], vs[1]) -} diff --git a/vendor/github.com/coreos/etcd/wal/decoder.go b/vendor/github.com/coreos/etcd/wal/decoder.go deleted file mode 100644 index 0d9b4428c94..00000000000 --- a/vendor/github.com/coreos/etcd/wal/decoder.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "bufio" - "encoding/binary" - "hash" - "io" - "sync" - - "github.com/coreos/etcd/pkg/crc" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/wal/walpb" -) - -const minSectorSize = 512 - -type decoder struct { - mu sync.Mutex - brs []*bufio.Reader - - // lastValidOff file offset following the last valid decoded record - lastValidOff int64 - crc hash.Hash32 -} - -func newDecoder(r ...io.Reader) *decoder { - readers := make([]*bufio.Reader, len(r)) - for i := range r { - readers[i] = bufio.NewReader(r[i]) - } - return &decoder{ - brs: readers, - crc: crc.New(0, crcTable), - } -} - -func (d *decoder) decode(rec *walpb.Record) error { - rec.Reset() - d.mu.Lock() - defer d.mu.Unlock() - return d.decodeRecord(rec) -} - -func (d *decoder) decodeRecord(rec *walpb.Record) error { - if len(d.brs) == 0 { - return io.EOF - } - - l, err := readInt64(d.brs[0]) - if err == io.EOF || (err == nil && l == 0) { - // hit end of file or preallocated space - d.brs = d.brs[1:] - if len(d.brs) == 0 { - return io.EOF - } - d.lastValidOff = 0 - return d.decodeRecord(rec) - } - if err != nil { - return err - } - - recBytes, padBytes := decodeFrameSize(l) - - data := make([]byte, recBytes+padBytes) - if _, err = io.ReadFull(d.brs[0], data); err != nil { - // ReadFull returns io.EOF only if no bytes were read - // the decoder should treat this as an ErrUnexpectedEOF instead. 
- if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - if err := rec.Unmarshal(data[:recBytes]); err != nil { - if d.isTornEntry(data) { - return io.ErrUnexpectedEOF - } - return err - } - - // skip crc checking if the record type is crcType - if rec.Type != crcType { - d.crc.Write(rec.Data) - if err := rec.Validate(d.crc.Sum32()); err != nil { - if d.isTornEntry(data) { - return io.ErrUnexpectedEOF - } - return err - } - } - // record decoded as valid; point last valid offset to end of record - d.lastValidOff += recBytes + padBytes + 8 - return nil -} - -func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) { - // the record size is stored in the lower 56 bits of the 64-bit length - recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56)) - // non-zero padding is indicated by set MSb / a negative length - if lenField < 0 { - // padding is stored in lower 3 bits of length MSB - padBytes = int64((uint64(lenField) >> 56) & 0x7) - } - return -} - -// isTornEntry determines whether the last entry of the WAL was partially written -// and corrupted because of a torn write. -func (d *decoder) isTornEntry(data []byte) bool { - if len(d.brs) != 1 { - return false - } - - fileOff := d.lastValidOff + 8 - curOff := 0 - chunks := [][]byte{} - // split data on sector boundaries - for curOff < len(data) { - chunkLen := int(minSectorSize - (fileOff % minSectorSize)) - if chunkLen > len(data)-curOff { - chunkLen = len(data) - curOff - } - chunks = append(chunks, data[curOff:curOff+chunkLen]) - fileOff += int64(chunkLen) - curOff += chunkLen - } - - // if any data for a sector chunk is all 0, it's a torn write - for _, sect := range chunks { - isZero := true - for _, v := range sect { - if v != 0 { - isZero = false - break - } - } - if isZero { - return true - } - } - return false -} - -func (d *decoder) updateCRC(prevCrc uint32) { - d.crc = crc.New(prevCrc, crcTable) -} - -func (d *decoder) lastCRC() uint32 { - return d.crc.Sum32() -} - -func (d *decoder) lastOffset() int64 { return d.lastValidOff } - -func mustUnmarshalEntry(d []byte) raftpb.Entry { - var e raftpb.Entry - pbutil.MustUnmarshal(&e, d) - return e -} - -func mustUnmarshalState(d []byte) raftpb.HardState { - var s raftpb.HardState - pbutil.MustUnmarshal(&s, d) - return s -} - -func readInt64(r io.Reader) (int64, error) { - var n int64 - err := binary.Read(r, binary.LittleEndian, &n) - return n, err -} diff --git a/vendor/github.com/coreos/etcd/wal/doc.go b/vendor/github.com/coreos/etcd/wal/doc.go deleted file mode 100644 index a3abd69613d..00000000000 --- a/vendor/github.com/coreos/etcd/wal/doc.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package wal provides an implementation of a write ahead log that is used by -etcd. - -A WAL is created at a particular directory and is made up of a number of -segmented WAL files. 
Inside of each file the raft state and entries are appended -to it with the Save method: - - metadata := []byte{} - w, err := wal.Create("/var/lib/etcd", metadata) - ... - err := w.Save(s, ents) - -After saving a raft snapshot to disk, SaveSnapshot method should be called to -record it. So WAL can match with the saved snapshot when restarting. - - err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2}) - -When a user has finished using a WAL it must be closed: - - w.Close() - -Each WAL file is a stream of WAL records. A WAL record is a length field and a wal record -protobuf. The record protobuf contains a CRC, a type, and a data payload. The length field is a -64-bit packed structure holding the length of the remaining logical record data in its lower -56 bits and its physical padding in the first three bits of the most significant byte. Each -record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32 -value of all record protobufs preceding the current record. - -WAL files are placed inside of the directory in the following format: -$seq-$index.wal - -The first WAL file to be created will be 0000000000000000-0000000000000000.wal -indicating an initial sequence of 0 and an initial raft index of 0. The first -entry written to WAL MUST have raft index 0. - -WAL will cut its current tail wal file if its size exceeds 64MB. This will increment an internal -sequence number and cause a new file to be created. If the last raft index saved -was 0x20 and this is the first time cut has been called on this WAL then the sequence will -increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal. -If a second cut issues 0x10 entries with incremental index later then the file will be called: -0000000000000002-0000000000000031.wal. - -At a later time a WAL can be opened at a particular snapshot. If there is no -snapshot, an empty snapshot should be passed in. - - w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2}) - ... - -The snapshot must have been written to the WAL. - -Additional items cannot be Saved to this WAL until all of the items from the given -snapshot to the end of the WAL are read first: - - metadata, state, ents, err := w.ReadAll() - -This will give you the metadata, the last raft.State and the slice of -raft.Entry items in the log. - -*/ -package wal diff --git a/vendor/github.com/coreos/etcd/wal/encoder.go b/vendor/github.com/coreos/etcd/wal/encoder.go deleted file mode 100644 index efe58928cc8..00000000000 --- a/vendor/github.com/coreos/etcd/wal/encoder.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "encoding/binary" - "hash" - "io" - "os" - "sync" - - "github.com/coreos/etcd/pkg/crc" - "github.com/coreos/etcd/pkg/ioutil" - "github.com/coreos/etcd/wal/walpb" -) - -// walPageBytes is the alignment for flushing records to the backing Writer. 
-// It should be a multiple of the minimum sector size so that WAL can safely -// distinguish between torn writes and ordinary data corruption. -const walPageBytes = 8 * minSectorSize - -type encoder struct { - mu sync.Mutex - bw *ioutil.PageWriter - - crc hash.Hash32 - buf []byte - uint64buf []byte -} - -func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder { - return &encoder{ - bw: ioutil.NewPageWriter(w, walPageBytes, pageOffset), - crc: crc.New(prevCrc, crcTable), - // 1MB buffer - buf: make([]byte, 1024*1024), - uint64buf: make([]byte, 8), - } -} - -// newFileEncoder creates a new encoder with current file offset for the page writer. -func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) { - offset, err := f.Seek(0, os.SEEK_CUR) - if err != nil { - return nil, err - } - return newEncoder(f, prevCrc, int(offset)), nil -} - -func (e *encoder) encode(rec *walpb.Record) error { - e.mu.Lock() - defer e.mu.Unlock() - - e.crc.Write(rec.Data) - rec.Crc = e.crc.Sum32() - var ( - data []byte - err error - n int - ) - - if rec.Size() > len(e.buf) { - data, err = rec.Marshal() - if err != nil { - return err - } - } else { - n, err = rec.MarshalTo(e.buf) - if err != nil { - return err - } - data = e.buf[:n] - } - - lenField, padBytes := encodeFrameSize(len(data)) - if err = writeUint64(e.bw, lenField, e.uint64buf); err != nil { - return err - } - - if padBytes != 0 { - data = append(data, make([]byte, padBytes)...) - } - _, err = e.bw.Write(data) - return err -} - -func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) { - lenField = uint64(dataBytes) - // force 8 byte alignment so length never gets a torn write - padBytes = (8 - (dataBytes % 8)) % 8 - if padBytes != 0 { - lenField |= uint64(0x80|padBytes) << 56 - } - return -} - -func (e *encoder) flush() error { - e.mu.Lock() - defer e.mu.Unlock() - return e.bw.Flush() -} - -func writeUint64(w io.Writer, n uint64, buf []byte) error { - // http://golang.org/src/encoding/binary/binary.go - binary.LittleEndian.PutUint64(buf, n) - _, err := w.Write(buf) - return err -} diff --git a/vendor/github.com/coreos/etcd/wal/file_pipeline.go b/vendor/github.com/coreos/etcd/wal/file_pipeline.go deleted file mode 100644 index 5e32a0693c1..00000000000 --- a/vendor/github.com/coreos/etcd/wal/file_pipeline.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
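The length prefix written by the encoder and read back by the decoder packs the record size into the low 56 bits and the pad width into the low three bits of the top byte, with the most significant bit flagging that padding is present; a standalone re-derivation of that packing, mirroring the deleted encodeFrameSize/decodeFrameSize, for one concrete record size:

	package main

	import "fmt"

	// packFrame mirrors encodeFrameSize: pad to 8 bytes and, when padding is
	// needed, store the pad width in the top byte with the MSB set.
	func packFrame(dataBytes int) (lenField uint64, padBytes int) {
		lenField = uint64(dataBytes)
		padBytes = (8 - (dataBytes % 8)) % 8
		if padBytes != 0 {
			lenField |= uint64(0x80|padBytes) << 56
		}
		return lenField, padBytes
	}

	// unpackFrame mirrors decodeFrameSize: the low 56 bits hold the record
	// length, and a negative lenField signals padding in bits 56-58.
	func unpackFrame(lenField int64) (recBytes, padBytes int64) {
		recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56))
		if lenField < 0 {
			padBytes = int64((uint64(lenField) >> 56) & 0x7)
		}
		return recBytes, padBytes
	}

	func main() {
		// An 11-byte record needs 5 bytes of padding to stay 8-byte aligned.
		lenField, pad := packFrame(11)
		fmt.Printf("lenField=%#x pad=%d\n", lenField, pad) // lenField=0x850000000000000b pad=5

		rec, pad2 := unpackFrame(int64(lenField))
		fmt.Println(rec, pad2) // 11 5
	}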
- -package wal - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/coreos/etcd/pkg/fileutil" -) - -// filePipeline pipelines allocating disk space -type filePipeline struct { - // dir to put files - dir string - // size of files to make, in bytes - size int64 - // count number of files generated - count int - - filec chan *fileutil.LockedFile - errc chan error - donec chan struct{} -} - -func newFilePipeline(dir string, fileSize int64) *filePipeline { - fp := &filePipeline{ - dir: dir, - size: fileSize, - filec: make(chan *fileutil.LockedFile), - errc: make(chan error, 1), - donec: make(chan struct{}), - } - go fp.run() - return fp -} - -// Open returns a fresh file for writing. Rename the file before calling -// Open again or there will be file collisions. -func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) { - select { - case f = <-fp.filec: - case err = <-fp.errc: - } - return -} - -func (fp *filePipeline) Close() error { - close(fp.donec) - return <-fp.errc -} - -func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) { - // count % 2 so this file isn't the same as the one last published - fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2)) - if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil { - return nil, err - } - if err = fileutil.Preallocate(f.File, fp.size, true); err != nil { - plog.Errorf("failed to allocate space when creating new wal file (%v)", err) - f.Close() - return nil, err - } - fp.count++ - return f, nil -} - -func (fp *filePipeline) run() { - defer close(fp.errc) - for { - f, err := fp.alloc() - if err != nil { - fp.errc <- err - return - } - select { - case fp.filec <- f: - case <-fp.donec: - os.Remove(f.Name()) - f.Close() - return - } - } -} diff --git a/vendor/github.com/coreos/etcd/wal/metrics.go b/vendor/github.com/coreos/etcd/wal/metrics.go deleted file mode 100644 index 9e089d380f9..00000000000 --- a/vendor/github.com/coreos/etcd/wal/metrics.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import "github.com/prometheus/client_golang/prometheus" - -var ( - syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "disk", - Name: "wal_fsync_duration_seconds", - Help: "The latency distributions of fsync called by wal.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) -) - -func init() { - prometheus.MustRegister(syncDurations) -} diff --git a/vendor/github.com/coreos/etcd/wal/repair.go b/vendor/github.com/coreos/etcd/wal/repair.go deleted file mode 100644 index ffb14161682..00000000000 --- a/vendor/github.com/coreos/etcd/wal/repair.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "io" - "os" - "path/filepath" - - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/wal/walpb" -) - -// Repair tries to repair ErrUnexpectedEOF in the -// last wal file by truncating. -func Repair(dirpath string) bool { - f, err := openLast(dirpath) - if err != nil { - return false - } - defer f.Close() - - rec := &walpb.Record{} - decoder := newDecoder(f) - for { - lastOffset := decoder.lastOffset() - err := decoder.decode(rec) - switch err { - case nil: - // update crc of the decoder when necessary - switch rec.Type { - case crcType: - crc := decoder.crc.Sum32() - // current crc of decoder must match the crc of the record. - // do no need to match 0 crc, since the decoder is a new one at this case. - if crc != 0 && rec.Validate(crc) != nil { - return false - } - decoder.updateCRC(rec.Crc) - } - continue - case io.EOF: - return true - case io.ErrUnexpectedEOF: - plog.Noticef("repairing %v", f.Name()) - bf, bferr := os.Create(f.Name() + ".broken") - if bferr != nil { - plog.Errorf("could not repair %v, failed to create backup file", f.Name()) - return false - } - defer bf.Close() - - if _, err = f.Seek(0, os.SEEK_SET); err != nil { - plog.Errorf("could not repair %v, failed to read file", f.Name()) - return false - } - - if _, err = io.Copy(bf, f); err != nil { - plog.Errorf("could not repair %v, failed to copy file", f.Name()) - return false - } - - if err = f.Truncate(int64(lastOffset)); err != nil { - plog.Errorf("could not repair %v, failed to truncate file", f.Name()) - return false - } - if err = fileutil.Fsync(f.File); err != nil { - plog.Errorf("could not repair %v, failed to sync file", f.Name()) - return false - } - return true - default: - plog.Errorf("could not repair error (%v)", err) - return false - } - } -} - -// openLast opens the last wal file for read and write. -func openLast(dirpath string) (*fileutil.LockedFile, error) { - names, err := readWalNames(dirpath) - if err != nil { - return nil, err - } - last := filepath.Join(dirpath, names[len(names)-1]) - return fileutil.LockFile(last, os.O_RDWR, fileutil.PrivateFileMode) -} diff --git a/vendor/github.com/coreos/etcd/wal/util.go b/vendor/github.com/coreos/etcd/wal/util.go deleted file mode 100644 index 5c56e228872..00000000000 --- a/vendor/github.com/coreos/etcd/wal/util.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
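Repair above is the recovery path for a torn write in the last segment; the following sketch of how it might pair with Open and ReadAll at startup is an assumption about typical use, not code from this repository, and error handling is trimmed:

	package main

	import (
		"io"
		"log"

		"github.com/coreos/etcd/wal"
		"github.com/coreos/etcd/wal/walpb"
	)

	func main() {
		dir := "/var/lib/etcd" // path reused from the package example above

		w, err := wal.Open(dir, walpb.Snapshot{})
		if err != nil {
			log.Fatal(err)
		}

		if _, _, _, err = w.ReadAll(); err == io.ErrUnexpectedEOF {
			// A partially written last record: close, truncate the torn tail
			// (Repair keeps a .broken copy), then reopen and read again.
			w.Close()
			if ok := wal.Repair(dir); !ok {
				log.Fatal("wal: repair failed")
			}
			if w, err = wal.Open(dir, walpb.Snapshot{}); err != nil {
				log.Fatal(err)
			}
			if _, _, _, err = w.ReadAll(); err != nil {
				log.Fatal(err)
			}
		} else if err != nil {
			log.Fatal(err)
		}
		w.Close()
	}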
- -package wal - -import ( - "errors" - "fmt" - "strings" - - "github.com/coreos/etcd/pkg/fileutil" -) - -var ( - badWalName = errors.New("bad wal name") -) - -func Exist(dirpath string) bool { - names, err := fileutil.ReadDir(dirpath) - if err != nil { - return false - } - return len(names) != 0 -} - -// searchIndex returns the last array index of names whose raft index section is -// equal to or smaller than the given index. -// The given names MUST be sorted. -func searchIndex(names []string, index uint64) (int, bool) { - for i := len(names) - 1; i >= 0; i-- { - name := names[i] - _, curIndex, err := parseWalName(name) - if err != nil { - plog.Panicf("parse correct name should never fail: %v", err) - } - if index >= curIndex { - return i, true - } - } - return -1, false -} - -// names should have been sorted based on sequence number. -// isValidSeq checks whether seq increases continuously. -func isValidSeq(names []string) bool { - var lastSeq uint64 - for _, name := range names { - curSeq, _, err := parseWalName(name) - if err != nil { - plog.Panicf("parse correct name should never fail: %v", err) - } - if lastSeq != 0 && lastSeq != curSeq-1 { - return false - } - lastSeq = curSeq - } - return true -} -func readWalNames(dirpath string) ([]string, error) { - names, err := fileutil.ReadDir(dirpath) - if err != nil { - return nil, err - } - wnames := checkWalNames(names) - if len(wnames) == 0 { - return nil, ErrFileNotFound - } - return wnames, nil -} - -func checkWalNames(names []string) []string { - wnames := make([]string, 0) - for _, name := range names { - if _, _, err := parseWalName(name); err != nil { - // don't complain about left over tmp files - if !strings.HasSuffix(name, ".tmp") { - plog.Warningf("ignored file %v in wal", name) - } - continue - } - wnames = append(wnames, name) - } - return wnames -} - -func parseWalName(str string) (seq, index uint64, err error) { - if !strings.HasSuffix(str, ".wal") { - return 0, 0, badWalName - } - _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index) - return seq, index, err -} - -func walName(seq, index uint64) string { - return fmt.Sprintf("%016x-%016x.wal", seq, index) -} diff --git a/vendor/github.com/coreos/etcd/wal/wal.go b/vendor/github.com/coreos/etcd/wal/wal.go deleted file mode 100644 index b65f6448304..00000000000 --- a/vendor/github.com/coreos/etcd/wal/wal.go +++ /dev/null @@ -1,637 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
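The naming helpers above pin the on-disk layout to $seq-$index.wal with both numbers zero-padded to sixteen hex digits; a tiny round trip using the same format string (the helper names here are local to the example, not the package's):

	package main

	import "fmt"

	// segmentName follows walName: sixteen hex digits of sequence number,
	// a dash, sixteen hex digits of the first raft index in the file.
	func segmentName(seq, index uint64) string {
		return fmt.Sprintf("%016x-%016x.wal", seq, index)
	}

	// parseSegmentName follows parseWalName, minus the .wal suffix check.
	func parseSegmentName(name string) (seq, index uint64, err error) {
		_, err = fmt.Sscanf(name, "%016x-%016x.wal", &seq, &index)
		return seq, index, err
	}

	func main() {
		// The second segment after a cut at raft index 0x20, as described
		// in the package documentation.
		name := segmentName(1, 0x21)
		fmt.Println(name) // 0000000000000001-0000000000000021.wal

		seq, index, err := parseSegmentName(name)
		fmt.Println(seq, index, err) // 1 33 <nil>
	}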
- -package wal - -import ( - "bytes" - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "sync" - "time" - - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/wal/walpb" - - "github.com/coreos/pkg/capnslog" -) - -const ( - metadataType int64 = iota + 1 - entryType - stateType - crcType - snapshotType - - // warnSyncDuration is the amount of time allotted to an fsync before - // logging a warning - warnSyncDuration = time.Second -) - -var ( - // SegmentSizeBytes is the preallocated size of each wal segment file. - // The actual size might be larger than this. In general, the default - // value should be used, but this is defined as an exported variable - // so that tests can set a different segment size. - SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB - - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal") - - ErrMetadataConflict = errors.New("wal: conflicting metadata found") - ErrFileNotFound = errors.New("wal: file not found") - ErrCRCMismatch = errors.New("wal: crc mismatch") - ErrSnapshotMismatch = errors.New("wal: snapshot mismatch") - ErrSnapshotNotFound = errors.New("wal: snapshot not found") - crcTable = crc32.MakeTable(crc32.Castagnoli) -) - -// WAL is a logical representation of the stable storage. -// WAL is either in read mode or append mode but not both. -// A newly created WAL is in append mode, and ready for appending records. -// A just opened WAL is in read mode, and ready for reading records. -// The WAL will be ready for appending after reading out all the previous records. -type WAL struct { - dir string // the living directory of the underlay files - - // dirFile is a fd for the wal directory for syncing on Rename - dirFile *os.File - - metadata []byte // metadata recorded at the head of each WAL - state raftpb.HardState // hardstate recorded at the head of WAL - - start walpb.Snapshot // snapshot to start reading - decoder *decoder // decoder to decode records - readClose func() error // closer for decode reader - - mu sync.Mutex - enti uint64 // index of the last entry saved to the wal - encoder *encoder // encoder to encode records - - locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing) - fp *filePipeline -} - -// Create creates a WAL ready for appending records. The given metadata is -// recorded at the head of each WAL file, and can be retrieved with ReadAll. 
-func Create(dirpath string, metadata []byte) (*WAL, error) { - if Exist(dirpath) { - return nil, os.ErrExist - } - - // keep temporary wal directory so WAL initialization appears atomic - tmpdirpath := filepath.Clean(dirpath) + ".tmp" - if fileutil.Exist(tmpdirpath) { - if err := os.RemoveAll(tmpdirpath); err != nil { - return nil, err - } - } - if err := fileutil.CreateDirAll(tmpdirpath); err != nil { - return nil, err - } - - p := filepath.Join(tmpdirpath, walName(0, 0)) - f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode) - if err != nil { - return nil, err - } - if _, err = f.Seek(0, os.SEEK_END); err != nil { - return nil, err - } - if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil { - return nil, err - } - - w := &WAL{ - dir: dirpath, - metadata: metadata, - } - w.encoder, err = newFileEncoder(f.File, 0) - if err != nil { - return nil, err - } - w.locks = append(w.locks, f) - if err = w.saveCrc(0); err != nil { - return nil, err - } - if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil { - return nil, err - } - if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil { - return nil, err - } - - if w, err = w.renameWal(tmpdirpath); err != nil { - return nil, err - } - - // directory was renamed; sync parent dir to persist rename - pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir)) - if perr != nil { - return nil, perr - } - if perr = fileutil.Fsync(pdir); perr != nil { - return nil, perr - } - if perr = pdir.Close(); err != nil { - return nil, perr - } - - return w, nil -} - -// Open opens the WAL at the given snap. -// The snap SHOULD have been previously saved to the WAL, or the following -// ReadAll will fail. -// The returned WAL is ready to read and the first record will be the one after -// the given snap. The WAL cannot be appended to before reading out all of its -// previous records. -func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) { - w, err := openAtIndex(dirpath, snap, true) - if err != nil { - return nil, err - } - if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil { - return nil, err - } - return w, nil -} - -// OpenForRead only opens the wal files for read. -// Write on a read only wal panics. -func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) { - return openAtIndex(dirpath, snap, false) -} - -func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) { - names, err := readWalNames(dirpath) - if err != nil { - return nil, err - } - - nameIndex, ok := searchIndex(names, snap.Index) - if !ok || !isValidSeq(names[nameIndex:]) { - return nil, ErrFileNotFound - } - - // open the wal files - rcs := make([]io.ReadCloser, 0) - rs := make([]io.Reader, 0) - ls := make([]*fileutil.LockedFile, 0) - for _, name := range names[nameIndex:] { - p := filepath.Join(dirpath, name) - if write { - l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode) - if err != nil { - closeAll(rcs...) - return nil, err - } - ls = append(ls, l) - rcs = append(rcs, l) - } else { - rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode) - if err != nil { - closeAll(rcs...) - return nil, err - } - ls = append(ls, nil) - rcs = append(rcs, rf) - } - rs = append(rs, rcs[len(rcs)-1]) - } - - closer := func() error { return closeAll(rcs...) 
} - - // create a WAL ready for reading - w := &WAL{ - dir: dirpath, - start: snap, - decoder: newDecoder(rs...), - readClose: closer, - locks: ls, - } - - if write { - // write reuses the file descriptors from read; don't close so - // WAL can append without dropping the file lock - w.readClose = nil - if _, _, err := parseWalName(filepath.Base(w.tail().Name())); err != nil { - closer() - return nil, err - } - w.fp = newFilePipeline(w.dir, SegmentSizeBytes) - } - - return w, nil -} - -// ReadAll reads out records of the current WAL. -// If opened in write mode, it must read out all records until EOF. Or an error -// will be returned. -// If opened in read mode, it will try to read all records if possible. -// If it cannot read out the expected snap, it will return ErrSnapshotNotFound. -// If loaded snap doesn't match with the expected one, it will return -// all the records and error ErrSnapshotMismatch. -// TODO: detect not-last-snap error. -// TODO: maybe loose the checking of match. -// After ReadAll, the WAL will be ready for appending new records. -func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) { - w.mu.Lock() - defer w.mu.Unlock() - - rec := &walpb.Record{} - decoder := w.decoder - - var match bool - for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) { - switch rec.Type { - case entryType: - e := mustUnmarshalEntry(rec.Data) - if e.Index > w.start.Index { - ents = append(ents[:e.Index-w.start.Index-1], e) - } - w.enti = e.Index - case stateType: - state = mustUnmarshalState(rec.Data) - case metadataType: - if metadata != nil && !bytes.Equal(metadata, rec.Data) { - state.Reset() - return nil, state, nil, ErrMetadataConflict - } - metadata = rec.Data - case crcType: - crc := decoder.crc.Sum32() - // current crc of decoder must match the crc of the record. - // do no need to match 0 crc, since the decoder is a new one at this case. - if crc != 0 && rec.Validate(crc) != nil { - state.Reset() - return nil, state, nil, ErrCRCMismatch - } - decoder.updateCRC(rec.Crc) - case snapshotType: - var snap walpb.Snapshot - pbutil.MustUnmarshal(&snap, rec.Data) - if snap.Index == w.start.Index { - if snap.Term != w.start.Term { - state.Reset() - return nil, state, nil, ErrSnapshotMismatch - } - match = true - } - default: - state.Reset() - return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type) - } - } - - switch w.tail() { - case nil: - // We do not have to read out all entries in read mode. - // The last record maybe a partial written one, so - // ErrunexpectedEOF might be returned. - if err != io.EOF && err != io.ErrUnexpectedEOF { - state.Reset() - return nil, state, nil, err - } - default: - // We must read all of the entries if WAL is opened in write mode. - if err != io.EOF { - state.Reset() - return nil, state, nil, err - } - // decodeRecord() will return io.EOF if it detects a zero record, - // but this zero record may be followed by non-zero records from - // a torn write. Overwriting some of these non-zero records, but - // not all, will cause CRC errors on WAL open. Since the records - // were never fully synced to disk in the first place, it's safe - // to zero them out to avoid any CRC errors from new writes. 
- if _, err = w.tail().Seek(w.decoder.lastOffset(), os.SEEK_SET); err != nil { - return nil, state, nil, err - } - if err = fileutil.ZeroToEnd(w.tail().File); err != nil { - return nil, state, nil, err - } - } - - err = nil - if !match { - err = ErrSnapshotNotFound - } - - // close decoder, disable reading - if w.readClose != nil { - w.readClose() - w.readClose = nil - } - w.start = walpb.Snapshot{} - - w.metadata = metadata - - if w.tail() != nil { - // create encoder (chain crc with the decoder), enable appending - w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC()) - if err != nil { - return - } - } - w.decoder = nil - - return metadata, state, ents, err -} - -// cut closes current file written and creates a new one ready to append. -// cut first creates a temp wal file and writes necessary headers into it. -// Then cut atomically rename temp wal file to a wal file. -func (w *WAL) cut() error { - // close old wal file; truncate to avoid wasting space if an early cut - off, serr := w.tail().Seek(0, os.SEEK_CUR) - if serr != nil { - return serr - } - if err := w.tail().Truncate(off); err != nil { - return err - } - if err := w.sync(); err != nil { - return err - } - - fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1)) - - // create a temp wal file with name sequence + 1, or truncate the existing one - newTail, err := w.fp.Open() - if err != nil { - return err - } - - // update writer and save the previous crc - w.locks = append(w.locks, newTail) - prevCrc := w.encoder.crc.Sum32() - w.encoder, err = newFileEncoder(w.tail().File, prevCrc) - if err != nil { - return err - } - if err = w.saveCrc(prevCrc); err != nil { - return err - } - if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil { - return err - } - if err = w.saveState(&w.state); err != nil { - return err - } - // atomically move temp wal file to wal file - if err = w.sync(); err != nil { - return err - } - - off, err = w.tail().Seek(0, os.SEEK_CUR) - if err != nil { - return err - } - - if err = os.Rename(newTail.Name(), fpath); err != nil { - return err - } - if err = fileutil.Fsync(w.dirFile); err != nil { - return err - } - - newTail.Close() - - if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil { - return err - } - if _, err = newTail.Seek(off, os.SEEK_SET); err != nil { - return err - } - - w.locks[len(w.locks)-1] = newTail - - prevCrc = w.encoder.crc.Sum32() - w.encoder, err = newFileEncoder(w.tail().File, prevCrc) - if err != nil { - return err - } - - plog.Infof("segmented wal file %v is created", fpath) - return nil -} - -func (w *WAL) sync() error { - if w.encoder != nil { - if err := w.encoder.flush(); err != nil { - return err - } - } - start := time.Now() - err := fileutil.Fdatasync(w.tail().File) - - duration := time.Since(start) - if duration > warnSyncDuration { - plog.Warningf("sync duration of %v, expected less than %v", duration, warnSyncDuration) - } - syncDurations.Observe(duration.Seconds()) - - return err -} - -// ReleaseLockTo releases the locks, which has smaller index than the given index -// except the largest one among them. -// For example, if WAL is holding lock 1,2,3,4,5,6, ReleaseLockTo(4) will release -// lock 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4. 
-func (w *WAL) ReleaseLockTo(index uint64) error { - w.mu.Lock() - defer w.mu.Unlock() - - var smaller int - found := false - - for i, l := range w.locks { - _, lockIndex, err := parseWalName(filepath.Base(l.Name())) - if err != nil { - return err - } - if lockIndex >= index { - smaller = i - 1 - found = true - break - } - } - - // if no lock index is greater than the release index, we can - // release lock up to the last one(excluding). - if !found && len(w.locks) != 0 { - smaller = len(w.locks) - 1 - } - - if smaller <= 0 { - return nil - } - - for i := 0; i < smaller; i++ { - if w.locks[i] == nil { - continue - } - w.locks[i].Close() - } - w.locks = w.locks[smaller:] - - return nil -} - -func (w *WAL) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - - if w.fp != nil { - w.fp.Close() - w.fp = nil - } - - if w.tail() != nil { - if err := w.sync(); err != nil { - return err - } - } - for _, l := range w.locks { - if l == nil { - continue - } - if err := l.Close(); err != nil { - plog.Errorf("failed to unlock during closing wal: %s", err) - } - } - - return w.dirFile.Close() -} - -func (w *WAL) saveEntry(e *raftpb.Entry) error { - // TODO: add MustMarshalTo to reduce one allocation. - b := pbutil.MustMarshal(e) - rec := &walpb.Record{Type: entryType, Data: b} - if err := w.encoder.encode(rec); err != nil { - return err - } - w.enti = e.Index - return nil -} - -func (w *WAL) saveState(s *raftpb.HardState) error { - if raft.IsEmptyHardState(*s) { - return nil - } - w.state = *s - b := pbutil.MustMarshal(s) - rec := &walpb.Record{Type: stateType, Data: b} - return w.encoder.encode(rec) -} - -func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { - w.mu.Lock() - defer w.mu.Unlock() - - // short cut, do not call sync - if raft.IsEmptyHardState(st) && len(ents) == 0 { - return nil - } - - mustSync := mustSync(st, w.state, len(ents)) - - // TODO(xiangli): no more reference operator - for i := range ents { - if err := w.saveEntry(&ents[i]); err != nil { - return err - } - } - if err := w.saveState(&st); err != nil { - return err - } - - curOff, err := w.tail().Seek(0, os.SEEK_CUR) - if err != nil { - return err - } - if curOff < SegmentSizeBytes { - if mustSync { - return w.sync() - } - return nil - } - - return w.cut() -} - -func (w *WAL) SaveSnapshot(e walpb.Snapshot) error { - b := pbutil.MustMarshal(&e) - - w.mu.Lock() - defer w.mu.Unlock() - - rec := &walpb.Record{Type: snapshotType, Data: b} - if err := w.encoder.encode(rec); err != nil { - return err - } - // update enti only when snapshot is ahead of last index - if w.enti < e.Index { - w.enti = e.Index - } - return w.sync() -} - -func (w *WAL) saveCrc(prevCrc uint32) error { - return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc}) -} - -func (w *WAL) tail() *fileutil.LockedFile { - if len(w.locks) > 0 { - return w.locks[len(w.locks)-1] - } - return nil -} - -func (w *WAL) seq() uint64 { - t := w.tail() - if t == nil { - return 0 - } - seq, _, err := parseWalName(filepath.Base(t.Name())) - if err != nil { - plog.Fatalf("bad wal name %s (%v)", t.Name(), err) - } - return seq -} - -func mustSync(st, prevst raftpb.HardState, entsnum int) bool { - // Persistent state on all servers: - // (Updated on stable storage before responding to RPCs) - // currentTerm - // votedFor - // log entries[] - return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term -} - -func closeAll(rcs ...io.ReadCloser) error { - for _, f := range rcs { - if err := f.Close(); err != nil { - return err - } - } - return nil -} diff 
--git a/vendor/github.com/coreos/etcd/wal/wal_unix.go b/vendor/github.com/coreos/etcd/wal/wal_unix.go deleted file mode 100644 index 82fd6a17a74..00000000000 --- a/vendor/github.com/coreos/etcd/wal/wal_unix.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package wal - -import ( - "os" - - "github.com/coreos/etcd/pkg/fileutil" -) - -func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) { - // On non-Windows platforms, hold the lock while renaming. Releasing - // the lock and trying to reacquire it quickly can be flaky because - // it's possible the process will fork to spawn a process while this is - // happening. The fds are set up as close-on-exec by the Go runtime, - // but there is a window between the fork and the exec where another - // process holds the lock. - - if err := os.RemoveAll(w.dir); err != nil { - return nil, err - } - if err := os.Rename(tmpdirpath, w.dir); err != nil { - return nil, err - } - - w.fp = newFilePipeline(w.dir, SegmentSizeBytes) - df, err := fileutil.OpenDir(w.dir) - w.dirFile = df - return w, err -} diff --git a/vendor/github.com/coreos/etcd/wal/wal_windows.go b/vendor/github.com/coreos/etcd/wal/wal_windows.go deleted file mode 100644 index 0b9e434cf54..00000000000 --- a/vendor/github.com/coreos/etcd/wal/wal_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
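The wal.go and wal_unix.go hunks above describe the WAL lifecycle: a newly created WAL is in append mode, a just-opened WAL is in read mode, and ReadAll must drain all prior records before appending can resume. The sketch below is illustrative only and not part of this patch; it exercises the vendored API exactly as it appears above (wal.Create, Save, Close, Open, ReadAll), while the directory path and payloads are made up for the example.

package main

import (
	"log"

	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"
)

func main() {
	dir := "/tmp/wal-example" // hypothetical directory; Create fails if it already exists

	// A newly created WAL is in append mode and ready for records.
	w, err := wal.Create(dir, []byte("node-metadata"))
	if err != nil {
		log.Fatal(err)
	}
	st := raftpb.HardState{Term: 1, Commit: 1}
	ents := []raftpb.Entry{{Index: 1, Term: 1, Data: []byte("hello")}}
	if err := w.Save(st, ents); err != nil {
		log.Fatal(err)
	}
	w.Close()

	// A just-opened WAL is in read mode; ReadAll must read everything out
	// before the WAL is ready for appending again.
	w, err = wal.Open(dir, walpb.Snapshot{})
	if err != nil {
		log.Fatal(err)
	}
	metadata, state, entries, err := w.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("metadata=%q term=%d entries=%d", metadata, state.Term, len(entries))
	w.Close()
}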
- -package wal - -import ( - "os" - - "github.com/coreos/etcd/wal/walpb" -) - -func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) { - // rename of directory with locked files doesn't work on - // windows; close the WAL to release the locks so the directory - // can be renamed - w.Close() - if err := os.Rename(tmpdirpath, w.dir); err != nil { - return nil, err - } - // reopen and relock - newWAL, oerr := Open(w.dir, walpb.Snapshot{}) - if oerr != nil { - return nil, oerr - } - if _, _, _, err := newWAL.ReadAll(); err != nil { - newWAL.Close() - return nil, err - } - return newWAL, nil -} diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.go b/vendor/github.com/coreos/etcd/wal/walpb/record.go deleted file mode 100644 index 30a05e0c139..00000000000 --- a/vendor/github.com/coreos/etcd/wal/walpb/record.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package walpb - -import "errors" - -var ( - ErrCRCMismatch = errors.New("walpb: crc mismatch") -) - -func (rec *Record) Validate(crc uint32) error { - if rec.Crc == crc { - return nil - } - rec.Reset() - return ErrCRCMismatch -} diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go deleted file mode 100644 index e1a77d5e51a..00000000000 --- a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go +++ /dev/null @@ -1,521 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: record.proto -// DO NOT EDIT! - -/* - Package walpb is a generated protocol buffer package. - - It is generated from these files: - record.proto - - It has these top-level messages: - Record - Snapshot -*/ -package walpb - -import ( - "fmt" - - proto "github.com/golang/protobuf/proto" - - math "math" - - io "io" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Record struct { - Type int64 `protobuf:"varint,1,opt,name=type" json:"type"` - Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Record) Reset() { *m = Record{} } -func (m *Record) String() string { return proto.CompactTextString(m) } -func (*Record) ProtoMessage() {} -func (*Record) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{0} } - -type Snapshot struct { - Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"` - Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{1} } - -func init() { - proto.RegisterType((*Record)(nil), "walpb.Record") - proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot") -} -func (m *Record) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Record) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintRecord(dAtA, i, uint64(m.Type)) - dAtA[i] = 0x10 - i++ - i = encodeVarintRecord(dAtA, i, uint64(m.Crc)) - if m.Data != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintRecord(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Snapshot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintRecord(dAtA, i, uint64(m.Index)) - dAtA[i] = 0x10 - i++ - i = encodeVarintRecord(dAtA, i, uint64(m.Term)) - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Record(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Record(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintRecord(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Record) Size() (n int) { - var l int - _ = l - n += 1 + sovRecord(uint64(m.Type)) - n += 1 + sovRecord(uint64(m.Crc)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovRecord(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Snapshot) Size() (n int) { - var l int - _ = l - n += 1 + sovRecord(uint64(m.Index)) - n += 1 + sovRecord(uint64(m.Term)) - if 
m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRecord(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRecord(x uint64) (n int) { - return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Record) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Record: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) - } - m.Crc = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Crc |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRecord - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRecord(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRecord - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Snapshot) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRecord(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRecord - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRecord(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRecord - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRecord(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) } - -var fileDescriptorRecord = []byte{ - // 186 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, - 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92, - 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6, - 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, - 0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45, - 0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x80, 0x90, 0x10, 0x17, 0x4b, 0x4a, - 0x62, 0x49, 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0xad, 0xe4, 0xc0, 0xc5, 0x11, - 0x9c, 0x97, 0x58, 0x50, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc5, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a, - 0x01, 0x36, 0x92, 0x05, 0xaa, 0x13, 0x22, 0x04, 0xb6, 0x2d, 0xb5, 0x28, 0x17, 0x6c, 0x28, 0x0b, - 0xdc, 0xb6, 0xd4, 0xa2, 0x5c, 0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc, - 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00, - 0xff, 0xff, 0x7f, 0x5e, 0x5c, 0x46, 0xd3, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go deleted file mode 100644 index ba4ae31f19b..00000000000 --- 
a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2014 Docker, Inc. -// Copyright 2015-2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package daemon provides a Go implementation of the sd_notify protocol. -// It can be used to inform systemd of service start-up completion, watchdog -// events, and other status changes. -// -// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description -package daemon - -import ( - "net" - "os" -) - -const ( - // SdNotifyReady tells the service manager that service startup is finished - // or the service finished loading its configuration. - SdNotifyReady = "READY=1" - - // SdNotifyStopping tells the service manager that the service is beginning - // its shutdown. - SdNotifyStopping = "STOPPING=1" - - // SdNotifyReloading tells the service manager that this service is - // reloading its configuration. Note that you must call SdNotifyReady when - // it completed reloading. - SdNotifyReloading = "RELOADING=1" - - // SdNotifyWatchdog tells the service manager to update the watchdog - // timestamp for the service. - SdNotifyWatchdog = "WATCHDOG=1" -) - -// SdNotify sends a message to the init daemon. It is common to ignore the error. -// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET` -// will be unconditionally unset. -// -// It returns one of the following: -// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset) -// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data) -// (true, nil) - notification supported, data has been sent -func SdNotify(unsetEnvironment bool, state string) (bool, error) { - socketAddr := &net.UnixAddr{ - Name: os.Getenv("NOTIFY_SOCKET"), - Net: "unixgram", - } - - // NOTIFY_SOCKET not set - if socketAddr.Name == "" { - return false, nil - } - - if unsetEnvironment { - if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil { - return false, err - } - } - - conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) - // Error connecting to NOTIFY_SOCKET - if err != nil { - return false, err - } - defer conn.Close() - - if _, err = conn.Write([]byte(state)); err != nil { - return false, err - } - return true, nil -} diff --git a/vendor/github.com/coreos/go-systemd/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go deleted file mode 100644 index 7a0e0d3a51b..00000000000 --- a/vendor/github.com/coreos/go-systemd/daemon/watchdog.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package daemon - -import ( - "fmt" - "os" - "strconv" - "time" -) - -// SdWatchdogEnabled returns watchdog information for a service. -// Processes should call daemon.SdNotify(false, daemon.SdNotifyWatchdog) every -// time / 2. -// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` and -// `WATCHDOG_PID` will be unconditionally unset. -// -// It returns one of the following: -// (0, nil) - watchdog isn't enabled or we aren't the watched PID. -// (0, err) - an error happened (e.g. error converting time). -// (time, nil) - watchdog is enabled and we can send ping. -// time is delay before inactive service will be killed. -func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) { - wusec := os.Getenv("WATCHDOG_USEC") - wpid := os.Getenv("WATCHDOG_PID") - if unsetEnvironment { - wusecErr := os.Unsetenv("WATCHDOG_USEC") - wpidErr := os.Unsetenv("WATCHDOG_PID") - if wusecErr != nil { - return 0, wusecErr - } - if wpidErr != nil { - return 0, wpidErr - } - } - - if wusec == "" { - return 0, nil - } - s, err := strconv.Atoi(wusec) - if err != nil { - return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err) - } - if s <= 0 { - return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number") - } - interval := time.Duration(s) * time.Microsecond - - if wpid == "" { - return interval, nil - } - p, err := strconv.Atoi(wpid) - if err != nil { - return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err) - } - if os.Getpid() != p { - return 0, nil - } - - return interval, nil -} diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go deleted file mode 100644 index ef85a3ba245..00000000000 --- a/vendor/github.com/coreos/go-systemd/journal/journal.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package journal provides write bindings to the local systemd journal. -// It is implemented in pure Go and connects to the journal directly over its -// unix socket. -// -// To read from the journal, see the "sdjournal" package, which wraps the -// sd-journal a C API. 
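The sdnotify.go and watchdog.go hunks above document the sd_notify protocol and the recommended ping cadence of half the watchdog interval. As an illustration only, not part of this patch, a minimal consumer of that vendored API might look like the following; SdNotify, SdWatchdogEnabled, and the constants are taken from the code above, while the logging and loop structure are assumptions.

package main

import (
	"log"
	"time"

	"github.com/coreos/go-systemd/daemon"
)

func main() {
	// Report start-up completion; the boolean distinguishes "no NOTIFY_SOCKET"
	// from an actual send failure, as the SdNotify comment above explains.
	sent, err := daemon.SdNotify(false, daemon.SdNotifyReady)
	if err != nil {
		log.Printf("sd_notify failed: %v", err)
	} else if !sent {
		log.Print("NOTIFY_SOCKET not set; not running under systemd")
	}

	// If a watchdog is configured, ping it every interval/2 as recommended
	// in the SdWatchdogEnabled comment above.
	interval, err := daemon.SdWatchdogEnabled(false)
	if err != nil || interval == 0 {
		return
	}
	for range time.Tick(interval / 2) {
		daemon.SdNotify(false, daemon.SdNotifyWatchdog)
	}
}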
-// -// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html -package journal - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - "syscall" -) - -// Priority of a journal message -type Priority int - -const ( - PriEmerg Priority = iota - PriAlert - PriCrit - PriErr - PriWarning - PriNotice - PriInfo - PriDebug -) - -var conn net.Conn - -func init() { - var err error - conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") - if err != nil { - conn = nil - } -} - -// Enabled returns true if the local systemd journal is available for logging -func Enabled() bool { - return conn != nil -} - -// Send a message to the local systemd journal. vars is a map of journald -// fields to values. Fields must be composed of uppercase letters, numbers, -// and underscores, but must not start with an underscore. Within these -// restrictions, any arbitrary field name may be used. Some names have special -// significance: see the journalctl documentation -// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) -// for more details. vars may be nil. -func Send(message string, priority Priority, vars map[string]string) error { - if conn == nil { - return journalError("could not connect to journald socket") - } - - data := new(bytes.Buffer) - appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) - appendVariable(data, "MESSAGE", message) - for k, v := range vars { - appendVariable(data, k, v) - } - - _, err := io.Copy(conn, data) - if err != nil && isSocketSpaceError(err) { - file, err := tempFd() - if err != nil { - return journalError(err.Error()) - } - defer file.Close() - _, err = io.Copy(file, data) - if err != nil { - return journalError(err.Error()) - } - - rights := syscall.UnixRights(int(file.Fd())) - - /* this connection should always be a UnixConn, but better safe than sorry */ - unixConn, ok := conn.(*net.UnixConn) - if !ok { - return journalError("can't send file through non-Unix connection") - } - _, _, err = unixConn.WriteMsgUnix([]byte{}, rights, nil) - if err != nil { - return journalError(err.Error()) - } - } else if err != nil { - return journalError(err.Error()) - } - return nil -} - -// Print prints a message to the local systemd journal using Send(). -func Print(priority Priority, format string, a ...interface{}) error { - return Send(fmt.Sprintf(format, a...), priority, nil) -} - -func appendVariable(w io.Writer, name, value string) { - if !validVarName(name) { - journalError("variable name contains invalid character, ignoring") - } - if strings.ContainsRune(value, '\n') { - /* When the value contains a newline, we write: - * - the variable name, followed by a newline - * - the size (in 64bit little endian format) - * - the data, followed by a newline - */ - fmt.Fprintln(w, name) - binary.Write(w, binary.LittleEndian, uint64(len(value))) - fmt.Fprintln(w, value) - } else { - /* just write the variable and value all on one line */ - fmt.Fprintf(w, "%s=%s\n", name, value) - } -} - -func validVarName(name string) bool { - /* The variable name must be in uppercase and consist only of characters, - * numbers and underscores, and may not begin with an underscore. 
(from the docs) - */ - - valid := name[0] != '_' - for _, c := range name { - valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' - } - return valid -} - -func isSocketSpaceError(err error) bool { - opErr, ok := err.(*net.OpError) - if !ok { - return false - } - - sysErr, ok := opErr.Err.(syscall.Errno) - if !ok { - return false - } - - return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS -} - -func tempFd() (*os.File, error) { - file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") - if err != nil { - return nil, err - } - err = syscall.Unlink(file.Name()) - if err != nil { - return nil, err - } - return file, nil -} - -func journalError(s string) error { - s = "journal error: " + s - fmt.Fprintln(os.Stderr, s) - return errors.New(s) -} diff --git a/vendor/github.com/coreos/ignition/config/v1/cloudinit.go b/vendor/github.com/coreos/ignition/config/v1/cloudinit.go deleted file mode 100644 index 7cfeb455938..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/cloudinit.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// These functions are copied from github.com/coreos/coreos-cloudinit/config. - -package v1 - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "strings" - "unicode" -) - -func isCloudConfig(userdata []byte) bool { - header := strings.SplitN(string(decompressIfGzipped(userdata)), "\n", 2)[0] - - // Trim trailing whitespaces - header = strings.TrimRightFunc(header, unicode.IsSpace) - - return (header == "#cloud-config") -} - -func isScript(userdata []byte) bool { - header := strings.SplitN(string(decompressIfGzipped(userdata)), "\n", 2)[0] - return strings.HasPrefix(header, "#!") -} - -func decompressIfGzipped(data []byte) []byte { - if reader, err := gzip.NewReader(bytes.NewReader(data)); err == nil { - uncompressedData, err := ioutil.ReadAll(reader) - reader.Close() - if err == nil { - return uncompressedData - } else { - return data - } - } else { - return data - } -} diff --git a/vendor/github.com/coreos/ignition/config/v1/config.go b/vendor/github.com/coreos/ignition/config/v1/config.go deleted file mode 100644 index 21e79f81e54..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/config.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v1 - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/util" - "github.com/coreos/ignition/config/v1/types" - "github.com/coreos/ignition/config/validate" - "github.com/coreos/ignition/config/validate/report" - - json "github.com/ajeddeloh/go-json" -) - -func Parse(rawConfig []byte) (types.Config, report.Report, error) { - if isEmpty(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrEmpty - } else if isCloudConfig(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrCloudConfig - } else if isScript(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrScript - } - - var err error - var config types.Config - - err = json.Unmarshal(rawConfig, &config) - if err != nil { - rpt, err := util.HandleParseErrors(rawConfig) - // HandleParseErrors always returns an error - return types.Config{}, rpt, err - } - - if config.Version != types.Version { - return types.Config{}, report.Report{}, errors.ErrUnknownVersion - } - - rpt := validate.ValidateConfig(rawConfig, config) - if rpt.IsFatal() { - return types.Config{}, rpt, errors.ErrInvalid - } - return config, rpt, nil -} - -func isEmpty(userdata []byte) bool { - return len(userdata) == 0 -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/config.go b/vendor/github.com/coreos/ignition/config/v1/types/config.go deleted file mode 100644 index f9215699cbd..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/config.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -const ( - Version = 1 -) - -type Config struct { - Version int `json:"ignitionVersion"` - Storage Storage `json:"storage,omitempty"` - Systemd Systemd `json:"systemd,omitempty"` - Networkd Networkd `json:"networkd,omitempty"` - Passwd Passwd `json:"passwd,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/disk.go b/vendor/github.com/coreos/ignition/config/v1/types/disk.go deleted file mode 100644 index 62517856dc0..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/disk.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Disk struct { - Device Path `json:"device,omitempty"` - WipeTable bool `json:"wipeTable,omitempty"` - Partitions []Partition `json:"partitions,omitempty"` -} - -func (n Disk) Validate() report.Report { - r := report.Report{} - if len(n.Device) == 0 { - r.Add(report.Entry{ - Kind: report.EntryError, - Message: errors.ErrDiskDeviceRequired.Error(), - }) - } - if n.partitionNumbersCollide() { - r.Add(report.Entry{ - Kind: report.EntryError, - Message: errors.ErrPartitionNumbersCollide.Error(), - }) - } - if n.partitionsOverlap() { - r.Add(report.Entry{ - Kind: report.EntryError, - Message: errors.ErrPartitionsOverlap.Error(), - }) - } - if n.partitionsMisaligned() { - r.Add(report.Entry{ - Kind: report.EntryError, - Message: errors.ErrPartitionsMisaligned.Error(), - }) - } - // Disks which get to this point will likely succeed in sgdisk - return r -} - -// partitionNumbersCollide returns true if partition numbers in n.Partitions are not unique. -func (n Disk) partitionNumbersCollide() bool { - m := map[int][]Partition{} - for _, p := range n.Partitions { - m[p.Number] = append(m[p.Number], p) - } - for _, n := range m { - if len(n) > 1 { - // TODO(vc): return information describing the collision for logging - return true - } - } - return false -} - -// end returns the last sector of a partition. -func (p Partition) end() PartitionDimension { - if p.Size == 0 { - // a size of 0 means "fill available", just return the start as the end for those. - return p.Start - } - return p.Start + p.Size - 1 -} - -// partitionsOverlap returns true if any explicitly dimensioned partitions overlap -func (n Disk) partitionsOverlap() bool { - for _, p := range n.Partitions { - // Starts of 0 are placed by sgdisk into the "largest available block" at that time. - // We aren't going to check those for overlap since we don't have the disk geometry. - if p.Start == 0 { - continue - } - - for _, o := range n.Partitions { - if p == o || o.Start == 0 { - continue - } - - // is p.Start within o? - if p.Start >= o.Start && p.Start <= o.end() { - return true - } - - // is p.end() within o? - if p.end() >= o.Start && p.end() <= o.end() { - return true - } - - // do p.Start and p.end() straddle o? - if p.Start < o.Start && p.end() > o.end() { - return true - } - } - } - return false -} - -// partitionsMisaligned returns true if any of the partitions don't start on a 2048-sector (1MiB) boundary. -func (n Disk) partitionsMisaligned() bool { - for _, p := range n.Partitions { - if (p.Start & (2048 - 1)) != 0 { - return true - } - } - return false -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/file.go b/vendor/github.com/coreos/ignition/config/v1/types/file.go deleted file mode 100644 index 8775c19fd5e..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/file.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "os" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type FileMode os.FileMode - -type File struct { - Path Path `json:"path,omitempty"` - Contents string `json:"contents,omitempty"` - Mode FileMode `json:"mode,omitempty"` - Uid int `json:"uid,omitempty"` - Gid int `json:"gid,omitempty"` -} - -func (m FileMode) Validate() report.Report { - if (m &^ 07777) != 0 { - return report.ReportFromError(errors.ErrFileIllegalMode, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/filesystem.go b/vendor/github.com/coreos/ignition/config/v1/types/filesystem.go deleted file mode 100644 index 7986bd724cc..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/filesystem.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Filesystem struct { - Device Path `json:"device,omitempty"` - Format FilesystemFormat `json:"format,omitempty"` - Create *FilesystemCreate `json:"create,omitempty"` - Files []File `json:"files,omitempty"` -} - -type FilesystemCreate struct { - Force bool `json:"force,omitempty"` - Options MkfsOptions `json:"options,omitempty"` -} - -type FilesystemFormat string - -func (f FilesystemFormat) Validate() report.Report { - switch f { - case "ext4", "btrfs", "xfs": - return report.Report{} - default: - return report.ReportFromError(errors.ErrFilesystemInvalidFormat, report.EntryError) - } -} - -type MkfsOptions []string diff --git a/vendor/github.com/coreos/ignition/config/v1/types/group.go b/vendor/github.com/coreos/ignition/config/v1/types/group.go deleted file mode 100644 index 27e51048870..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/group.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -type Group struct { - Name string `json:"name,omitempty"` - Gid *uint `json:"gid,omitempty"` - PasswordHash string `json:"passwordHash,omitempty"` - System bool `json:"system,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/networkd.go b/vendor/github.com/coreos/ignition/config/v1/types/networkd.go deleted file mode 100644 index 470c721106a..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/networkd.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -type Networkd struct { - Units []NetworkdUnit `json:"units,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/partition.go b/vendor/github.com/coreos/ignition/config/v1/types/partition.go deleted file mode 100644 index 16270de2cf8..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/partition.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "regexp" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Partition struct { - Label PartitionLabel `json:"label,omitempty"` - Number int `json:"number"` - Size PartitionDimension `json:"size"` - Start PartitionDimension `json:"start"` - TypeGUID PartitionTypeGUID `json:"typeGuid,omitempty"` -} - -type PartitionLabel string - -func (n PartitionLabel) Validate() report.Report { - // http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries: - // 56 (0x38) 72 bytes Partition name (36 UTF-16LE code units) - - // XXX(vc): note GPT calls it a name, we're using label for consistency - // with udev naming /dev/disk/by-partlabel/*. 
- if len(string(n)) > 36 { - return report.ReportFromError(errors.ErrLabelTooLong, report.EntryError) - } - return report.Report{} -} - -type PartitionDimension uint64 - -type PartitionTypeGUID string - -func (d PartitionTypeGUID) Validate() report.Report { - ok, err := regexp.MatchString("^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$", string(d)) - if err != nil { - return report.ReportFromError(fmt.Errorf("error matching type-guid regexp: %v", err), report.EntryError) - } - if !ok { - return report.ReportFromError(fmt.Errorf(`partition type-guid must have the form "01234567-89AB-CDEF-EDCB-A98765432101", got: %q`, string(d)), report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/passwd.go b/vendor/github.com/coreos/ignition/config/v1/types/passwd.go deleted file mode 100644 index 0ffff43bb84..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/passwd.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -type Passwd struct { - Users []User `json:"users,omitempty"` - Groups []Group `json:"groups,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/path.go b/vendor/github.com/coreos/ignition/config/v1/types/path.go deleted file mode 100644 index e37341c1ace..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/path.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "path" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Path string - -func (d Path) Validate() report.Report { - if !path.IsAbs(string(d)) { - return report.ReportFromError(errors.ErrPathRelative, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/raid.go b/vendor/github.com/coreos/ignition/config/v1/types/raid.go deleted file mode 100644 index 329b123e6d0..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/raid.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Raid struct { - Name string `json:"name"` - Level string `json:"level"` - Devices []Path `json:"devices,omitempty"` - Spares int `json:"spares,omitempty"` -} - -func (n Raid) Validate() report.Report { - switch n.Level { - case "linear", "raid0", "0", "stripe": - if n.Spares != 0 { - return report.ReportFromError(errors.ErrSparesUnsupportedForLevel, report.EntryError) - } - case "raid1", "1", "mirror": - case "raid4", "4": - case "raid5", "5": - case "raid6", "6": - case "raid10", "10": - default: - return report.ReportFromError(errors.ErrUnrecognizedRaidLevel, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/storage.go b/vendor/github.com/coreos/ignition/config/v1/types/storage.go deleted file mode 100644 index 2649751a7d0..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/storage.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -type Storage struct { - Disks []Disk `json:"disks,omitempty"` - Arrays []Raid `json:"raid,omitempty"` - Filesystems []Filesystem `json:"filesystems,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/systemd.go b/vendor/github.com/coreos/ignition/config/v1/types/systemd.go deleted file mode 100644 index 97194b91150..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/systemd.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -type Systemd struct { - Units []SystemdUnit `json:"units,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/unit.go b/vendor/github.com/coreos/ignition/config/v1/types/unit.go deleted file mode 100644 index 5e983cc1456..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/unit.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "path" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type SystemdUnit struct { - Name SystemdUnitName `json:"name,omitempty"` - Enable bool `json:"enable,omitempty"` - Mask bool `json:"mask,omitempty"` - Contents string `json:"contents,omitempty"` - DropIns []SystemdUnitDropIn `json:"dropins,omitempty"` -} - -type SystemdUnitDropIn struct { - Name SystemdUnitDropInName `json:"name,omitempty"` - Contents string `json:"contents,omitempty"` -} - -type SystemdUnitName string - -func (n SystemdUnitName) Validate() report.Report { - switch path.Ext(string(n)) { - case ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice", ".scope": - return report.Report{} - default: - return report.ReportFromError(errors.ErrInvalidSystemdExt, report.EntryError) - } -} - -type SystemdUnitDropInName string - -func (n SystemdUnitDropInName) Validate() report.Report { - switch path.Ext(string(n)) { - case ".conf": - return report.Report{} - default: - return report.ReportFromError(errors.ErrInvalidSystemdDropinExt, report.EntryError) - } -} - -type NetworkdUnit struct { - Name NetworkdUnitName `json:"name,omitempty"` - Contents string `json:"contents,omitempty"` -} - -type NetworkdUnitName string - -func (n NetworkdUnitName) Validate() report.Report { - switch path.Ext(string(n)) { - case ".link", ".netdev", ".network": - return report.Report{} - default: - return report.ReportFromError(errors.ErrInvalidNetworkdExt, report.EntryError) - } -} diff --git a/vendor/github.com/coreos/ignition/config/v1/types/user.go b/vendor/github.com/coreos/ignition/config/v1/types/user.go deleted file mode 100644 index f6653e27494..00000000000 --- a/vendor/github.com/coreos/ignition/config/v1/types/user.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -type User struct { - Name string `json:"name,omitempty"` - PasswordHash string `json:"passwordHash,omitempty"` - SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty"` - Create *UserCreate `json:"create,omitempty"` -} - -type UserCreate struct { - Uid *uint `json:"uid,omitempty"` - GECOS string `json:"gecos,omitempty"` - Homedir string `json:"homeDir,omitempty"` - NoCreateHome bool `json:"noCreateHome,omitempty"` - PrimaryGroup string `json:"primaryGroup,omitempty"` - Groups []string `json:"groups,omitempty"` - NoUserGroup bool `json:"noUserGroup,omitempty"` - System bool `json:"system,omitempty"` - NoLogInit bool `json:"noLogInit,omitempty"` - Shell string `json:"shell,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/append.go b/vendor/github.com/coreos/ignition/config/v2_0/append.go deleted file mode 100644 index cee6bc412e2..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/append.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2_0 - -import ( - "reflect" - - "github.com/coreos/ignition/config/v2_0/types" -) - -// Append appends newConfig to oldConfig and returns the result. Appending one -// config to another is accomplished by iterating over every field in the -// config structure, appending slices, recursively appending structs, and -// overwriting old values with new values for all other types. -func Append(oldConfig, newConfig types.Config) types.Config { - vOld := reflect.ValueOf(oldConfig) - vNew := reflect.ValueOf(newConfig) - - vResult := appendStruct(vOld, vNew) - - return vResult.Interface().(types.Config) -} - -// appendStruct is an internal helper function to AppendConfig. Given two values -// of structures (assumed to be the same type), recursively iterate over every -// field in the struct, appending slices, recursively appending structs, and -// overwriting old values with the new for all other types. Individual fields -// are able to override their merge strategy using the "merge" tag. Accepted -// values are "new" or "old": "new" uses the new value, "old" uses the old -// value. These are currently only used for "ignition.config" and -// "ignition.version". 
-func appendStruct(vOld, vNew reflect.Value) reflect.Value { - tOld := vOld.Type() - vRes := reflect.New(tOld) - - for i := 0; i < tOld.NumField(); i++ { - vfOld := vOld.Field(i) - vfNew := vNew.Field(i) - vfRes := vRes.Elem().Field(i) - - switch tOld.Field(i).Tag.Get("merge") { - case "old": - vfRes.Set(vfOld) - continue - case "new": - vfRes.Set(vfNew) - continue - } - - switch vfOld.Type().Kind() { - case reflect.Struct: - vfRes.Set(appendStruct(vfOld, vfNew)) - case reflect.Slice: - vfRes.Set(reflect.AppendSlice(vfOld, vfNew)) - default: - vfRes.Set(vfNew) - } - } - - return vRes.Elem() -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/cloudinit.go b/vendor/github.com/coreos/ignition/config/v2_0/cloudinit.go deleted file mode 100644 index 9e1f2ad0e7c..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/cloudinit.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// These functions are copied from github.com/coreos/coreos-cloudinit/config. - -package v2_0 - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "strings" - "unicode" -) - -func isCloudConfig(userdata []byte) bool { - header := strings.SplitN(string(decompressIfGzipped(userdata)), "\n", 2)[0] - - // Trim trailing whitespaces - header = strings.TrimRightFunc(header, unicode.IsSpace) - - return (header == "#cloud-config") -} - -func isScript(userdata []byte) bool { - header := strings.SplitN(string(decompressIfGzipped(userdata)), "\n", 2)[0] - return strings.HasPrefix(header, "#!") -} - -func decompressIfGzipped(data []byte) []byte { - if reader, err := gzip.NewReader(bytes.NewReader(data)); err == nil { - uncompressedData, err := ioutil.ReadAll(reader) - reader.Close() - if err == nil { - return uncompressedData - } else { - return data - } - } else { - return data - } -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/config.go b/vendor/github.com/coreos/ignition/config/v2_0/config.go deleted file mode 100644 index f1385bf1ca9..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/config.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2_0 - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/v1" - "github.com/coreos/ignition/config/v2_0/types" - "github.com/coreos/ignition/config/validate" - "github.com/coreos/ignition/config/validate/report" - - json "github.com/ajeddeloh/go-json" - "github.com/coreos/go-semver/semver" -) - -// Parse parses the raw config into a types.Config struct and generates a report of any -// errors, warnings, info, and deprecations it encountered -func Parse(rawConfig []byte) (types.Config, report.Report, error) { - if isEmpty(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrEmpty - } else if isCloudConfig(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrCloudConfig - } else if isScript(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrScript - } - - var err error - var config types.Config - - err = json.Unmarshal(rawConfig, &config) - - if err != nil || semver.Version(config.Ignition.Version).LessThan(types.MaxVersion) { - // We can fail unmarshaling if it's an older config. Attempt to parse - // it as such. - config, rpt, err := v1.Parse(rawConfig) - if err != nil { - return types.Config{}, rpt, err - } - - rpt.Merge(report.ReportFromError(errors.ErrDeprecated, report.EntryDeprecated)) - return TranslateFromV1(config), rpt, err - } - - if semver.Version(config.Ignition.Version) != types.MaxVersion { - return types.Config{}, report.Report{}, errors.ErrUnknownVersion - } - - rpt := validate.ValidateConfig(rawConfig, config) - if rpt.IsFatal() { - return types.Config{}, rpt, errors.ErrInvalid - } - - return config, rpt, nil -} - -func isEmpty(userdata []byte) bool { - return len(userdata) == 0 -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/translate.go b/vendor/github.com/coreos/ignition/config/v2_0/translate.go deleted file mode 100644 index 832adce566d..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/translate.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2_0 - -import ( - "fmt" - - v1 "github.com/coreos/ignition/config/v1/types" - "github.com/coreos/ignition/config/v2_0/types" - "github.com/vincent-petithory/dataurl" -) - -func TranslateFromV1(old v1.Config) types.Config { - config := types.Config{ - Ignition: types.Ignition{ - Version: types.IgnitionVersion(types.MaxVersion), - }, - } - - for _, oldDisk := range old.Storage.Disks { - disk := types.Disk{ - Device: types.Path(oldDisk.Device), - WipeTable: oldDisk.WipeTable, - } - - for _, oldPartition := range oldDisk.Partitions { - disk.Partitions = append(disk.Partitions, types.Partition{ - Label: types.PartitionLabel(oldPartition.Label), - Number: oldPartition.Number, - Size: types.PartitionDimension(oldPartition.Size), - Start: types.PartitionDimension(oldPartition.Start), - TypeGUID: types.PartitionTypeGUID(oldPartition.TypeGUID), - }) - } - - config.Storage.Disks = append(config.Storage.Disks, disk) - } - - for _, oldArray := range old.Storage.Arrays { - array := types.Raid{ - Name: oldArray.Name, - Level: oldArray.Level, - Spares: oldArray.Spares, - } - - for _, oldDevice := range oldArray.Devices { - array.Devices = append(array.Devices, types.Path(oldDevice)) - } - - config.Storage.Arrays = append(config.Storage.Arrays, array) - } - - for i, oldFilesystem := range old.Storage.Filesystems { - filesystem := types.Filesystem{ - Name: fmt.Sprintf("_translate-filesystem-%d", i), - Mount: &types.FilesystemMount{ - Device: types.Path(oldFilesystem.Device), - Format: types.FilesystemFormat(oldFilesystem.Format), - }, - } - - if oldFilesystem.Create != nil { - filesystem.Mount.Create = &types.FilesystemCreate{ - Force: oldFilesystem.Create.Force, - Options: types.MkfsOptions(oldFilesystem.Create.Options), - } - } - - config.Storage.Filesystems = append(config.Storage.Filesystems, filesystem) - - for _, oldFile := range oldFilesystem.Files { - file := types.File{ - Filesystem: filesystem.Name, - Path: types.Path(oldFile.Path), - User: types.FileUser{Id: oldFile.Uid}, - Group: types.FileGroup{Id: oldFile.Gid}, - Mode: types.FileMode(oldFile.Mode), - Contents: types.FileContents{ - Source: types.Url{ - Scheme: "data", - Opaque: "," + dataurl.EscapeString(oldFile.Contents), - }, - }, - } - - config.Storage.Files = append(config.Storage.Files, file) - } - } - - for _, oldUnit := range old.Systemd.Units { - unit := types.SystemdUnit{ - Name: types.SystemdUnitName(oldUnit.Name), - Enable: oldUnit.Enable, - Mask: oldUnit.Mask, - Contents: oldUnit.Contents, - } - - for _, oldDropIn := range oldUnit.DropIns { - unit.DropIns = append(unit.DropIns, types.SystemdUnitDropIn{ - Name: types.SystemdUnitDropInName(oldDropIn.Name), - Contents: oldDropIn.Contents, - }) - } - - config.Systemd.Units = append(config.Systemd.Units, unit) - } - - for _, oldUnit := range old.Networkd.Units { - config.Networkd.Units = append(config.Networkd.Units, types.NetworkdUnit{ - Name: types.NetworkdUnitName(oldUnit.Name), - Contents: oldUnit.Contents, - }) - } - - for _, oldUser := range old.Passwd.Users { - user := types.User{ - Name: oldUser.Name, - PasswordHash: oldUser.PasswordHash, - SSHAuthorizedKeys: oldUser.SSHAuthorizedKeys, - } - - if oldUser.Create != nil { - var uid *uint - if oldUser.Create.Uid != nil { - tmp := uint(*oldUser.Create.Uid) - uid = &tmp - } - - user.Create = &types.UserCreate{ - Uid: uid, - GECOS: oldUser.Create.GECOS, - Homedir: oldUser.Create.Homedir, - NoCreateHome: oldUser.Create.NoCreateHome, - PrimaryGroup: oldUser.Create.PrimaryGroup, - Groups: oldUser.Create.Groups, - NoUserGroup: 
oldUser.Create.NoUserGroup, - System: oldUser.Create.System, - NoLogInit: oldUser.Create.NoLogInit, - Shell: oldUser.Create.Shell, - } - } - - config.Passwd.Users = append(config.Passwd.Users, user) - } - - for _, oldGroup := range old.Passwd.Groups { - var gid *uint - if oldGroup.Gid != nil { - tmp := uint(*oldGroup.Gid) - gid = &tmp - } - config.Passwd.Groups = append(config.Passwd.Groups, types.Group{ - Name: oldGroup.Name, - Gid: gid, - PasswordHash: oldGroup.PasswordHash, - System: oldGroup.System, - }) - } - - return config -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/compression.go b/vendor/github.com/coreos/ignition/config/v2_0/types/compression.go deleted file mode 100644 index f56e5b9c80b..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/compression.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Compression string - -func (c Compression) Validate() report.Report { - switch c { - case "", "gzip": - default: - return report.ReportFromError(errors.ErrCompressionInvalid, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/config.go b/vendor/github.com/coreos/ignition/config/v2_0/types/config.go deleted file mode 100644 index 38551589175..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/config.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "fmt" - - "github.com/coreos/go-semver/semver" - - "github.com/coreos/ignition/config/validate/report" -) - -var ( - MaxVersion = semver.Version{ - Major: 2, - Minor: 0, - } -) - -type Config struct { - Ignition Ignition `json:"ignition"` - Storage Storage `json:"storage,omitempty"` - Systemd Systemd `json:"systemd,omitempty"` - Networkd Networkd `json:"networkd,omitempty"` - Passwd Passwd `json:"passwd,omitempty"` -} - -func (c Config) Validate() report.Report { - r := report.Report{} - rules := []rule{ - checkFilesFilesystems, - checkDuplicateFilesystems, - } - - for _, rule := range rules { - rule(c, &r) - } - return r -} - -type rule func(cfg Config, report *report.Report) - -func checkFilesFilesystems(cfg Config, r *report.Report) { - filesystems := map[string]struct{}{"root": {}} - for _, filesystem := range cfg.Storage.Filesystems { - filesystems[filesystem.Name] = struct{}{} - } - for _, file := range cfg.Storage.Files { - if file.Filesystem == "" { - // Filesystem was not specified. This is an error, but its handled in types.File's Validate, not here - continue - } - _, ok := filesystems[file.Filesystem] - if !ok { - r.Add(report.Entry{ - Kind: report.EntryWarning, - Message: fmt.Sprintf("File %q references nonexistent filesystem %q. (This is ok if it is defined in a referenced config)", - file.Path, file.Filesystem), - }) - } - } -} - -func checkDuplicateFilesystems(cfg Config, r *report.Report) { - filesystems := map[string]struct{}{"root": {}} - for _, filesystem := range cfg.Storage.Filesystems { - if _, ok := filesystems[filesystem.Name]; ok { - r.Add(report.Entry{ - Kind: report.EntryWarning, - Message: fmt.Sprintf("Filesystem %q shadows exising filesystem definition", filesystem.Name), - }) - } - filesystems[filesystem.Name] = struct{}{} - } -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/disk.go b/vendor/github.com/coreos/ignition/config/v2_0/types/disk.go deleted file mode 100644 index b68c5c930ed..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/disk.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Disk struct { - Device Path `json:"device,omitempty"` - WipeTable bool `json:"wipeTable,omitempty"` - Partitions []Partition `json:"partitions,omitempty"` -} - -func (n Disk) Validate() report.Report { - r := report.Report{} - if len(n.Device) == 0 { - r.Add(report.Entry{ - Message: errors.ErrDiskDeviceRequired.Error(), - Kind: report.EntryError, - }) - } - if n.partitionNumbersCollide() { - r.Add(report.Entry{ - Message: errors.ErrPartitionNumbersCollide.Error(), - Kind: report.EntryError, - }) - } - if n.partitionsOverlap() { - r.Add(report.Entry{ - Message: errors.ErrPartitionsOverlap.Error(), - Kind: report.EntryError, - }) - } - if n.partitionsMisaligned() { - r.Add(report.Entry{ - Message: errors.ErrPartitionsMisaligned.Error(), - Kind: report.EntryError, - }) - } - // Disks which have no errors at this point will likely succeed in sgdisk - return r -} - -// partitionNumbersCollide returns true if partition numbers in n.Partitions are not unique. -func (n Disk) partitionNumbersCollide() bool { - m := map[int][]Partition{} - for _, p := range n.Partitions { - if p.Number != 0 { - // a number of 0 means next available number, multiple devices can specify this - m[p.Number] = append(m[p.Number], p) - } - } - for _, n := range m { - if len(n) > 1 { - // TODO(vc): return information describing the collision for logging - return true - } - } - return false -} - -// end returns the last sector of a partition. -func (p Partition) end() PartitionDimension { - if p.Size == 0 { - // a size of 0 means "fill available", just return the start as the end for those. - return p.Start - } - return p.Start + p.Size - 1 -} - -// partitionsOverlap returns true if any explicitly dimensioned partitions overlap -func (n Disk) partitionsOverlap() bool { - for _, p := range n.Partitions { - // Starts of 0 are placed by sgdisk into the "largest available block" at that time. - // We aren't going to check those for overlap since we don't have the disk geometry. - if p.Start == 0 { - continue - } - - for _, o := range n.Partitions { - if p == o || o.Start == 0 { - continue - } - - // is p.Start within o? - if p.Start >= o.Start && p.Start <= o.end() { - return true - } - - // is p.end() within o? - if p.end() >= o.Start && p.end() <= o.end() { - return true - } - - // do p.Start and p.end() straddle o? - if p.Start < o.Start && p.end() > o.end() { - return true - } - } - } - return false -} - -// partitionsMisaligned returns true if any of the partitions don't start on a 2048-sector (1MiB) boundary. -func (n Disk) partitionsMisaligned() bool { - for _, p := range n.Partitions { - if (p.Start & (2048 - 1)) != 0 { - return true - } - } - return false -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/file.go b/vendor/github.com/coreos/ignition/config/v2_0/types/file.go deleted file mode 100644 index 8d3e79054de..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/file.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "os" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type File struct { - Filesystem string `json:"filesystem,omitempty"` - Path Path `json:"path,omitempty"` - Contents FileContents `json:"contents,omitempty"` - Mode FileMode `json:"mode,omitempty"` - User FileUser `json:"user,omitempty"` - Group FileGroup `json:"group,omitempty"` -} - -func (f File) Validate() report.Report { - if f.Filesystem == "" { - return report.ReportFromError(errors.ErrNoFilesystem, report.EntryError) - } - return report.Report{} -} - -type FileUser struct { - Id int `json:"id,omitempty"` -} - -type FileGroup struct { - Id int `json:"id,omitempty"` -} - -type FileContents struct { - Compression Compression `json:"compression,omitempty"` - Source Url `json:"source,omitempty"` - Verification Verification `json:"verification,omitempty"` -} - -type FileMode os.FileMode - -func (m FileMode) Validate() report.Report { - if (m &^ 07777) != 0 { - return report.ReportFromError(errors.ErrFileIllegalMode, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/filesystem.go b/vendor/github.com/coreos/ignition/config/v2_0/types/filesystem.go deleted file mode 100644 index e3572711da8..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/filesystem.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Filesystem struct { - Name string `json:"name,omitempty"` - Mount *FilesystemMount `json:"mount,omitempty"` - Path *Path `json:"path,omitempty"` -} - -type FilesystemMount struct { - Device Path `json:"device,omitempty"` - Format FilesystemFormat `json:"format,omitempty"` - Create *FilesystemCreate `json:"create,omitempty"` -} - -type FilesystemCreate struct { - Force bool `json:"force,omitempty"` - Options MkfsOptions `json:"options,omitempty"` -} - -func (f Filesystem) Validate() report.Report { - if f.Mount == nil && f.Path == nil { - return report.ReportFromError(errors.ErrFilesystemNoMountPath, report.EntryError) - } - if f.Mount != nil && f.Path != nil { - return report.ReportFromError(errors.ErrFilesystemMountAndPath, report.EntryError) - } - return report.Report{} -} - -type FilesystemFormat string - -func (f FilesystemFormat) Validate() report.Report { - switch f { - case "ext4", "btrfs", "xfs": - return report.Report{} - default: - return report.ReportFromError(errors.ErrFilesystemInvalidFormat, report.EntryError) - } -} - -type MkfsOptions []string diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/group.go b/vendor/github.com/coreos/ignition/config/v2_0/types/group.go deleted file mode 100644 index 27e51048870..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/group.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -type Group struct { - Name string `json:"name,omitempty"` - Gid *uint `json:"gid,omitempty"` - PasswordHash string `json:"passwordHash,omitempty"` - System bool `json:"system,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/hash.go b/vendor/github.com/coreos/ignition/config/v2_0/types/hash.go deleted file mode 100644 index 628524dc6d1..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/hash.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "crypto" - "encoding/hex" - "encoding/json" - "strings" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Hash struct { - Function string - Sum string -} - -func (h *Hash) UnmarshalJSON(data []byte) error { - var th string - if err := json.Unmarshal(data, &th); err != nil { - return err - } - - parts := strings.SplitN(th, "-", 2) - if len(parts) != 2 { - return errors.ErrHashMalformed - } - - h.Function = parts[0] - h.Sum = parts[1] - - return nil -} - -func (h Hash) MarshalJSON() ([]byte, error) { - return []byte(`"` + h.Function + "-" + h.Sum + `"`), nil -} - -func (h Hash) String() string { - bytes, _ := h.MarshalJSON() - return string(bytes) -} - -func (h Hash) Validate() report.Report { - var hash crypto.Hash - switch h.Function { - case "sha512": - hash = crypto.SHA512 - default: - return report.ReportFromError(errors.ErrHashUnrecognized, report.EntryError) - } - - if len(h.Sum) != hex.EncodedLen(hash.Size()) { - return report.ReportFromError(errors.ErrHashWrongSize, report.EntryError) - } - - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/ignition.go b/vendor/github.com/coreos/ignition/config/v2_0/types/ignition.go deleted file mode 100644 index deeb822d09e..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/ignition.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "encoding/json" - - "github.com/coreos/go-semver/semver" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Ignition struct { - Version IgnitionVersion `json:"version,omitempty" merge:"old"` - Config IgnitionConfig `json:"config,omitempty" merge:"new"` -} - -type IgnitionConfig struct { - Append []ConfigReference `json:"append,omitempty"` - Replace *ConfigReference `json:"replace,omitempty"` -} - -type ConfigReference struct { - Source Url `json:"source,omitempty"` - Verification Verification `json:"verification,omitempty"` -} - -type IgnitionVersion semver.Version - -func (v *IgnitionVersion) UnmarshalJSON(data []byte) error { - tv := semver.Version(*v) - if err := json.Unmarshal(data, &tv); err != nil { - return err - } - *v = IgnitionVersion(tv) - return nil -} - -func (v IgnitionVersion) MarshalJSON() ([]byte, error) { - return semver.Version(v).MarshalJSON() -} - -func (v IgnitionVersion) Validate() report.Report { - if MaxVersion.Major > v.Major { - return report.ReportFromError(errors.ErrOldVersion, report.EntryError) - } - if MaxVersion.LessThan(semver.Version(v)) { - return report.ReportFromError(errors.ErrNewVersion, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/networkd.go b/vendor/github.com/coreos/ignition/config/v2_0/types/networkd.go deleted file mode 100644 index 470c721106a..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/networkd.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -type Networkd struct { - Units []NetworkdUnit `json:"units,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/partition.go b/vendor/github.com/coreos/ignition/config/v2_0/types/partition.go deleted file mode 100644 index c36545d4a63..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/partition.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "fmt" - "regexp" - "strings" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Partition struct { - Label PartitionLabel `json:"label,omitempty"` - Number int `json:"number"` - Size PartitionDimension `json:"size"` - Start PartitionDimension `json:"start"` - TypeGUID PartitionTypeGUID `json:"typeGuid,omitempty"` -} - -type PartitionLabel string - -func (n PartitionLabel) Validate() report.Report { - // http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries: - // 56 (0x38) 72 bytes Partition name (36 UTF-16LE code units) - - // XXX(vc): note GPT calls it a name, we're using label for consistency - // with udev naming /dev/disk/by-partlabel/*. - if len(string(n)) > 36 { - return report.ReportFromError(errors.ErrLabelTooLong, report.EntryError) - } - if strings.Contains(string(n), ":") { - return report.ReportFromError(errors.ErrLabelContainsColon, report.EntryWarning) - } - return report.Report{} -} - -type PartitionDimension uint64 - -type PartitionTypeGUID string - -func (d PartitionTypeGUID) Validate() report.Report { - ok, err := regexp.MatchString("^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$", string(d)) - if err != nil { - return report.ReportFromError(fmt.Errorf("error matching type-guid regexp: %v", err), report.EntryError) - } - if !ok { - return report.ReportFromError(errors.ErrDoesntMatchGUIDRegex, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/passwd.go b/vendor/github.com/coreos/ignition/config/v2_0/types/passwd.go deleted file mode 100644 index 0ffff43bb84..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/passwd.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -type Passwd struct { - Users []User `json:"users,omitempty"` - Groups []Group `json:"groups,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/path.go b/vendor/github.com/coreos/ignition/config/v2_0/types/path.go deleted file mode 100644 index dcf35f80756..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/path.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "path" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Path string - -func (p Path) MarshalJSON() ([]byte, error) { - return []byte(`"` + string(p) + `"`), nil -} - -func (p Path) Validate() report.Report { - if !path.IsAbs(string(p)) { - return report.ReportFromError(errors.ErrPathRelative, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/raid.go b/vendor/github.com/coreos/ignition/config/v2_0/types/raid.go deleted file mode 100644 index 329b123e6d0..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/raid.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Raid struct { - Name string `json:"name"` - Level string `json:"level"` - Devices []Path `json:"devices,omitempty"` - Spares int `json:"spares,omitempty"` -} - -func (n Raid) Validate() report.Report { - switch n.Level { - case "linear", "raid0", "0", "stripe": - if n.Spares != 0 { - return report.ReportFromError(errors.ErrSparesUnsupportedForLevel, report.EntryError) - } - case "raid1", "1", "mirror": - case "raid4", "4": - case "raid5", "5": - case "raid6", "6": - case "raid10", "10": - default: - return report.ReportFromError(errors.ErrUnrecognizedRaidLevel, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/storage.go b/vendor/github.com/coreos/ignition/config/v2_0/types/storage.go deleted file mode 100644 index bd7343778a9..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/storage.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -type Storage struct { - Disks []Disk `json:"disks,omitempty"` - Arrays []Raid `json:"raid,omitempty"` - Filesystems []Filesystem `json:"filesystems,omitempty"` - Files []File `json:"files,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/systemd.go b/vendor/github.com/coreos/ignition/config/v2_0/types/systemd.go deleted file mode 100644 index 97194b91150..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/systemd.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2016 CoreOS, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -type Systemd struct { - Units []SystemdUnit `json:"units,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/unit.go b/vendor/github.com/coreos/ignition/config/v2_0/types/unit.go deleted file mode 100644 index 06d99f26616..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/unit.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "path" - "strings" - - "github.com/coreos/go-systemd/unit" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/shared/validations" - "github.com/coreos/ignition/config/validate/report" -) - -type SystemdUnit struct { - Name SystemdUnitName `json:"name,omitempty"` - Enable bool `json:"enable,omitempty"` - Mask bool `json:"mask,omitempty"` - Contents string `json:"contents,omitempty"` - DropIns []SystemdUnitDropIn `json:"dropins,omitempty"` -} - -func (u SystemdUnit) Validate() report.Report { - r := report.Report{} - opts, err := validateUnitContent(u.Contents) - if err != nil { - return report.ReportFromError(err, report.EntryError) - } - - r.Merge(validations.ValidateInstallSection(string(u.Name), u.Enable, u.Contents == "", opts)) - - return r -} - -type SystemdUnitDropIn struct { - Name SystemdUnitDropInName `json:"name,omitempty"` - Contents string `json:"contents,omitempty"` -} - -func (u SystemdUnitDropIn) Validate() report.Report { - if _, err := validateUnitContent(u.Contents); err != nil { - return report.ReportFromError(err, report.EntryError) - } - - return report.Report{} -} - -type SystemdUnitName string - -func (n SystemdUnitName) Validate() report.Report { - switch path.Ext(string(n)) { - case ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice", ".scope": - return report.Report{} - default: - return report.ReportFromError(errors.ErrInvalidSystemdExt, report.EntryError) - } -} - -type SystemdUnitDropInName string - -func (n SystemdUnitDropInName) Validate() report.Report { - switch path.Ext(string(n)) { - case ".conf": - return report.Report{} - default: - return report.ReportFromError(errors.ErrInvalidSystemdDropinExt, report.EntryError) - } -} - -type NetworkdUnit struct { - Name NetworkdUnitName `json:"name,omitempty"` - Contents string `json:"contents,omitempty"` -} - -func (u NetworkdUnit) Validate() report.Report 
{ - if _, err := validateUnitContent(u.Contents); err != nil { - return report.ReportFromError(err, report.EntryError) - } - - return report.Report{} -} - -type NetworkdUnitName string - -func (n NetworkdUnitName) Validate() report.Report { - switch path.Ext(string(n)) { - case ".link", ".netdev", ".network": - return report.Report{} - default: - return report.ReportFromError(errors.ErrInvalidNetworkdExt, report.EntryError) - } -} - -func validateUnitContent(content string) ([]*unit.UnitOption, error) { - c := strings.NewReader(content) - opts, err := unit.Deserialize(c) - if err != nil { - return nil, fmt.Errorf("invalid unit content: %s", err) - } - return opts, nil -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/url.go b/vendor/github.com/coreos/ignition/config/v2_0/types/url.go deleted file mode 100644 index b8ed96118be..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/url.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "encoding/json" - "net/url" - - "github.com/vincent-petithory/dataurl" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -type Url url.URL - -func (u *Url) UnmarshalJSON(data []byte) error { - var tu string - if err := json.Unmarshal(data, &tu); err != nil { - return err - } - - pu, err := url.Parse(tu) - if err != nil { - return errors.ErrInvalidUrl - } - - *u = Url(*pu) - return nil -} - -func (u Url) MarshalJSON() ([]byte, error) { - return []byte(`"` + u.String() + `"`), nil -} - -func (u Url) String() string { - tu := url.URL(u) - return (&tu).String() -} - -func (u Url) Validate() report.Report { - // Empty url is valid, indicates an empty file - if u.String() == "" { - return report.Report{} - } - switch url.URL(u).Scheme { - case "http", "https", "oem": - return report.Report{} - case "data": - if _, err := dataurl.DecodeString(u.String()); err != nil { - return report.ReportFromError(err, report.EntryError) - } - return report.Report{} - default: - return report.ReportFromError(errors.ErrInvalidScheme, report.EntryError) - } -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/user.go b/vendor/github.com/coreos/ignition/config/v2_0/types/user.go deleted file mode 100644 index f6653e27494..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/user.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -type User struct { - Name string `json:"name,omitempty"` - PasswordHash string `json:"passwordHash,omitempty"` - SSHAuthorizedKeys []string `json:"sshAuthorizedKeys,omitempty"` - Create *UserCreate `json:"create,omitempty"` -} - -type UserCreate struct { - Uid *uint `json:"uid,omitempty"` - GECOS string `json:"gecos,omitempty"` - Homedir string `json:"homeDir,omitempty"` - NoCreateHome bool `json:"noCreateHome,omitempty"` - PrimaryGroup string `json:"primaryGroup,omitempty"` - Groups []string `json:"groups,omitempty"` - NoUserGroup bool `json:"noUserGroup,omitempty"` - System bool `json:"system,omitempty"` - NoLogInit bool `json:"noLogInit,omitempty"` - Shell string `json:"shell,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v2_0/types/verification.go b/vendor/github.com/coreos/ignition/config/v2_0/types/verification.go deleted file mode 100644 index b7cef403e87..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_0/types/verification.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -type Verification struct { - Hash *Hash `json:"hash,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/append.go b/vendor/github.com/coreos/ignition/config/v2_1/append.go deleted file mode 100644 index b1517b73106..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/append.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2_1 - -import ( - "reflect" - - "github.com/coreos/ignition/config/v2_1/types" -) - -// Append appends newConfig to oldConfig and returns the result. Appending one -// config to another is accomplished by iterating over every field in the -// config structure, appending slices, recursively appending structs, and -// overwriting old values with new values for all other types. -func Append(oldConfig, newConfig types.Config) types.Config { - vOld := reflect.ValueOf(oldConfig) - vNew := reflect.ValueOf(newConfig) - - vResult := appendStruct(vOld, vNew) - - return vResult.Interface().(types.Config) -} - -// appendStruct is an internal helper function to AppendConfig. 
Given two values -// of structures (assumed to be the same type), recursively iterate over every -// field in the struct, appending slices, recursively appending structs, and -// overwriting old values with the new for all other types. Some individual -// struct fields have alternate merge strategies, determined by the field name. -// Currently these fields are "ignition.version", which uses the old value, and -// "ignition.config" which uses the new value. -func appendStruct(vOld, vNew reflect.Value) reflect.Value { - tOld := vOld.Type() - vRes := reflect.New(tOld) - - for i := 0; i < tOld.NumField(); i++ { - vfOld := vOld.Field(i) - vfNew := vNew.Field(i) - vfRes := vRes.Elem().Field(i) - - switch tOld.Field(i).Name { - case "Version": - vfRes.Set(vfOld) - continue - case "Config": - vfRes.Set(vfNew) - continue - } - - switch vfOld.Type().Kind() { - case reflect.Struct: - vfRes.Set(appendStruct(vfOld, vfNew)) - case reflect.Slice: - vfRes.Set(reflect.AppendSlice(vfOld, vfNew)) - default: - vfRes.Set(vfNew) - } - } - - return vRes.Elem() -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/cloudinit.go b/vendor/github.com/coreos/ignition/config/v2_1/cloudinit.go deleted file mode 100644 index a019320f41d..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/cloudinit.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// These functions are copied from github.com/coreos/coreos-cloudinit/config. - -package v2_1 - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "strings" - "unicode" -) - -func isCloudConfig(userdata []byte) bool { - header := strings.SplitN(string(decompressIfGzipped(userdata)), "\n", 2)[0] - - // Trim trailing whitespaces - header = strings.TrimRightFunc(header, unicode.IsSpace) - - return (header == "#cloud-config") -} - -func isScript(userdata []byte) bool { - header := strings.SplitN(string(decompressIfGzipped(userdata)), "\n", 2)[0] - return strings.HasPrefix(header, "#!") -} - -func decompressIfGzipped(data []byte) []byte { - if reader, err := gzip.NewReader(bytes.NewReader(data)); err == nil { - uncompressedData, err := ioutil.ReadAll(reader) - reader.Close() - if err == nil { - return uncompressedData - } else { - return data - } - } else { - return data - } -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/config.go b/vendor/github.com/coreos/ignition/config/v2_1/config.go deleted file mode 100644 index 3fd271dd7e9..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/config.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2_1 - -import ( - "github.com/coreos/go-semver/semver" - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/v2_0" - "github.com/coreos/ignition/config/v2_1/types" - "github.com/coreos/ignition/config/validate" - "github.com/coreos/ignition/config/validate/report" - - json "github.com/ajeddeloh/go-json" -) - -func Parse(rawConfig []byte) (types.Config, report.Report, error) { - if isEmpty(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrEmpty - } else if isCloudConfig(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrCloudConfig - } else if isScript(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrScript - } - - var err error - var config types.Config - - err = json.Unmarshal(rawConfig, &config) - - version, semverErr := semver.NewVersion(config.Ignition.Version) - - if err != nil || semverErr != nil || version.LessThan(types.MaxVersion) { - // We can fail unmarshaling if it's an older config. Attempt to parse - // it as such. - config, rpt, err := v2_0.Parse(rawConfig) - if err != nil { - return types.Config{}, rpt, err - } - return TranslateFromV2_0(config), rpt, err - } - - if *version != types.MaxVersion { - return types.Config{}, report.Report{}, errors.ErrUnknownVersion - } - - rpt := validate.ValidateConfig(rawConfig, config) - if rpt.IsFatal() { - return types.Config{}, rpt, errors.ErrInvalid - } - - return config, rpt, nil -} - -func isEmpty(userdata []byte) bool { - return len(userdata) == 0 -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/translate.go b/vendor/github.com/coreos/ignition/config/v2_1/translate.go deleted file mode 100644 index e6b80dd1222..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/translate.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2_1 - -import ( - "strings" - - "github.com/coreos/ignition/config/util" - v2_0 "github.com/coreos/ignition/config/v2_0/types" - "github.com/coreos/ignition/config/v2_1/types" -) - -// golang-- -func translateV2_0MkfsOptionsTov2_1OptionSlice(opts v2_0.MkfsOptions) []types.CreateOption { - newOpts := make([]types.CreateOption, len(opts)) - for i, o := range opts { - newOpts[i] = types.CreateOption(o) - } - return newOpts -} - -// golang-- -func translateStringSliceTov2_1SSHAuthorizedKeySlice(keys []string) []types.SSHAuthorizedKey { - newKeys := make([]types.SSHAuthorizedKey, len(keys)) - for i, k := range keys { - newKeys[i] = types.SSHAuthorizedKey(k) - } - return newKeys -} - -// golang-- -func translateStringSliceTov2_1UsercreateGroupSlice(groups []string) []types.UsercreateGroup { - var newGroups []types.UsercreateGroup - for _, g := range groups { - newGroups = append(newGroups, types.UsercreateGroup(g)) - } - return newGroups -} - -func TranslateFromV2_0(old v2_0.Config) types.Config { - translateVerification := func(old v2_0.Verification) types.Verification { - var ver types.Verification - if old.Hash != nil { - // .String() here is a wrapper around MarshalJSON, which will put the hash in quotes - h := strings.Trim(old.Hash.String(), "\"") - ver.Hash = &h - } - return ver - } - translateConfigReference := func(old v2_0.ConfigReference) types.ConfigReference { - return types.ConfigReference{ - Source: old.Source.String(), - Verification: translateVerification(old.Verification), - } - } - - config := types.Config{ - Ignition: types.Ignition{ - Version: types.MaxVersion.String(), - }, - } - - if old.Ignition.Config.Replace != nil { - ref := translateConfigReference(*old.Ignition.Config.Replace) - config.Ignition.Config.Replace = &ref - } - - for _, oldAppend := range old.Ignition.Config.Append { - config.Ignition.Config.Append = - append(config.Ignition.Config.Append, translateConfigReference(oldAppend)) - } - - for _, oldDisk := range old.Storage.Disks { - disk := types.Disk{ - Device: string(oldDisk.Device), - WipeTable: oldDisk.WipeTable, - } - - for _, oldPartition := range oldDisk.Partitions { - disk.Partitions = append(disk.Partitions, types.Partition{ - Label: string(oldPartition.Label), - Number: oldPartition.Number, - Size: int(oldPartition.Size), - Start: int(oldPartition.Start), - TypeGUID: string(oldPartition.TypeGUID), - }) - } - - config.Storage.Disks = append(config.Storage.Disks, disk) - } - - for _, oldArray := range old.Storage.Arrays { - array := types.Raid{ - Name: oldArray.Name, - Level: oldArray.Level, - Spares: oldArray.Spares, - } - - for _, oldDevice := range oldArray.Devices { - array.Devices = append(array.Devices, types.Device(oldDevice)) - } - - config.Storage.Raid = append(config.Storage.Raid, array) - } - - for _, oldFilesystem := range old.Storage.Filesystems { - filesystem := types.Filesystem{ - Name: oldFilesystem.Name, - } - - if oldFilesystem.Mount != nil { - filesystem.Mount = &types.Mount{ - Device: string(oldFilesystem.Mount.Device), - Format: string(oldFilesystem.Mount.Format), - } - - if oldFilesystem.Mount.Create != nil { - filesystem.Mount.Create = &types.Create{ - Force: oldFilesystem.Mount.Create.Force, - Options: translateV2_0MkfsOptionsTov2_1OptionSlice(oldFilesystem.Mount.Create.Options), - } - } - } - - if oldFilesystem.Path != nil { - p := string(*oldFilesystem.Path) - filesystem.Path = &p - } - - config.Storage.Filesystems = append(config.Storage.Filesystems, filesystem) - } - - for _, oldFile := range old.Storage.Files { - 
file := types.File{ - Node: types.Node{ - Filesystem: oldFile.Filesystem, - Path: string(oldFile.Path), - User: types.NodeUser{ID: util.IntToPtr(oldFile.User.Id)}, - Group: types.NodeGroup{ID: util.IntToPtr(oldFile.Group.Id)}, - }, - FileEmbedded1: types.FileEmbedded1{ - Mode: int(oldFile.Mode), - Contents: types.FileContents{ - Compression: string(oldFile.Contents.Compression), - Source: oldFile.Contents.Source.String(), - Verification: translateVerification(oldFile.Contents.Verification), - }, - }, - } - - config.Storage.Files = append(config.Storage.Files, file) - } - - for _, oldUnit := range old.Systemd.Units { - unit := types.Unit{ - Name: string(oldUnit.Name), - Enable: oldUnit.Enable, - Mask: oldUnit.Mask, - Contents: oldUnit.Contents, - } - - for _, oldDropIn := range oldUnit.DropIns { - unit.Dropins = append(unit.Dropins, types.Dropin{ - Name: string(oldDropIn.Name), - Contents: oldDropIn.Contents, - }) - } - - config.Systemd.Units = append(config.Systemd.Units, unit) - } - - for _, oldUnit := range old.Networkd.Units { - config.Networkd.Units = append(config.Networkd.Units, types.Networkdunit{ - Name: string(oldUnit.Name), - Contents: oldUnit.Contents, - }) - } - - for _, oldUser := range old.Passwd.Users { - user := types.PasswdUser{ - Name: oldUser.Name, - PasswordHash: util.StrToPtr(oldUser.PasswordHash), - SSHAuthorizedKeys: translateStringSliceTov2_1SSHAuthorizedKeySlice(oldUser.SSHAuthorizedKeys), - } - - if oldUser.Create != nil { - var u *int - if oldUser.Create.Uid != nil { - tmp := int(*oldUser.Create.Uid) - u = &tmp - } - user.Create = &types.Usercreate{ - UID: u, - Gecos: oldUser.Create.GECOS, - HomeDir: oldUser.Create.Homedir, - NoCreateHome: oldUser.Create.NoCreateHome, - PrimaryGroup: oldUser.Create.PrimaryGroup, - Groups: translateStringSliceTov2_1UsercreateGroupSlice(oldUser.Create.Groups), - NoUserGroup: oldUser.Create.NoUserGroup, - System: oldUser.Create.System, - NoLogInit: oldUser.Create.NoLogInit, - Shell: oldUser.Create.Shell, - } - } - - config.Passwd.Users = append(config.Passwd.Users, user) - } - - for _, oldGroup := range old.Passwd.Groups { - var g *int - if oldGroup.Gid != nil { - tmp := int(*oldGroup.Gid) - g = &tmp - } - config.Passwd.Groups = append(config.Passwd.Groups, types.PasswdGroup{ - Name: oldGroup.Name, - Gid: g, - PasswordHash: oldGroup.PasswordHash, - System: oldGroup.System, - }) - } - - return config -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/config.go b/vendor/github.com/coreos/ignition/config/v2_1/types/config.go deleted file mode 100644 index 0e83bc6ad10..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "fmt" - - "github.com/coreos/go-semver/semver" - - "github.com/coreos/ignition/config/validate/report" -) - -var ( - MaxVersion = semver.Version{ - Major: 2, - Minor: 1, - } -) - -func (c Config) Validate() report.Report { - r := report.Report{} - rules := []rule{ - checkFilesFilesystems, - checkDuplicateFilesystems, - } - - for _, rule := range rules { - rule(c, &r) - } - return r -} - -type rule func(cfg Config, report *report.Report) - -func checkNodeFilesystems(node Node, filesystems map[string]struct{}, nodeType string) report.Report { - r := report.Report{} - if node.Filesystem == "" { - // Filesystem was not specified. This is an error, but its handled in types.File's Validate, not here - return r - } - _, ok := filesystems[node.Filesystem] - if !ok { - r.Add(report.Entry{ - Kind: report.EntryWarning, - Message: fmt.Sprintf("%v %q references nonexistent filesystem %q. (This is ok if it is defined in a referenced config)", - nodeType, node.Path, node.Filesystem), - }) - } - return r -} - -func checkFilesFilesystems(cfg Config, r *report.Report) { - filesystems := map[string]struct{}{"root": {}} - for _, filesystem := range cfg.Storage.Filesystems { - filesystems[filesystem.Name] = struct{}{} - } - for _, file := range cfg.Storage.Files { - r.Merge(checkNodeFilesystems(file.Node, filesystems, "File")) - } - for _, link := range cfg.Storage.Links { - r.Merge(checkNodeFilesystems(link.Node, filesystems, "Link")) - } - for _, dir := range cfg.Storage.Directories { - r.Merge(checkNodeFilesystems(dir.Node, filesystems, "Directory")) - } -} - -func checkDuplicateFilesystems(cfg Config, r *report.Report) { - filesystems := map[string]struct{}{"root": {}} - for _, filesystem := range cfg.Storage.Filesystems { - if _, ok := filesystems[filesystem.Name]; ok { - r.Add(report.Entry{ - Kind: report.EntryWarning, - Message: fmt.Sprintf("Filesystem %q shadows exising filesystem definition", filesystem.Name), - }) - } - filesystems[filesystem.Name] = struct{}{} - } -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/directory.go b/vendor/github.com/coreos/ignition/config/v2_1/types/directory.go deleted file mode 100644 index 16adad05910..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/directory.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/validate/report" -) - -func (d Directory) ValidateMode() report.Report { - r := report.Report{} - if err := validateMode(d.Mode); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/disk.go b/vendor/github.com/coreos/ignition/config/v2_1/types/disk.go deleted file mode 100644 index f0af504a171..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/disk.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2016 CoreOS, Inc. 
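checkFilesFilesystems and checkDuplicateFilesystems above both work off a set of declared filesystem names seeded with the implicit "root" entry: one warns about nodes that reference an undeclared name, the other about a name declared twice. A small self-contained sketch of that bookkeeping; the sample data is made up.

package main

import "fmt"

func main() {
	declared := []string{"var", "var"} // filesystem names from storage.filesystems
	files := map[string]string{        // file path -> filesystem it references
		"/etc/motd":      "root",
		"/var/lib/x.cfg": "var",
		"/opt/tool":      "opt", // never declared
	}

	filesystems := map[string]struct{}{"root": {}} // "root" always exists
	for _, name := range declared {
		if _, dup := filesystems[name]; dup {
			fmt.Printf("warning: filesystem %q shadows an existing definition\n", name)
		}
		filesystems[name] = struct{}{}
	}
	for path, fs := range files {
		if _, ok := filesystems[fs]; !ok {
			fmt.Printf("warning: file %q references nonexistent filesystem %q\n", path, fs)
		}
	}
}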
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (n Disk) Validate() report.Report { - return report.Report{} -} - -func (n Disk) ValidateDevice() report.Report { - if len(n.Device) == 0 { - return report.ReportFromError(errors.ErrDiskDeviceRequired, report.EntryError) - } - if err := validatePath(string(n.Device)); err != nil { - return report.ReportFromError(err, report.EntryError) - } - return report.Report{} -} - -func (n Disk) ValidatePartitions() report.Report { - r := report.Report{} - if n.partitionNumbersCollide() { - r.Add(report.Entry{ - Message: errors.ErrPartitionNumbersCollide.Error(), - Kind: report.EntryError, - }) - } - if n.partitionsOverlap() { - r.Add(report.Entry{ - Message: errors.ErrPartitionsOverlap.Error(), - Kind: report.EntryError, - }) - } - if n.partitionsMisaligned() { - r.Add(report.Entry{ - Message: errors.ErrPartitionsMisaligned.Error(), - Kind: report.EntryError, - }) - } - // Disks which have no errors at this point will likely succeed in sgdisk - return r -} - -// partitionNumbersCollide returns true if partition numbers in n.Partitions are not unique. -func (n Disk) partitionNumbersCollide() bool { - m := map[int][]Partition{} - for _, p := range n.Partitions { - if p.Number != 0 { - // a number of 0 means next available number, multiple devices can specify this - m[p.Number] = append(m[p.Number], p) - } - } - for _, n := range m { - if len(n) > 1 { - // TODO(vc): return information describing the collision for logging - return true - } - } - return false -} - -// end returns the last sector of a partition. -func (p Partition) end() int { - if p.Size == 0 { - // a size of 0 means "fill available", just return the start as the end for those. - return p.Start - } - return p.Start + p.Size - 1 -} - -// partitionsOverlap returns true if any explicitly dimensioned partitions overlap -func (n Disk) partitionsOverlap() bool { - for _, p := range n.Partitions { - // Starts of 0 are placed by sgdisk into the "largest available block" at that time. - // We aren't going to check those for overlap since we don't have the disk geometry. - if p.Start == 0 { - continue - } - - for _, o := range n.Partitions { - if p == o || o.Start == 0 { - continue - } - - // is p.Start within o? - if p.Start >= o.Start && p.Start <= o.end() { - return true - } - - // is p.end() within o? - if p.end() >= o.Start && p.end() <= o.end() { - return true - } - - // do p.Start and p.end() straddle o? - if p.Start < o.Start && p.end() > o.end() { - return true - } - } - } - return false -} - -// partitionsMisaligned returns true if any of the partitions don't start on a 2048-sector (1MiB) boundary. 
-func (n Disk) partitionsMisaligned() bool { - for _, p := range n.Partitions { - if (p.Start & (2048 - 1)) != 0 { - return true - } - } - return false -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/file.go b/vendor/github.com/coreos/ignition/config/v2_1/types/file.go deleted file mode 100644 index ac79cd85854..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/file.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (f File) ValidateMode() report.Report { - r := report.Report{} - if err := validateMode(f.Mode); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (fc FileContents) ValidateCompression() report.Report { - r := report.Report{} - switch fc.Compression { - case "", "gzip": - default: - r.Add(report.Entry{ - Message: errors.ErrCompressionInvalid.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (fc FileContents) ValidateSource() report.Report { - r := report.Report{} - err := validateURL(fc.Source) - if err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/filesystem.go b/vendor/github.com/coreos/ignition/config/v2_1/types/filesystem.go deleted file mode 100644 index a2e43ffda1d..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/filesystem.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
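The geometry checks above treat a start of 0 as "place automatically", require explicit starts to land on a 2048-sector (1 MiB) boundary, and compare explicitly sized partitions pairwise for overlap. Below is a standalone sketch with a simplified partition type; the field names and sample values are illustrative.

package main

import "fmt"

type partition struct {
	start, size int // in sectors; 0 means "next available" / "fill available"
}

// end returns the last sector of the partition, as in the deleted code.
func (p partition) end() int {
	if p.size == 0 {
		return p.start
	}
	return p.start + p.size - 1
}

func misaligned(parts []partition) bool {
	for _, p := range parts {
		if (p.start & (2048 - 1)) != 0 { // not on a 2048-sector boundary
			return true
		}
	}
	return false
}

func overlap(parts []partition) bool {
	for i, p := range parts {
		if p.start == 0 {
			continue // placed by sgdisk; geometry unknown
		}
		for j, o := range parts {
			if i == j || o.start == 0 {
				continue
			}
			// p starts inside o, p ends inside o, or p straddles o entirely.
			if (p.start >= o.start && p.start <= o.end()) ||
				(p.end() >= o.start && p.end() <= o.end()) ||
				(p.start < o.start && p.end() > o.end()) {
				return true
			}
		}
	}
	return false
}

func main() {
	parts := []partition{{start: 2048, size: 4096}, {start: 4096, size: 2048}}
	fmt.Println("misaligned:", misaligned(parts)) // false: both start on 2048-sector boundaries
	fmt.Println("overlap:", overlap(parts))       // true: the second starts inside the first
}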
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (f Filesystem) Validate() report.Report { - r := report.Report{} - if f.Mount == nil && f.Path == nil { - r.Add(report.Entry{ - Message: errors.ErrFilesystemNoMountPath.Error(), - Kind: report.EntryError, - }) - } - if f.Mount != nil { - if f.Path != nil { - r.Add(report.Entry{ - Message: errors.ErrFilesystemMountAndPath.Error(), - Kind: report.EntryError, - }) - } - if f.Mount.Create != nil { - if f.Mount.WipeFilesystem { - r.Add(report.Entry{ - Message: errors.ErrUsedCreateAndWipeFilesystem.Error(), - Kind: report.EntryError, - }) - } - if len(f.Mount.Options) > 0 { - r.Add(report.Entry{ - Message: errors.ErrUsedCreateAndMountOpts.Error(), - Kind: report.EntryError, - }) - } - r.Add(report.Entry{ - Message: errors.ErrWarningCreateDeprecated.Error(), - Kind: report.EntryWarning, - }) - } - } - return r -} - -func (f Filesystem) ValidatePath() report.Report { - r := report.Report{} - if f.Path != nil && validatePath(*f.Path) != nil { - r.Add(report.Entry{ - Message: errors.ErrPathRelative.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (m Mount) Validate() report.Report { - r := report.Report{} - switch m.Format { - case "ext4", "btrfs", "xfs", "swap", "vfat": - default: - r.Add(report.Entry{ - Message: errors.ErrFilesystemInvalidFormat.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (m Mount) ValidateDevice() report.Report { - r := report.Report{} - if err := validatePath(m.Device); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (m Mount) ValidateLabel() report.Report { - r := report.Report{} - if m.Label == nil { - return r - } - switch m.Format { - case "ext4": - if len(*m.Label) > 16 { - // source: man mkfs.ext4 - r.Add(report.Entry{ - Message: errors.ErrExt4LabelTooLong.Error(), - Kind: report.EntryError, - }) - } - case "btrfs": - if len(*m.Label) > 256 { - // source: man mkfs.btrfs - r.Add(report.Entry{ - Message: errors.ErrBtrfsLabelTooLong.Error(), - Kind: report.EntryError, - }) - } - case "xfs": - if len(*m.Label) > 12 { - // source: man mkfs.xfs - r.Add(report.Entry{ - Message: errors.ErrXfsLabelTooLong.Error(), - Kind: report.EntryError, - }) - } - case "swap": - // mkswap's man page does not state a limit on label size, but through - // experimentation it appears that mkswap will truncate long labels to - // 15 characters, so let's enforce that. - if len(*m.Label) > 15 { - r.Add(report.Entry{ - Message: errors.ErrSwapLabelTooLong.Error(), - Kind: report.EntryError, - }) - } - case "vfat": - if len(*m.Label) > 11 { - // source: man mkfs.fat - r.Add(report.Entry{ - Message: errors.ErrVfatLabelTooLong.Error(), - Kind: report.EntryError, - }) - } - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/ignition.go b/vendor/github.com/coreos/ignition/config/v2_1/types/ignition.go deleted file mode 100644 index bddf4958338..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/ignition.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
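ValidateLabel above enforces per-format label length limits taken from the respective mkfs man pages (and, for swap, the truncation length observed from mkswap). The same table-driven check in isolation; validateLabel and maxLabelLen are illustrative names.

package main

import "fmt"

var maxLabelLen = map[string]int{
	"ext4":  16,
	"btrfs": 256,
	"xfs":   12,
	"swap":  15, // mkswap truncates longer labels
	"vfat":  11,
}

func validateLabel(format, label string) error {
	limit, ok := maxLabelLen[format]
	if !ok {
		return fmt.Errorf("unsupported format %q", format)
	}
	if len(label) > limit {
		return fmt.Errorf("%s label %q exceeds %d characters", format, label, limit)
	}
	return nil
}

func main() {
	fmt.Println(validateLabel("ext4", "var-lib-containers")) // error: 18 > 16
	fmt.Println(validateLabel("xfs", "var"))                 // <nil>
}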
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/go-semver/semver" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (c ConfigReference) ValidateSource() report.Report { - r := report.Report{} - err := validateURL(c.Source) - if err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (v Ignition) Semver() (*semver.Version, error) { - return semver.NewVersion(v.Version) -} - -func (v Ignition) Validate() report.Report { - tv, err := v.Semver() - if err != nil { - return report.ReportFromError(errors.ErrInvalidVersion, report.EntryError) - } - if MaxVersion.Major > tv.Major { - return report.ReportFromError(errors.ErrOldVersion, report.EntryError) - } - if MaxVersion.LessThan(*tv) { - return report.ReportFromError(errors.ErrNewVersion, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/link.go b/vendor/github.com/coreos/ignition/config/v2_1/types/link.go deleted file mode 100644 index f0284425282..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/link.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/validate/report" -) - -func (s LinkEmbedded1) ValidateTarget() report.Report { - r := report.Report{} - if !s.Hard { - err := validatePath(s.Target) - if err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/mode.go b/vendor/github.com/coreos/ignition/config/v2_1/types/mode.go deleted file mode 100644 index 12d4188e95f..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/mode.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" -) - -func validateMode(m int) error { - if m < 0 || m > 07777 { - return errors.ErrFileIllegalMode - } - return nil -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/node.go b/vendor/github.com/coreos/ignition/config/v2_1/types/node.go deleted file mode 100644 index 50badfdfb92..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/node.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "path/filepath" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (n Node) ValidateFilesystem() report.Report { - r := report.Report{} - if n.Filesystem == "" { - r.Add(report.Entry{ - Message: errors.ErrNoFilesystem.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (n Node) ValidatePath() report.Report { - r := report.Report{} - if err := validatePath(n.Path); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (n Node) Depth() int { - count := 0 - for p := filepath.Clean(string(n.Path)); p != "/"; count++ { - p = filepath.Dir(p) - } - return count -} - -func (nu NodeUser) Validate() report.Report { - r := report.Report{} - if nu.ID != nil && nu.Name != "" { - r.Add(report.Entry{ - Message: errors.ErrBothIDAndNameSet.Error(), - Kind: report.EntryError, - }) - } - return r -} -func (ng NodeGroup) Validate() report.Report { - r := report.Report{} - if ng.ID != nil && ng.Name != "" { - r.Add(report.Entry{ - Message: errors.ErrBothIDAndNameSet.Error(), - Kind: report.EntryError, - }) - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/partition.go b/vendor/github.com/coreos/ignition/config/v2_1/types/partition.go deleted file mode 100644 index 084dce7ce23..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/partition.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "fmt" - "regexp" - "strings" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -const ( - guidRegexStr = "^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$" -) - -func (p Partition) ValidateLabel() report.Report { - r := report.Report{} - // http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries: - // 56 (0x38) 72 bytes Partition name (36 UTF-16LE code units) - - // XXX(vc): note GPT calls it a name, we're using label for consistency - // with udev naming /dev/disk/by-partlabel/*. - if len(p.Label) > 36 { - r.Add(report.Entry{ - Message: errors.ErrLabelTooLong.Error(), - Kind: report.EntryError, - }) - } - - // sgdisk uses colons for delimitting compound arguments and does not allow escaping them. - if strings.Contains(p.Label, ":") { - r.Add(report.Entry{ - Message: errors.ErrLabelContainsColon.Error(), - Kind: report.EntryWarning, - }) - } - return r -} - -func (p Partition) ValidateTypeGUID() report.Report { - return validateGUID(p.TypeGUID) -} - -func (p Partition) ValidateGUID() report.Report { - return validateGUID(p.GUID) -} - -func validateGUID(guid string) report.Report { - r := report.Report{} - ok, err := regexp.MatchString(guidRegexStr, guid) - if err != nil { - r.Add(report.Entry{ - Message: fmt.Sprintf("error matching guid regexp: %v", err), - Kind: report.EntryError, - }) - } else if !ok { - r.Add(report.Entry{ - Message: errors.ErrDoesntMatchGUIDRegex.Error(), - Kind: report.EntryError, - }) - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/passwd.go b/vendor/github.com/coreos/ignition/config/v2_1/types/passwd.go deleted file mode 100644 index 10508c56c06..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/passwd.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
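validateGUID above accepts either an empty string or a canonical 8-4-4-4-12 hex GUID. The same check in isolation; validGUID is an illustrative name, and the sample value is the well-known Linux filesystem partition type GUID.

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as guidRegexStr above; the empty alternative allows an unset field.
var guidRegex = regexp.MustCompile("^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$")

func validGUID(guid string) bool {
	return guidRegex.MatchString(guid)
}

func main() {
	fmt.Println(validGUID(""))                                     // true: unset is allowed
	fmt.Println(validGUID("0FC63DAF-8483-4772-8E79-3D69D8477DE4")) // true
	fmt.Println(validGUID("not-a-guid"))                           // false
}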
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (p PasswdUser) Validate() report.Report { - r := report.Report{} - if p.Create != nil { - r.Add(report.Entry{ - Message: errors.ErrPasswdCreateDeprecated.Error(), - Kind: report.EntryWarning, - }) - addErr := func(err error) { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - if p.Gecos != "" { - addErr(errors.ErrPasswdCreateAndGecos) - } - if len(p.Groups) > 0 { - addErr(errors.ErrPasswdCreateAndGroups) - } - if p.HomeDir != "" { - addErr(errors.ErrPasswdCreateAndHomeDir) - } - if p.NoCreateHome { - addErr(errors.ErrPasswdCreateAndNoCreateHome) - } - if p.NoLogInit { - addErr(errors.ErrPasswdCreateAndNoLogInit) - } - if p.NoUserGroup { - addErr(errors.ErrPasswdCreateAndNoUserGroup) - } - if p.PrimaryGroup != "" { - addErr(errors.ErrPasswdCreateAndPrimaryGroup) - } - if p.Shell != "" { - addErr(errors.ErrPasswdCreateAndShell) - } - if p.System { - addErr(errors.ErrPasswdCreateAndSystem) - } - if p.UID != nil { - addErr(errors.ErrPasswdCreateAndUID) - } - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/path.go b/vendor/github.com/coreos/ignition/config/v2_1/types/path.go deleted file mode 100644 index 780607c31ab..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/path.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "path" - - "github.com/coreos/ignition/config/shared/errors" -) - -func validatePath(p string) error { - if !path.IsAbs(p) { - return errors.ErrPathRelative - } - return nil -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/raid.go b/vendor/github.com/coreos/ignition/config/v2_1/types/raid.go deleted file mode 100644 index 3aceaa9faa1..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/raid.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (n Raid) ValidateLevel() report.Report { - r := report.Report{} - switch n.Level { - case "linear", "raid0", "0", "stripe": - if n.Spares != 0 { - r.Add(report.Entry{ - Message: errors.ErrSparesUnsupportedForLevel.Error(), - Kind: report.EntryError, - }) - } - case "raid1", "1", "mirror": - case "raid4", "4": - case "raid5", "5": - case "raid6", "6": - case "raid10", "10": - default: - r.Add(report.Entry{ - Message: errors.ErrUnrecognizedRaidLevel.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (n Raid) ValidateDevices() report.Report { - r := report.Report{} - for _, d := range n.Devices { - if err := validatePath(string(d)); err != nil { - r.Add(report.Entry{ - Message: errors.ErrPathRelative.Error(), - Kind: report.EntryError, - }) - } - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/schema.go b/vendor/github.com/coreos/ignition/config/v2_1/types/schema.go deleted file mode 100644 index e0caed5e6e2..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/schema.go +++ /dev/null @@ -1,221 +0,0 @@ -package types - -// generated by "schematyper --package=types schema/ignition.json -o config/types/schema.go --root-type=Config" -- DO NOT EDIT - -type Config struct { - Ignition Ignition `json:"ignition"` - Networkd Networkd `json:"networkd,omitempty"` - Passwd Passwd `json:"passwd,omitempty"` - Storage Storage `json:"storage,omitempty"` - Systemd Systemd `json:"systemd,omitempty"` -} - -type ConfigReference struct { - Source string `json:"source,omitempty"` - Verification Verification `json:"verification,omitempty"` -} - -type Create struct { - Force bool `json:"force,omitempty"` - Options []CreateOption `json:"options,omitempty"` -} - -type CreateOption string - -type Device string - -type Directory struct { - Node - DirectoryEmbedded1 -} - -type DirectoryEmbedded1 struct { - Mode int `json:"mode,omitempty"` -} - -type Disk struct { - Device string `json:"device,omitempty"` - Partitions []Partition `json:"partitions,omitempty"` - WipeTable bool `json:"wipeTable,omitempty"` -} - -type Dropin struct { - Contents string `json:"contents,omitempty"` - Name string `json:"name,omitempty"` -} - -type File struct { - Node - FileEmbedded1 -} - -type FileContents struct { - Compression string `json:"compression,omitempty"` - Source string `json:"source,omitempty"` - Verification Verification `json:"verification,omitempty"` -} - -type FileEmbedded1 struct { - Contents FileContents `json:"contents,omitempty"` - Mode int `json:"mode,omitempty"` -} - -type Filesystem struct { - Mount *Mount `json:"mount,omitempty"` - Name string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` -} - -type Ignition struct { - Config IgnitionConfig `json:"config,omitempty"` - Timeouts Timeouts `json:"timeouts,omitempty"` - Version string `json:"version,omitempty"` -} - -type IgnitionConfig struct { - Append []ConfigReference `json:"append,omitempty"` - Replace *ConfigReference `json:"replace,omitempty"` -} - -type Link struct { - Node - LinkEmbedded1 -} - -type LinkEmbedded1 struct { - Hard bool `json:"hard,omitempty"` - Target string `json:"target,omitempty"` -} - -type Mount struct { - Create *Create `json:"create,omitempty"` - Device string `json:"device,omitempty"` - Format string `json:"format,omitempty"` - Label *string `json:"label,omitempty"` - Options []MountOption `json:"options,omitempty"` - 
UUID *string `json:"uuid,omitempty"` - WipeFilesystem bool `json:"wipeFilesystem,omitempty"` -} - -type MountOption string - -type Networkd struct { - Units []Networkdunit `json:"units,omitempty"` -} - -type Networkdunit struct { - Contents string `json:"contents,omitempty"` - Name string `json:"name,omitempty"` -} - -type Node struct { - Filesystem string `json:"filesystem,omitempty"` - Group NodeGroup `json:"group,omitempty"` - Path string `json:"path,omitempty"` - User NodeUser `json:"user,omitempty"` -} - -type NodeGroup struct { - ID *int `json:"id,omitempty"` - Name string `json:"name,omitempty"` -} - -type NodeUser struct { - ID *int `json:"id,omitempty"` - Name string `json:"name,omitempty"` -} - -type Partition struct { - GUID string `json:"guid,omitempty"` - Label string `json:"label,omitempty"` - Number int `json:"number,omitempty"` - Size int `json:"size,omitempty"` - Start int `json:"start,omitempty"` - TypeGUID string `json:"typeGuid,omitempty"` -} - -type Passwd struct { - Groups []PasswdGroup `json:"groups,omitempty"` - Users []PasswdUser `json:"users,omitempty"` -} - -type PasswdGroup struct { - Gid *int `json:"gid,omitempty"` - Name string `json:"name,omitempty"` - PasswordHash string `json:"passwordHash,omitempty"` - System bool `json:"system,omitempty"` -} - -type PasswdUser struct { - Create *Usercreate `json:"create,omitempty"` - Gecos string `json:"gecos,omitempty"` - Groups []PasswdUserGroup `json:"groups,omitempty"` - HomeDir string `json:"homeDir,omitempty"` - Name string `json:"name,omitempty"` - NoCreateHome bool `json:"noCreateHome,omitempty"` - NoLogInit bool `json:"noLogInit,omitempty"` - NoUserGroup bool `json:"noUserGroup,omitempty"` - PasswordHash *string `json:"passwordHash,omitempty"` - PrimaryGroup string `json:"primaryGroup,omitempty"` - SSHAuthorizedKeys []SSHAuthorizedKey `json:"sshAuthorizedKeys,omitempty"` - Shell string `json:"shell,omitempty"` - System bool `json:"system,omitempty"` - UID *int `json:"uid,omitempty"` -} - -type PasswdUserGroup string - -type Raid struct { - Devices []Device `json:"devices,omitempty"` - Level string `json:"level,omitempty"` - Name string `json:"name,omitempty"` - Spares int `json:"spares,omitempty"` -} - -type SSHAuthorizedKey string - -type Storage struct { - Directories []Directory `json:"directories,omitempty"` - Disks []Disk `json:"disks,omitempty"` - Files []File `json:"files,omitempty"` - Filesystems []Filesystem `json:"filesystems,omitempty"` - Links []Link `json:"links,omitempty"` - Raid []Raid `json:"raid,omitempty"` -} - -type Systemd struct { - Units []Unit `json:"units,omitempty"` -} - -type Timeouts struct { - HTTPResponseHeaders *int `json:"httpResponseHeaders,omitempty"` - HTTPTotal *int `json:"httpTotal,omitempty"` -} - -type Unit struct { - Contents string `json:"contents,omitempty"` - Dropins []Dropin `json:"dropins,omitempty"` - Enable bool `json:"enable,omitempty"` - Enabled *bool `json:"enabled,omitempty"` - Mask bool `json:"mask,omitempty"` - Name string `json:"name,omitempty"` -} - -type Usercreate struct { - Gecos string `json:"gecos,omitempty"` - Groups []UsercreateGroup `json:"groups,omitempty"` - HomeDir string `json:"homeDir,omitempty"` - NoCreateHome bool `json:"noCreateHome,omitempty"` - NoLogInit bool `json:"noLogInit,omitempty"` - NoUserGroup bool `json:"noUserGroup,omitempty"` - PrimaryGroup string `json:"primaryGroup,omitempty"` - Shell string `json:"shell,omitempty"` - System bool `json:"system,omitempty"` - UID *int `json:"uid,omitempty"` -} - -type UsercreateGroup string - -type 
Verification struct { - Hash *string `json:"hash,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/unit.go b/vendor/github.com/coreos/ignition/config/v2_1/types/unit.go deleted file mode 100644 index 07e6fe6f5cc..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/unit.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "path" - "strings" - - "github.com/coreos/go-systemd/unit" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/shared/validations" - "github.com/coreos/ignition/config/validate/report" -) - -func (u Unit) ValidateContents() report.Report { - r := report.Report{} - opts, err := validateUnitContent(u.Contents) - if err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - - isEnabled := u.Enable || (u.Enabled != nil && *u.Enabled) - r.Merge(validations.ValidateInstallSection(u.Name, isEnabled, u.Contents == "", opts)) - - return r -} - -func (u Unit) ValidateName() report.Report { - r := report.Report{} - switch path.Ext(u.Name) { - case ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice", ".scope": - default: - r.Add(report.Entry{ - Message: errors.ErrInvalidSystemdExt.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (d Dropin) Validate() report.Report { - r := report.Report{} - - if _, err := validateUnitContent(d.Contents); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - - switch path.Ext(d.Name) { - case ".conf": - default: - r.Add(report.Entry{ - Message: errors.ErrInvalidSystemdDropinExt.Error(), - Kind: report.EntryError, - }) - } - - return r -} - -func (u Networkdunit) Validate() report.Report { - r := report.Report{} - - if _, err := validateUnitContent(u.Contents); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - - switch path.Ext(u.Name) { - case ".link", ".netdev", ".network": - default: - r.Add(report.Entry{ - Message: errors.ErrInvalidNetworkdExt.Error(), - Kind: report.EntryError, - }) - } - - return r -} - -func validateUnitContent(content string) ([]*unit.UnitOption, error) { - c := strings.NewReader(content) - opts, err := unit.Deserialize(c) - if err != nil { - return nil, fmt.Errorf("invalid unit content: %s", err) - } - return opts, nil -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/url.go b/vendor/github.com/coreos/ignition/config/v2_1/types/url.go deleted file mode 100644 index 0fdc4a170ef..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/url.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "net/url" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/vincent-petithory/dataurl" -) - -func validateURL(s string) error { - // Empty url is valid, indicates an empty file - if s == "" { - return nil - } - u, err := url.Parse(s) - if err != nil { - return errors.ErrInvalidUrl - } - - switch u.Scheme { - case "http", "https", "oem", "tftp", "s3": - return nil - case "data": - if _, err := dataurl.DecodeString(s); err != nil { - return err - } - return nil - default: - return errors.ErrInvalidScheme - } -} diff --git a/vendor/github.com/coreos/ignition/config/v2_1/types/verification.go b/vendor/github.com/coreos/ignition/config/v2_1/types/verification.go deleted file mode 100644 index 51e7d1550a3..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_1/types/verification.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "crypto" - "encoding/hex" - "strings" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -// HashParts will return the sum and function (in that order) of the hash stored -// in this Verification, or an error if there is an issue during parsing. -func (v Verification) HashParts() (string, string, error) { - if v.Hash == nil { - // The hash can be nil - return "", "", nil - } - parts := strings.SplitN(*v.Hash, "-", 2) - if len(parts) != 2 { - return "", "", errors.ErrHashMalformed - } - - return parts[0], parts[1], nil -} - -func (v Verification) Validate() report.Report { - r := report.Report{} - - if v.Hash == nil { - // The hash can be nil - return r - } - - function, sum, err := v.HashParts() - if err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - return r - } - var hash crypto.Hash - switch function { - case "sha512": - hash = crypto.SHA512 - default: - r.Add(report.Entry{ - Message: errors.ErrHashUnrecognized.Error(), - Kind: report.EntryError, - }) - return r - } - - if len(sum) != hex.EncodedLen(hash.Size()) { - r.Add(report.Entry{ - Message: errors.ErrHashWrongSize.Error(), - Kind: report.EntryError, - }) - } - - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/append.go b/vendor/github.com/coreos/ignition/config/v2_2/append.go deleted file mode 100644 index cf28f40905d..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/append.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 CoreOS, Inc. 
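Verification hashes above are stored as a single "<function>-<hexsum>" string; only sha512 is recognized, and the hex sum must have exactly the encoded length of that digest. A standalone sketch of the parsing and length check; validateHash is an illustrative name.

package main

import (
	"crypto"
	"encoding/hex"
	"errors"
	"fmt"
	"strings"
)

func validateHash(value string) error {
	parts := strings.SplitN(value, "-", 2)
	if len(parts) != 2 {
		return errors.New("malformed hash specifier")
	}
	function, sum := parts[0], parts[1]
	if function != "sha512" {
		return errors.New("unrecognized hash function")
	}
	// crypto.SHA512.Size() is 64 bytes, so the hex encoding must be 128 characters.
	if len(sum) != hex.EncodedLen(crypto.SHA512.Size()) {
		return errors.New("hash is the wrong size")
	}
	return nil
}

func main() {
	fmt.Println(validateHash("sha512-" + strings.Repeat("ab", 64))) // <nil>
	fmt.Println(validateHash("md5-d41d8cd98f00b204e9800998ecf8427e"))
	fmt.Println(validateHash("sha512-deadbeef"))
}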
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2_2 - -import ( - "reflect" - - "github.com/coreos/ignition/config/v2_2/types" -) - -// Append appends newConfig to oldConfig and returns the result. Appending one -// config to another is accomplished by iterating over every field in the -// config structure, appending slices, recursively appending structs, and -// overwriting old values with new values for all other types. -func Append(oldConfig, newConfig types.Config) types.Config { - vOld := reflect.ValueOf(oldConfig) - vNew := reflect.ValueOf(newConfig) - - vResult := appendStruct(vOld, vNew) - - return vResult.Interface().(types.Config) -} - -// appendStruct is an internal helper function to AppendConfig. Given two values -// of structures (assumed to be the same type), recursively iterate over every -// field in the struct, appending slices, recursively appending structs, and -// overwriting old values with the new for all other types. Some individual -// struct fields have alternate merge strategies, determined by the field name. -// Currently these fields are "ignition.version", which uses the old value, and -// "ignition.config" which uses the new value. -func appendStruct(vOld, vNew reflect.Value) reflect.Value { - tOld := vOld.Type() - vRes := reflect.New(tOld) - - for i := 0; i < tOld.NumField(); i++ { - vfOld := vOld.Field(i) - vfNew := vNew.Field(i) - vfRes := vRes.Elem().Field(i) - - switch tOld.Field(i).Name { - case "Version": - vfRes.Set(vfOld) - continue - case "Config": - vfRes.Set(vfNew) - continue - } - - switch vfOld.Type().Kind() { - case reflect.Struct: - vfRes.Set(appendStruct(vfOld, vfNew)) - case reflect.Slice: - vfRes.Set(reflect.AppendSlice(vfOld, vfNew)) - default: - if vfNew.Kind() == reflect.Ptr && vfNew.IsNil() { - vfRes.Set(vfOld) - } else { - vfRes.Set(vfNew) - } - } - } - - return vRes.Elem() -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/cloudinit.go b/vendor/github.com/coreos/ignition/config/v2_2/cloudinit.go deleted file mode 100644 index 36a54393245..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/cloudinit.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// These functions are copied from github.com/coreos/coreos-cloudinit/config. 
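The v2_2 appendStruct above adds one refinement over the v2_1 version: a nil pointer in the new config keeps the old value instead of overwriting it. Below is a runnable sketch of the core reflection walk on simplified stand-in types; the Version/Config field-name special cases are omitted, and the type names are invented for illustration.

package main

import (
	"fmt"
	"reflect"
)

type inner struct {
	Names []string
	Label *string
}

type config struct {
	Version string
	Inner   inner
}

// appendStruct mirrors the walk above: concatenate slices, recurse into
// structs, keep the old value for nil new pointers, overwrite everything else.
func appendStruct(vOld, vNew reflect.Value) reflect.Value {
	tOld := vOld.Type()
	vRes := reflect.New(tOld)
	for i := 0; i < tOld.NumField(); i++ {
		vfOld, vfNew := vOld.Field(i), vNew.Field(i)
		vfRes := vRes.Elem().Field(i)
		switch vfOld.Type().Kind() {
		case reflect.Struct:
			vfRes.Set(appendStruct(vfOld, vfNew))
		case reflect.Slice:
			vfRes.Set(reflect.AppendSlice(vfOld, vfNew))
		default:
			if vfNew.Kind() == reflect.Ptr && vfNew.IsNil() {
				vfRes.Set(vfOld) // new value unset: keep the old one
			} else {
				vfRes.Set(vfNew)
			}
		}
	}
	return vRes.Elem()
}

func main() {
	label := "data"
	oldCfg := config{Version: "2.2.0", Inner: inner{Names: []string{"a"}, Label: &label}}
	newCfg := config{Version: "2.2.0", Inner: inner{Names: []string{"b"}}}
	merged := appendStruct(reflect.ValueOf(oldCfg), reflect.ValueOf(newCfg)).Interface().(config)
	fmt.Println(merged.Inner.Names, *merged.Inner.Label) // [a b] data
}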
- -package v2_2 - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "strings" - "unicode" -) - -func isCloudConfig(userdata []byte) bool { - header := strings.SplitN(string(decompressIfGzipped(userdata)), "\n", 2)[0] - - // Trim trailing whitespaces - header = strings.TrimRightFunc(header, unicode.IsSpace) - - return (header == "#cloud-config") -} - -func isScript(userdata []byte) bool { - header := strings.SplitN(string(decompressIfGzipped(userdata)), "\n", 2)[0] - return strings.HasPrefix(header, "#!") -} - -func decompressIfGzipped(data []byte) []byte { - if reader, err := gzip.NewReader(bytes.NewReader(data)); err == nil { - uncompressedData, err := ioutil.ReadAll(reader) - reader.Close() - if err == nil { - return uncompressedData - } else { - return data - } - } else { - return data - } -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/config.go b/vendor/github.com/coreos/ignition/config/v2_2/config.go deleted file mode 100644 index c934a9e4a38..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/config.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2_2 - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/v2_1" - "github.com/coreos/ignition/config/v2_2/types" - "github.com/coreos/ignition/config/validate" - "github.com/coreos/ignition/config/validate/report" - - json "github.com/ajeddeloh/go-json" - "github.com/coreos/go-semver/semver" -) - -// Parse parses the raw config into a types.Config struct and generates a report of any -// errors, warnings, info, and deprecations it encountered. Unlike config.Parse, -// it does not attempt to translate the config. -func Parse(rawConfig []byte) (types.Config, report.Report, error) { - if isEmpty(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrEmpty - } else if isCloudConfig(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrCloudConfig - } else if isScript(rawConfig) { - return types.Config{}, report.Report{}, errors.ErrScript - } - - var err error - var config types.Config - - err = json.Unmarshal(rawConfig, &config) - - version, semverErr := semver.NewVersion(config.Ignition.Version) - - if err != nil || semverErr != nil || version.LessThan(types.MaxVersion) { - // We can fail unmarshaling if it's an older config. Attempt to parse - // it as such. 
- config, rpt, err := v2_1.Parse(rawConfig) - if err != nil { - return types.Config{}, rpt, err - } - return TranslateFromV2_1(config), rpt, err - } - - if *version != types.MaxVersion { - return types.Config{}, report.Report{}, errors.ErrUnknownVersion - } - - rpt := validate.ValidateConfig(rawConfig, config) - if rpt.IsFatal() { - return types.Config{}, rpt, errors.ErrInvalid - } - - return config, rpt, nil -} - -func isEmpty(userdata []byte) bool { - return len(userdata) == 0 -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/translate.go b/vendor/github.com/coreos/ignition/config/v2_2/translate.go deleted file mode 100644 index 56a6b33fc94..00000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/translate.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2_2 - -import ( - "github.com/coreos/ignition/config/util" - v2_1 "github.com/coreos/ignition/config/v2_1/types" - "github.com/coreos/ignition/config/v2_2/types" -) - -// golang-- -func translateStringSliceToV2_2SSHAuthorizedKeySlice(keys []string) []types.SSHAuthorizedKey { - newKeys := make([]types.SSHAuthorizedKey, len(keys)) - for i, k := range keys { - newKeys[i] = types.SSHAuthorizedKey(k) - } - return newKeys -} - -// golang-- -func translateStringSliceToV2_2UsercreateGroupSlice(groups []string) []types.UsercreateGroup { - var newGroups []types.UsercreateGroup - for _, g := range groups { - newGroups = append(newGroups, types.UsercreateGroup(g)) - } - return newGroups -} - -func TranslateFromV2_1(old v2_1.Config) types.Config { - translateConfigReference := func(old *v2_1.ConfigReference) *types.ConfigReference { - if old == nil { - return nil - } - return &types.ConfigReference{ - Source: old.Source, - Verification: types.Verification{ - Hash: old.Verification.Hash, - }, - } - } - translateConfigReferenceSlice := func(old []v2_1.ConfigReference) []types.ConfigReference { - var res []types.ConfigReference - for _, c := range old { - res = append(res, *translateConfigReference(&c)) - } - return res - } - translateNetworkdUnitSlice := func(old []v2_1.Networkdunit) []types.Networkdunit { - var res []types.Networkdunit - for _, u := range old { - res = append(res, types.Networkdunit{ - Contents: u.Contents, - Name: u.Name, - }) - } - return res - } - translatePasswdGroupSlice := func(old []v2_1.PasswdGroup) []types.PasswdGroup { - var res []types.PasswdGroup - for _, g := range old { - res = append(res, types.PasswdGroup{ - Gid: g.Gid, - Name: g.Name, - PasswordHash: g.PasswordHash, - System: g.System, - }) - } - return res - } - translatePasswdUsercreateGroupSlice := func(old []v2_1.UsercreateGroup) []types.UsercreateGroup { - var res []types.UsercreateGroup - for _, g := range old { - res = append(res, types.UsercreateGroup(g)) - } - return res - } - translatePasswdUsercreate := func(old *v2_1.Usercreate) *types.Usercreate { - if old == nil { - return nil - } - return &types.Usercreate{ - Gecos: old.Gecos, - Groups: 
translatePasswdUsercreateGroupSlice(old.Groups), - HomeDir: old.HomeDir, - NoCreateHome: old.NoCreateHome, - NoLogInit: old.NoLogInit, - NoUserGroup: old.NoUserGroup, - PrimaryGroup: old.PrimaryGroup, - Shell: old.Shell, - System: old.System, - UID: old.UID, - } - } - translatePasswdUserGroupSlice := func(old []v2_1.PasswdUserGroup) []types.Group { - var res []types.Group - for _, g := range old { - res = append(res, types.Group(g)) - } - return res - } - translatePasswdSSHAuthorizedKeySlice := func(old []v2_1.SSHAuthorizedKey) []types.SSHAuthorizedKey { - res := make([]types.SSHAuthorizedKey, len(old)) - for i, k := range old { - res[i] = types.SSHAuthorizedKey(k) - } - return res - } - translatePasswdUserSlice := func(old []v2_1.PasswdUser) []types.PasswdUser { - var res []types.PasswdUser - for _, u := range old { - res = append(res, types.PasswdUser{ - Create: translatePasswdUsercreate(u.Create), - Gecos: u.Gecos, - Groups: translatePasswdUserGroupSlice(u.Groups), - HomeDir: u.HomeDir, - Name: u.Name, - NoCreateHome: u.NoCreateHome, - NoLogInit: u.NoLogInit, - NoUserGroup: u.NoUserGroup, - PasswordHash: u.PasswordHash, - PrimaryGroup: u.PrimaryGroup, - SSHAuthorizedKeys: translatePasswdSSHAuthorizedKeySlice(u.SSHAuthorizedKeys), - Shell: u.Shell, - System: u.System, - UID: u.UID, - }) - } - return res - } - translateNodeGroup := func(old v2_1.NodeGroup) *types.NodeGroup { - return &types.NodeGroup{ - ID: old.ID, - Name: old.Name, - } - } - translateNodeUser := func(old v2_1.NodeUser) *types.NodeUser { - return &types.NodeUser{ - ID: old.ID, - Name: old.Name, - } - } - translateNode := func(old v2_1.Node) types.Node { - return types.Node{ - Filesystem: old.Filesystem, - Group: translateNodeGroup(old.Group), - Path: old.Path, - User: translateNodeUser(old.User), - } - } - translateDirectorySlice := func(old []v2_1.Directory) []types.Directory { - var res []types.Directory - for _, x := range old { - res = append(res, types.Directory{ - Node: translateNode(x.Node), - DirectoryEmbedded1: types.DirectoryEmbedded1{ - Mode: util.IntToPtr(x.DirectoryEmbedded1.Mode), - }, - }) - } - return res - } - translatePartitionSlice := func(old []v2_1.Partition) []types.Partition { - var res []types.Partition - for _, x := range old { - res = append(res, types.Partition{ - GUID: x.GUID, - Label: x.Label, - Number: x.Number, - Size: x.Size, - Start: x.Start, - TypeGUID: x.TypeGUID, - }) - } - return res - } - translateDiskSlice := func(old []v2_1.Disk) []types.Disk { - var res []types.Disk - for _, x := range old { - res = append(res, types.Disk{ - Device: x.Device, - Partitions: translatePartitionSlice(x.Partitions), - WipeTable: x.WipeTable, - }) - } - return res - } - translateFileSlice := func(old []v2_1.File) []types.File { - var res []types.File - for _, x := range old { - res = append(res, types.File{ - Node: translateNode(x.Node), - FileEmbedded1: types.FileEmbedded1{ - Contents: types.FileContents{ - Compression: x.Contents.Compression, - Source: x.Contents.Source, - Verification: types.Verification{ - Hash: x.Contents.Verification.Hash, - }, - }, - Mode: util.IntToPtr(x.Mode), - }, - }) - } - return res - } - translateMountCreateOptionSlice := func(old []v2_1.CreateOption) []types.CreateOption { - var res []types.CreateOption - for _, x := range old { - res = append(res, types.CreateOption(x)) - } - return res - } - translateMountCreate := func(old *v2_1.Create) *types.Create { - if old == nil { - return nil - } - return &types.Create{ - Force: old.Force, - Options: 
translateMountCreateOptionSlice(old.Options), - } - } - translateMountOptionSlice := func(old []v2_1.MountOption) []types.MountOption { - var res []types.MountOption - for _, x := range old { - res = append(res, types.MountOption(x)) - } - return res - } - translateMount := func(old *v2_1.Mount) *types.Mount { - if old == nil { - return nil - } - return &types.Mount{ - Create: translateMountCreate(old.Create), - Device: old.Device, - Format: old.Format, - Label: old.Label, - Options: translateMountOptionSlice(old.Options), - UUID: old.UUID, - WipeFilesystem: old.WipeFilesystem, - } - } - translateFilesystemSlice := func(old []v2_1.Filesystem) []types.Filesystem { - var res []types.Filesystem - for _, x := range old { - res = append(res, types.Filesystem{ - Mount: translateMount(x.Mount), - Name: x.Name, - Path: x.Path, - }) - } - return res - } - translateLinkSlice := func(old []v2_1.Link) []types.Link { - var res []types.Link - for _, x := range old { - res = append(res, types.Link{ - Node: translateNode(x.Node), - LinkEmbedded1: types.LinkEmbedded1{ - Hard: x.Hard, - Target: x.Target, - }, - }) - } - return res - } - translateDeviceSlice := func(old []v2_1.Device) []types.Device { - var res []types.Device - for _, x := range old { - res = append(res, types.Device(x)) - } - return res - } - translateRaidSlice := func(old []v2_1.Raid) []types.Raid { - var res []types.Raid - for _, x := range old { - res = append(res, types.Raid{ - Devices: translateDeviceSlice(x.Devices), - Level: x.Level, - Name: x.Name, - Spares: x.Spares, - }) - } - return res - } - translateSystemdDropinSlice := func(old []v2_1.Dropin) []types.SystemdDropin { - var res []types.SystemdDropin - for _, x := range old { - res = append(res, types.SystemdDropin{ - Contents: x.Contents, - Name: x.Name, - }) - } - return res - } - translateSystemdUnitSlice := func(old []v2_1.Unit) []types.Unit { - var res []types.Unit - for _, x := range old { - res = append(res, types.Unit{ - Contents: x.Contents, - Dropins: translateSystemdDropinSlice(x.Dropins), - Enable: x.Enable, - Enabled: x.Enabled, - Mask: x.Mask, - Name: x.Name, - }) - } - return res - } - config := types.Config{ - Ignition: types.Ignition{ - Version: types.MaxVersion.String(), - Timeouts: types.Timeouts{ - HTTPResponseHeaders: old.Ignition.Timeouts.HTTPResponseHeaders, - HTTPTotal: old.Ignition.Timeouts.HTTPTotal, - }, - Config: types.IgnitionConfig{ - Replace: translateConfigReference(old.Ignition.Config.Replace), - Append: translateConfigReferenceSlice(old.Ignition.Config.Append), - }, - }, - Networkd: types.Networkd{ - Units: translateNetworkdUnitSlice(old.Networkd.Units), - }, - Passwd: types.Passwd{ - Groups: translatePasswdGroupSlice(old.Passwd.Groups), - Users: translatePasswdUserSlice(old.Passwd.Users), - }, - Storage: types.Storage{ - Directories: translateDirectorySlice(old.Storage.Directories), - Disks: translateDiskSlice(old.Storage.Disks), - Files: translateFileSlice(old.Storage.Files), - Filesystems: translateFilesystemSlice(old.Storage.Filesystems), - Links: translateLinkSlice(old.Storage.Links), - Raid: translateRaidSlice(old.Storage.Raid), - }, - Systemd: types.Systemd{ - Units: translateSystemdUnitSlice(old.Systemd.Units), - }, - } - return config -} diff --git a/vendor/github.com/coreos/ignition/config/validate/astjson/node.go b/vendor/github.com/coreos/ignition/config/validate/astjson/node.go deleted file mode 100644 index 6735fa0ab3f..00000000000 --- a/vendor/github.com/coreos/ignition/config/validate/astjson/node.go +++ /dev/null @@ -1,73 +0,0 
@@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package astjson - -import ( - "io" - - json "github.com/ajeddeloh/go-json" - "github.com/coreos/ignition/config/validate/astnode" - "go4.org/errorutil" -) - -type JsonNode json.Node - -func FromJsonRoot(n json.Node) JsonNode { - return JsonNode(n) -} - -func (n JsonNode) ValueLineCol(source io.ReadSeeker) (int, int, string) { - return posFromOffset(n.End, source) -} - -func (n JsonNode) KeyLineCol(source io.ReadSeeker) (int, int, string) { - return posFromOffset(n.KeyEnd, source) -} - -func (n JsonNode) LiteralValue() interface{} { - return n.Value -} - -func (n JsonNode) SliceChild(index int) (astnode.AstNode, bool) { - if slice, ok := n.Value.([]json.Node); ok { - return JsonNode(slice[index]), true - } - return JsonNode{}, false -} - -func (n JsonNode) KeyValueMap() (map[string]astnode.AstNode, bool) { - if kvmap, ok := n.Value.(map[string]json.Node); ok { - newKvmap := map[string]astnode.AstNode{} - for k, v := range kvmap { - newKvmap[k] = JsonNode(v) - } - return newKvmap, true - } - return nil, false -} - -func (n JsonNode) Tag() string { - return "json" -} - -// wrapper for errorutil that handles missing sources sanely and resets the reader afterwards -func posFromOffset(offset int, source io.ReadSeeker) (int, int, string) { - if source == nil { - return 0, 0, "" - } - line, col, highlight := errorutil.HighlightBytePosition(source, int64(offset)) - source.Seek(0, 0) // Reset the reader to the start so the next call isn't relative to this position - return line, col, highlight -} diff --git a/vendor/github.com/coreos/ignition/config/validate/astnode/astnode.go b/vendor/github.com/coreos/ignition/config/validate/astnode/astnode.go deleted file mode 100644 index d1c1d9c2420..00000000000 --- a/vendor/github.com/coreos/ignition/config/validate/astnode/astnode.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package astnode - -import ( - "io" -) - -// AstNode abstracts the differences between yaml and json nodes, providing a -// common interface -type AstNode interface { - // ValueLineCol returns the line, column, and highlight string of the value of - // this node in the source. - ValueLineCol(source io.ReadSeeker) (int, int, string) - - // KeyLineCol returns the line, column, and highlight string of the key for the - // value of this node in the source. 
- KeyLineCol(source io.ReadSeeker) (int, int, string) - - // LiteralValue returns the value of this node. - LiteralValue() interface{} - - // SliceChild returns the child node at the index specified. If this node is not - // a slice node, an empty AstNode and false is returned. - SliceChild(index int) (AstNode, bool) - - // KeyValueMap returns a map of keys and values. If this node is not a mapping - // node, nil and false are returned. - KeyValueMap() (map[string]AstNode, bool) - - // Tag returns the struct tag used in the config structure used to unmarshal. - Tag() string -} diff --git a/vendor/github.com/coreos/ignition/config/validate/validate.go b/vendor/github.com/coreos/ignition/config/validate/validate.go deleted file mode 100644 index 12f9bf205fa..00000000000 --- a/vendor/github.com/coreos/ignition/config/validate/validate.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package validate - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" - - json "github.com/ajeddeloh/go-json" - "github.com/coreos/ignition/config/validate/astjson" - "github.com/coreos/ignition/config/validate/astnode" - "github.com/coreos/ignition/config/validate/report" -) - -type validator interface { - Validate() report.Report -} - -// ValidateConfig validates a raw config object into a given config version -func ValidateConfig(rawConfig []byte, config interface{}) report.Report { - // Unmarshal again to a json.Node to get offset information for building a report - var ast json.Node - var r report.Report - configValue := reflect.ValueOf(config) - if err := json.Unmarshal(rawConfig, &ast); err != nil { - r.Add(report.Entry{ - Kind: report.EntryWarning, - Message: "Ignition could not unmarshal your config for reporting line numbers. This should never happen. Please file a bug.", - }) - r.Merge(ValidateWithoutSource(configValue)) - } else { - r.Merge(Validate(configValue, astjson.FromJsonRoot(ast), bytes.NewReader(rawConfig), true)) - } - return r -} - -// Validate walks down a struct tree calling Validate on every node that implements it, building -// A report of all the errors, warnings, info, and deprecations it encounters. If checkUnusedKeys -// is true, Validate will generate warnings for unused keys in the ast, otherwise it will not. -func Validate(vObj reflect.Value, ast astnode.AstNode, source io.ReadSeeker, checkUnusedKeys bool) (r report.Report) { - if !vObj.IsValid() { - return - } - - line, col, highlight := 0, 0, "" - if ast != nil { - line, col, highlight = ast.ValueLineCol(source) - } - - // See if we A) can call Validate on vObj, and B) should call Validate. Validate should NOT be called - // when vObj is nil, as it will panic or when vObj is a pointer to a value with Validate implemented with a - // value receiver. This is to prevent Validate being called twice, as otherwise it would be called on the - // pointer version (due to go's automatic deferencing) and once when the pointer is deferenced below. 
The only - // time Validate should be called on a pointer is when the function is implemented with a pointer reciever. - if obj, ok := vObj.Interface().(validator); ok && - ((vObj.Kind() != reflect.Ptr) || - (!vObj.IsNil() && !vObj.Elem().Type().Implements(reflect.TypeOf((*validator)(nil)).Elem()))) { - sub_r := obj.Validate() - sub_r.AddPosition(line, col, highlight) - r.Merge(sub_r) - - // Dont recurse on invalid inner nodes, it mostly leads to bogus messages - if sub_r.IsFatal() { - return - } - } - - switch vObj.Kind() { - case reflect.Ptr: - sub_report := Validate(vObj.Elem(), ast, source, checkUnusedKeys) - sub_report.AddPosition(line, col, "") - r.Merge(sub_report) - case reflect.Struct: - sub_report := validateStruct(vObj, ast, source, checkUnusedKeys) - sub_report.AddPosition(line, col, "") - r.Merge(sub_report) - case reflect.Slice: - for i := 0; i < vObj.Len(); i++ { - sub_node := ast - if ast != nil { - if n, ok := ast.SliceChild(i); ok { - sub_node = n - } - } - sub_report := Validate(vObj.Index(i), sub_node, source, checkUnusedKeys) - sub_report.AddPosition(line, col, "") - r.Merge(sub_report) - } - } - return -} - -func ValidateWithoutSource(cfg reflect.Value) (report report.Report) { - return Validate(cfg, nil, nil, false) -} - -type field struct { - Type reflect.StructField - Value reflect.Value -} - -// getFields returns a field of all the fields in the struct, including the fields of -// embedded structs and structs inside interface{}'s -func getFields(vObj reflect.Value) []field { - if vObj.Kind() != reflect.Struct { - return nil - } - ret := []field{} - for i := 0; i < vObj.Type().NumField(); i++ { - if vObj.Type().Field(i).Anonymous { - // in the case of an embedded type that is an alias to interface, extract the - // real type contained by the interface - realObj := reflect.ValueOf(vObj.Field(i).Interface()) - ret = append(ret, getFields(realObj)...) - } else { - ret = append(ret, field{Type: vObj.Type().Field(i), Value: vObj.Field(i)}) - } - } - return ret -} - -func validateStruct(vObj reflect.Value, ast astnode.AstNode, source io.ReadSeeker, checkUnusedKeys bool) report.Report { - r := report.Report{} - - // isFromObject will be true if this struct was unmarshalled from a JSON object. - keys, isFromObject := map[string]astnode.AstNode{}, false - if ast != nil { - keys, isFromObject = ast.KeyValueMap() - } - - // Maintain a set of key's that have been used. - usedKeys := map[string]struct{}{} - - // Maintain a list of all the tags in the struct for fuzzy matching later. - tags := []string{} - - for _, f := range getFields(vObj) { - // Default to nil astnode.AstNode if the field's corrosponding node cannot be found. - var sub_node astnode.AstNode - // Default to passing a nil source if the field's corrosponding node cannot be found. 
- // This ensures the line numbers reported from all sub-structs are 0 and will be changed by AddPosition - var src io.ReadSeeker - - // Try to determine the json.Node that corrosponds with the struct field - if isFromObject { - tag := strings.SplitN(f.Type.Tag.Get(ast.Tag()), ",", 2)[0] - // Save the tag so we have a list of all the tags in the struct - tags = append(tags, tag) - // mark that this key was used - usedKeys[tag] = struct{}{} - - if sub, ok := keys[tag]; ok { - // Found it - sub_node = sub - src = source - } - } - - // Default to deepest node if the node's type isn't an object, - // such as when a json string actually unmarshal to structs (like with version) - line, col := 0, 0 - highlight := "" - if ast != nil { - line, col, highlight = ast.ValueLineCol(src) - } - - // If there's a Validate func for the given field, call it - funct := vObj.MethodByName("Validate" + f.Type.Name) - if funct.IsValid() { - if sub_node != nil { - // if sub_node is non-nil, we can get better line/col info - line, col, highlight = sub_node.ValueLineCol(src) - } - res := funct.Call(nil) - sub_report := res[0].Interface().(report.Report) - sub_report.AddPosition(line, col, highlight) - r.Merge(sub_report) - } - - sub_report := Validate(f.Value, sub_node, src, checkUnusedKeys) - sub_report.AddPosition(line, col, highlight) - r.Merge(sub_report) - } - if !isFromObject || !checkUnusedKeys { - // If this struct was not unmarshalled from a JSON object, there cannot be unused keys. - return r - } - - for k, v := range keys { - if _, hasKey := usedKeys[k]; hasKey { - continue - } - line, col, highlight := v.KeyLineCol(source) - typo := similar(k, tags) - - r.Add(report.Entry{ - Kind: report.EntryWarning, - Message: fmt.Sprintf("Config has unrecognized key: %s", k), - Line: line, - Column: col, - Highlight: highlight, - }) - - if typo != "" { - r.Add(report.Entry{ - Kind: report.EntryInfo, - Message: fmt.Sprintf("Did you mean %s instead of %s", typo, k), - Line: line, - Column: col, - Highlight: highlight, - }) - } - } - - return r -} - -// similar returns a string in candidates that is similar to str. Currently it just does case -// insensitive comparison, but it should be updated to use levenstein distances to catch typos -func similar(str string, candidates []string) string { - for _, candidate := range candidates { - if strings.EqualFold(str, candidate) { - return candidate - } - } - return "" -} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE deleted file mode 100644 index e06d2081865..00000000000 --- a/vendor/github.com/coreos/pkg/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE deleted file mode 100644 index b39ddfa5cbd..00000000000 --- a/vendor/github.com/coreos/pkg/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). 
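The hunks above also drop the vendored Ignition v2.2 parse, translate, and validate helpers. As a rough sketch of how a caller typically fed raw user data through that code (the entry-point name v2_2.Parse is assumed here, since only the tail of its body appears above; the sample JSON is invented for illustration):

package main

import (
	"fmt"

	v2_2 "github.com/coreos/ignition/config/v2_2"
)

func main() {
	// A minimal raw config; anything richer would exercise the translate
	// and reflection-based validate code removed in the hunks above.
	raw := []byte(`{"ignition": {"version": "2.2.0"}}`)

	cfg, rpt, err := v2_2.Parse(raw)
	if err != nil || rpt.IsFatal() {
		fmt.Printf("invalid config: %v\nreport: %v\n", err, rpt)
		return
	}
	fmt.Println("parsed Ignition config, version", cfg.Ignition.Version)
}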
diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go deleted file mode 100644 index b305a845fb2..00000000000 --- a/vendor/github.com/coreos/pkg/capnslog/formatters.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "bufio" - "fmt" - "io" - "log" - "runtime" - "strings" - "time" -) - -type Formatter interface { - Format(pkg string, level LogLevel, depth int, entries ...interface{}) - Flush() -} - -func NewStringFormatter(w io.Writer) Formatter { - return &StringFormatter{ - w: bufio.NewWriter(w), - } -} - -type StringFormatter struct { - w *bufio.Writer -} - -func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { - now := time.Now().UTC() - s.w.WriteString(now.Format(time.RFC3339)) - s.w.WriteByte(' ') - writeEntries(s.w, pkg, l, i, entries...) - s.Flush() -} - -func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { - if pkg != "" { - w.WriteString(pkg + ": ") - } - str := fmt.Sprint(entries...) - endsInNL := strings.HasSuffix(str, "\n") - w.WriteString(str) - if !endsInNL { - w.WriteString("\n") - } -} - -func (s *StringFormatter) Flush() { - s.w.Flush() -} - -func NewPrettyFormatter(w io.Writer, debug bool) Formatter { - return &PrettyFormatter{ - w: bufio.NewWriter(w), - debug: debug, - } -} - -type PrettyFormatter struct { - w *bufio.Writer - debug bool -} - -func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { - now := time.Now() - ts := now.Format("2006-01-02 15:04:05") - c.w.WriteString(ts) - ms := now.Nanosecond() / 1000 - c.w.WriteString(fmt.Sprintf(".%06d", ms)) - if c.debug { - _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - if line < 0 { - line = 0 // not a real line number - } - c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) - } - c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) - writeEntries(c.w, pkg, l, depth, entries...) - c.Flush() -} - -func (c *PrettyFormatter) Flush() { - c.w.Flush() -} - -// LogFormatter emulates the form of the traditional built-in logger. -type LogFormatter struct { - logger *log.Logger - prefix string -} - -// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the -// golang log package to actually do the logging work so that logs look similar. -func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { - return &LogFormatter{ - logger: log.New(w, "", flag), // don't use prefix here - prefix: prefix, // save it instead - } -} - -// Format builds a log message for the LogFormatter. The LogLevel is ignored. -func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { - str := fmt.Sprint(entries...) 
- prefix := lf.prefix - if pkg != "" { - prefix = fmt.Sprintf("%s%s: ", prefix, pkg) - } - lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 -} - -// Flush is included so that the interface is complete, but is a no-op. -func (lf *LogFormatter) Flush() { - // noop -} - -// NilFormatter is a no-op log formatter that does nothing. -type NilFormatter struct { -} - -// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no -// messages so that you can cause part of your logging to be silent. -func NewNilFormatter() Formatter { - return &NilFormatter{} -} - -// Format does nothing. -func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { - // noop -} - -// Flush is included so that the interface is complete, but is a no-op. -func (_ *NilFormatter) Flush() { - // noop -} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go deleted file mode 100644 index 426603ef305..00000000000 --- a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "bufio" - "bytes" - "io" - "os" - "runtime" - "strconv" - "strings" - "time" -) - -var pid = os.Getpid() - -type GlogFormatter struct { - StringFormatter -} - -func NewGlogFormatter(w io.Writer) *GlogFormatter { - g := &GlogFormatter{} - g.w = bufio.NewWriter(w) - return g -} - -func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { - g.w.Write(GlogHeader(level, depth+1)) - g.StringFormatter.Format(pkg, level, depth+1, entries...) -} - -func GlogHeader(level LogLevel, depth int) []byte { - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - now := time.Now().UTC() - _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. - if !ok { - file = "???" 
- line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - if line < 0 { - line = 0 // not a real line number - } - buf := &bytes.Buffer{} - buf.Grow(30) - _, month, day := now.Date() - hour, minute, second := now.Clock() - buf.WriteString(level.Char()) - twoDigits(buf, int(month)) - twoDigits(buf, day) - buf.WriteByte(' ') - twoDigits(buf, hour) - buf.WriteByte(':') - twoDigits(buf, minute) - buf.WriteByte(':') - twoDigits(buf, second) - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) - buf.WriteByte('Z') - buf.WriteByte(' ') - buf.WriteString(strconv.Itoa(pid)) - buf.WriteByte(' ') - buf.WriteString(file) - buf.WriteByte(':') - buf.WriteString(strconv.Itoa(line)) - buf.WriteByte(']') - buf.WriteByte(' ') - return buf.Bytes() -} - -const digits = "0123456789" - -func twoDigits(b *bytes.Buffer, d int) { - c2 := digits[d%10] - d /= 10 - c1 := digits[d%10] - b.WriteByte(c1) - b.WriteByte(c2) -} diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go deleted file mode 100644 index 44b8cd361b0..00000000000 --- a/vendor/github.com/coreos/pkg/capnslog/init.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// +build !windows - -package capnslog - -import ( - "io" - "os" - "syscall" -) - -// Here's where the opinionation comes in. We need some sensible defaults, -// especially after taking over the log package. Your project (whatever it may -// be) may see things differently. That's okay; there should be no defaults in -// the main package that cannot be controlled or overridden programatically, -// otherwise it's a bug. Doing so is creating your own init_log.go file much -// like this one. - -func init() { - initHijack() - - // Go `log` pacakge uses os.Stderr. - SetFormatter(NewDefaultFormatter(os.Stderr)) - SetGlobalLogLevel(INFO) -} - -func NewDefaultFormatter(out io.Writer) Formatter { - if syscall.Getppid() == 1 { - // We're running under init, which may be systemd. - f, err := NewJournaldFormatter() - if err == nil { - return f - } - } - return NewPrettyFormatter(out, false) -} diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go deleted file mode 100644 index 45530506537..00000000000 --- a/vendor/github.com/coreos/pkg/capnslog/init_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import "os" - -func init() { - initHijack() - - // Go `log` package uses os.Stderr. - SetFormatter(NewPrettyFormatter(os.Stderr, false)) - SetGlobalLogLevel(INFO) -} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go deleted file mode 100644 index 72e05207c52..00000000000 --- a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// +build !windows - -package capnslog - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/coreos/go-systemd/journal" -) - -func NewJournaldFormatter() (Formatter, error) { - if !journal.Enabled() { - return nil, errors.New("No systemd detected") - } - return &journaldFormatter{}, nil -} - -type journaldFormatter struct{} - -func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { - var pri journal.Priority - switch l { - case CRITICAL: - pri = journal.PriCrit - case ERROR: - pri = journal.PriErr - case WARNING: - pri = journal.PriWarning - case NOTICE: - pri = journal.PriNotice - case INFO: - pri = journal.PriInfo - case DEBUG: - pri = journal.PriDebug - case TRACE: - pri = journal.PriDebug - default: - panic("Unhandled loglevel") - } - msg := fmt.Sprint(entries...) - tags := map[string]string{ - "PACKAGE": pkg, - "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), - } - err := journal.Send(msg, pri, tags) - if err != nil { - fmt.Fprintln(os.Stderr, err) - } -} - -func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go deleted file mode 100644 index 970086b9f97..00000000000 --- a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package capnslog - -import ( - "log" -) - -func initHijack() { - pkg := NewPackageLogger("log", "") - w := packageWriter{pkg} - log.SetFlags(0) - log.SetPrefix("") - log.SetOutput(w) -} - -type packageWriter struct { - pl *PackageLogger -} - -func (p packageWriter) Write(b []byte) (int, error) { - if p.pl.level < INFO { - return 0, nil - } - p.pl.internalLog(calldepth+2, INFO, string(b)) - return len(b), nil -} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go deleted file mode 100644 index 84954488308..00000000000 --- a/vendor/github.com/coreos/pkg/capnslog/logmap.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "errors" - "strings" - "sync" -) - -// LogLevel is the set of all log levels. -type LogLevel int8 - -const ( - // CRITICAL is the lowest log level; only errors which will end the program will be propagated. - CRITICAL LogLevel = iota - 1 - // ERROR is for errors that are not fatal but lead to troubling behavior. - ERROR - // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. - WARNING - // NOTICE is for normal but significant conditions. - NOTICE - // INFO is a log level for common, everyday log updates. - INFO - // DEBUG is the default hidden level for more verbose updates about internal processes. - DEBUG - // TRACE is for (potentially) call by call tracing of programs. - TRACE -) - -// Char returns a single-character representation of the log level. -func (l LogLevel) Char() string { - switch l { - case CRITICAL: - return "C" - case ERROR: - return "E" - case WARNING: - return "W" - case NOTICE: - return "N" - case INFO: - return "I" - case DEBUG: - return "D" - case TRACE: - return "T" - default: - panic("Unhandled loglevel") - } -} - -// String returns a multi-character representation of the log level. -func (l LogLevel) String() string { - switch l { - case CRITICAL: - return "CRITICAL" - case ERROR: - return "ERROR" - case WARNING: - return "WARNING" - case NOTICE: - return "NOTICE" - case INFO: - return "INFO" - case DEBUG: - return "DEBUG" - case TRACE: - return "TRACE" - default: - panic("Unhandled loglevel") - } -} - -// Update using the given string value. Fulfills the flag.Value interface. -func (l *LogLevel) Set(s string) error { - value, err := ParseLevel(s) - if err != nil { - return err - } - - *l = value - return nil -} - -// ParseLevel translates some potential loglevel strings into their corresponding levels. 
-func ParseLevel(s string) (LogLevel, error) { - switch s { - case "CRITICAL", "C": - return CRITICAL, nil - case "ERROR", "0", "E": - return ERROR, nil - case "WARNING", "1", "W": - return WARNING, nil - case "NOTICE", "2", "N": - return NOTICE, nil - case "INFO", "3", "I": - return INFO, nil - case "DEBUG", "4", "D": - return DEBUG, nil - case "TRACE", "5", "T": - return TRACE, nil - } - return CRITICAL, errors.New("couldn't parse log level " + s) -} - -type RepoLogger map[string]*PackageLogger - -type loggerStruct struct { - sync.Mutex - repoMap map[string]RepoLogger - formatter Formatter -} - -// logger is the global logger -var logger = new(loggerStruct) - -// SetGlobalLogLevel sets the log level for all packages in all repositories -// registered with capnslog. -func SetGlobalLogLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - for _, r := range logger.repoMap { - r.setRepoLogLevelInternal(l) - } -} - -// GetRepoLogger may return the handle to the repository's set of packages' loggers. -func GetRepoLogger(repo string) (RepoLogger, error) { - logger.Lock() - defer logger.Unlock() - r, ok := logger.repoMap[repo] - if !ok { - return nil, errors.New("no packages registered for repo " + repo) - } - return r, nil -} - -// MustRepoLogger returns the handle to the repository's packages' loggers. -func MustRepoLogger(repo string) RepoLogger { - r, err := GetRepoLogger(repo) - if err != nil { - panic(err) - } - return r -} - -// SetRepoLogLevel sets the log level for all packages in the repository. -func (r RepoLogger) SetRepoLogLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - r.setRepoLogLevelInternal(l) -} - -func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { - for _, v := range r { - v.level = l - } -} - -// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in -// order, and returns a map of the results, for use in SetLogLevel. -func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { - setlist := strings.Split(conf, ",") - out := make(map[string]LogLevel) - for _, setstring := range setlist { - setting := strings.Split(setstring, "=") - if len(setting) != 2 { - return nil, errors.New("oddly structured `pkg=level` option: " + setstring) - } - l, err := ParseLevel(setting[1]) - if err != nil { - return nil, err - } - out[setting[0]] = l - } - return out, nil -} - -// SetLogLevel takes a map of package names within a repository to their desired -// loglevel, and sets the levels appropriately. Unknown packages are ignored. -// "*" is a special package name that corresponds to all packages, and will be -// processed first. -func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { - logger.Lock() - defer logger.Unlock() - if l, ok := m["*"]; ok { - r.setRepoLogLevelInternal(l) - } - for k, v := range m { - l, ok := r[k] - if !ok { - continue - } - l.level = v - } -} - -// SetFormatter sets the formatting function for all logs. -func SetFormatter(f Formatter) { - logger.Lock() - defer logger.Unlock() - logger.formatter = f -} - -// NewPackageLogger creates a package logger object. -// This should be defined as a global var in your package, referencing your repo. 
-func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { - logger.Lock() - defer logger.Unlock() - if logger.repoMap == nil { - logger.repoMap = make(map[string]RepoLogger) - } - r, rok := logger.repoMap[repo] - if !rok { - logger.repoMap[repo] = make(RepoLogger) - r = logger.repoMap[repo] - } - p, pok := r[pkg] - if !pok { - r[pkg] = &PackageLogger{ - pkg: pkg, - level: INFO, - } - p = r[pkg] - } - return -} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go deleted file mode 100644 index 612d55c66c8..00000000000 --- a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "fmt" - "os" -) - -type PackageLogger struct { - pkg string - level LogLevel -} - -const calldepth = 2 - -func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { - logger.Lock() - defer logger.Unlock() - if inLevel != CRITICAL && p.level < inLevel { - return - } - if logger.formatter != nil { - logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) - } -} - -func (p *PackageLogger) LevelAt(l LogLevel) bool { - logger.Lock() - defer logger.Unlock() - return p.level >= l -} - -// Log a formatted string at any level between ERROR and TRACE -func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { - p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) -} - -// Log a message at any level between ERROR and TRACE -func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { - p.internalLog(calldepth, l, fmt.Sprint(args...)) -} - -// log stdlib compatibility - -func (p *PackageLogger) Println(args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) -} - -func (p *PackageLogger) Printf(format string, args ...interface{}) { - p.Logf(INFO, format, args...) -} - -func (p *PackageLogger) Print(args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprint(args...)) -} - -// Panic and fatal - -func (p *PackageLogger) Panicf(format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Panic(args ...interface{}) { - s := fmt.Sprint(args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Fatalf(format string, args ...interface{}) { - p.Logf(CRITICAL, format, args...) - os.Exit(1) -} - -func (p *PackageLogger) Fatal(args ...interface{}) { - s := fmt.Sprint(args...) - p.internalLog(calldepth, CRITICAL, s) - os.Exit(1) -} - -func (p *PackageLogger) Fatalln(args ...interface{}) { - s := fmt.Sprintln(args...) - p.internalLog(calldepth, CRITICAL, s) - os.Exit(1) -} - -// Error Functions - -func (p *PackageLogger) Errorf(format string, args ...interface{}) { - p.Logf(ERROR, format, args...) -} - -func (p *PackageLogger) Error(entries ...interface{}) { - p.internalLog(calldepth, ERROR, entries...) 
-} - -// Warning Functions - -func (p *PackageLogger) Warningf(format string, args ...interface{}) { - p.Logf(WARNING, format, args...) -} - -func (p *PackageLogger) Warning(entries ...interface{}) { - p.internalLog(calldepth, WARNING, entries...) -} - -// Notice Functions - -func (p *PackageLogger) Noticef(format string, args ...interface{}) { - p.Logf(NOTICE, format, args...) -} - -func (p *PackageLogger) Notice(entries ...interface{}) { - p.internalLog(calldepth, NOTICE, entries...) -} - -// Info Functions - -func (p *PackageLogger) Infof(format string, args ...interface{}) { - p.Logf(INFO, format, args...) -} - -func (p *PackageLogger) Info(entries ...interface{}) { - p.internalLog(calldepth, INFO, entries...) -} - -// Debug Functions - -func (p *PackageLogger) Debugf(format string, args ...interface{}) { - if p.level < DEBUG { - return - } - p.Logf(DEBUG, format, args...) -} - -func (p *PackageLogger) Debug(entries ...interface{}) { - if p.level < DEBUG { - return - } - p.internalLog(calldepth, DEBUG, entries...) -} - -// Trace Functions - -func (p *PackageLogger) Tracef(format string, args ...interface{}) { - if p.level < TRACE { - return - } - p.Logf(TRACE, format, args...) -} - -func (p *PackageLogger) Trace(entries ...interface{}) { - if p.level < TRACE { - return - } - p.internalLog(calldepth, TRACE, entries...) -} - -func (p *PackageLogger) Flush() { - logger.Lock() - defer logger.Unlock() - logger.formatter.Flush() -} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go deleted file mode 100644 index 4be5a1f2de3..00000000000 --- a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// +build !windows - -package capnslog - -import ( - "fmt" - "log/syslog" -) - -func NewSyslogFormatter(w *syslog.Writer) Formatter { - return &syslogFormatter{w} -} - -func NewDefaultSyslogFormatter(tag string) (Formatter, error) { - w, err := syslog.New(syslog.LOG_DEBUG, tag) - if err != nil { - return nil, err - } - return NewSyslogFormatter(w), nil -} - -type syslogFormatter struct { - w *syslog.Writer -} - -func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { - for _, entry := range entries { - str := fmt.Sprint(entry) - switch l { - case CRITICAL: - s.w.Crit(str) - case ERROR: - s.w.Err(str) - case WARNING: - s.w.Warning(str) - case NOTICE: - s.w.Notice(str) - case INFO: - s.w.Info(str) - case DEBUG: - s.w.Debug(str) - case TRACE: - s.w.Debug(str) - default: - panic("Unhandled loglevel") - } - } -} - -func (s *syslogFormatter) Flush() { -} diff --git a/vendor/github.com/coreos/pkg/health/health.go b/vendor/github.com/coreos/pkg/health/health.go deleted file mode 100644 index a1c3610fa5c..00000000000 --- a/vendor/github.com/coreos/pkg/health/health.go +++ /dev/null @@ -1,127 +0,0 @@ -package health - -import ( - "expvar" - "fmt" - "log" - "net/http" - - "github.com/coreos/pkg/httputil" -) - -// Checkables should return nil when the thing they are checking is healthy, and an error otherwise. -type Checkable interface { - Healthy() error -} - -// Checker provides a way to make an endpoint which can be probed for system health. -type Checker struct { - // Checks are the Checkables to be checked when probing. - Checks []Checkable - - // Unhealthyhandler is called when one or more of the checks are unhealthy. - // If not provided DefaultUnhealthyHandler is called. - UnhealthyHandler UnhealthyHandler - - // HealthyHandler is called when all checks are healthy. - // If not provided, DefaultHealthyHandler is called. - HealthyHandler http.HandlerFunc -} - -func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) { - unhealthyHandler := c.UnhealthyHandler - if unhealthyHandler == nil { - unhealthyHandler = DefaultUnhealthyHandler - } - - successHandler := c.HealthyHandler - if successHandler == nil { - successHandler = DefaultHealthyHandler - } - - if r.Method != "GET" { - w.Header().Set("Allow", "GET") - w.WriteHeader(http.StatusMethodNotAllowed) - return - } - - if err := Check(c.Checks); err != nil { - unhealthyHandler(w, r, err) - return - } - - successHandler(w, r) -} - -type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error) - -type StatusResponse struct { - Status string `json:"status"` - Details *StatusResponseDetails `json:"details,omitempty"` -} - -type StatusResponseDetails struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func Check(checks []Checkable) (err error) { - errs := []error{} - for _, c := range checks { - if e := c.Healthy(); e != nil { - errs = append(errs, e) - } - } - - switch len(errs) { - case 0: - err = nil - case 1: - err = errs[0] - default: - err = fmt.Errorf("multiple health check failure: %v", errs) - } - - return -} - -func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) { - err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{ - Status: "ok", - }) - if err != nil { - // TODO(bobbyrullo): replace with logging from new logging pkg, - // once it lands. 
- log.Printf("Failed to write JSON response: %v", err) - } -} - -func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) { - writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{ - Status: "error", - Details: &StatusResponseDetails{ - Code: http.StatusInternalServerError, - Message: err.Error(), - }, - }) - if writeErr != nil { - // TODO(bobbyrullo): replace with logging from new logging pkg, - // once it lands. - log.Printf("Failed to write JSON response: %v", err) - } -} - -// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported. -func ExpvarHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} diff --git a/vendor/github.com/coreos/pkg/httputil/cookie.go b/vendor/github.com/coreos/pkg/httputil/cookie.go deleted file mode 100644 index c37a37bb27c..00000000000 --- a/vendor/github.com/coreos/pkg/httputil/cookie.go +++ /dev/null @@ -1,21 +0,0 @@ -package httputil - -import ( - "net/http" - "time" -) - -// DeleteCookies effectively deletes all named cookies -// by wiping all data and setting to expire immediately. -func DeleteCookies(w http.ResponseWriter, cookieNames ...string) { - for _, n := range cookieNames { - c := &http.Cookie{ - Name: n, - Value: "", - Path: "/", - MaxAge: -1, - Expires: time.Time{}, - } - http.SetCookie(w, c) - } -} diff --git a/vendor/github.com/coreos/pkg/httputil/json.go b/vendor/github.com/coreos/pkg/httputil/json.go deleted file mode 100644 index 0b09235033f..00000000000 --- a/vendor/github.com/coreos/pkg/httputil/json.go +++ /dev/null @@ -1,27 +0,0 @@ -package httputil - -import ( - "encoding/json" - "net/http" -) - -const ( - JSONContentType = "application/json" -) - -func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error { - enc, err := json.Marshal(resp) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return err - } - - w.Header().Set("Content-Type", JSONContentType) - w.WriteHeader(code) - - _, err = w.Write(enc) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/coreos/pkg/timeutil/backoff.go b/vendor/github.com/coreos/pkg/timeutil/backoff.go deleted file mode 100644 index b34fb49661b..00000000000 --- a/vendor/github.com/coreos/pkg/timeutil/backoff.go +++ /dev/null @@ -1,15 +0,0 @@ -package timeutil - -import ( - "time" -) - -func ExpBackoff(prev, max time.Duration) time.Duration { - if prev == 0 { - return time.Second - } - if prev > max/2 { - return max - } - return 2 * prev -} diff --git a/vendor/github.com/coreos/tectonic-config/config/tectonic-utility/config.go b/vendor/github.com/coreos/tectonic-config/config/tectonic-utility/config.go deleted file mode 100644 index 2ac4c408741..00000000000 --- a/vendor/github.com/coreos/tectonic-config/config/tectonic-utility/config.go +++ /dev/null @@ -1,34 +0,0 @@ -package tectonicutility - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // Kind is the TypeMeta.Kind for the OperatorConfig. - Kind = "TectonicUtilityOperatorConfig" - // APIVersion is the TypeMeta.APIVersion for the OperatorConfig. - APIVersion = "v1" -) - -// OperatorConfig defines the config for Tectonic Utility Operator. 
-type OperatorConfig struct { - metav1.TypeMeta `json:",inline"` - StatsEmitterConfig `json:"statsEmitterConfig"` - TectonicConfigMapConfig `json:"tectonicConfigMap"` -} - -// StatsEmitterConfig defines the config for Tectonic Stats Emitter. -type StatsEmitterConfig struct { - StatsURL string `json:"statsURL"` -} - -// TectonicConfigMapConfig defines the variables that will be used by the Tectonic ConfigMap. -type TectonicConfigMapConfig struct { - ClusterID string `json:"clusterID"` - ClusterName string `json:"clusterName"` - CertificatesStrategy string `json:"certificatesStrategy"` - InstallerPlatform string `json:"installerPlatform"` - KubeAPIServerURL string `json:"kubeAPIserverURL"` - TectonicVersion string `json:"tectonicVersion"` -} diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE b/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE deleted file mode 100644 index 5782c72690e..00000000000 --- a/vendor/github.com/elazarl/go-bindata-assetfs/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2014, Elazar Leibovich -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
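Context for the vendored package removed in the next hunk: go-bindata-assetfs serves go-bindata-embedded files over net/http. The sketch below is adapted from the usage example in the package's own doc.go and is illustrative only, not part of this patch; Asset and AssetDir are stubs standing in for the functions that `go-bindata data/...` would normally generate, and the listen address is assumed.

package main

import (
	"fmt"
	"log"
	"net/http"

	assetfs "github.com/elazarl/go-bindata-assetfs"
)

// Asset and AssetDir stand in for the go-bindata generated functions;
// they are stubs for illustration only.
func Asset(path string) ([]byte, error) {
	return []byte("hello from " + path + "\n"), nil
}

func AssetDir(path string) ([]string, error) {
	return nil, fmt.Errorf("%s is not a directory", path)
}

func main() {
	// AssetFS implements http.FileSystem over the embedded data,
	// so it can be handed straight to http.FileServer.
	fs := &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"}
	http.Handle("/", http.FileServer(fs))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
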
diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go b/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go deleted file mode 100644 index 5174d5a6d3c..00000000000 --- a/vendor/github.com/elazarl/go-bindata-assetfs/assetfs.go +++ /dev/null @@ -1,147 +0,0 @@ -package assetfs - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "os" - "path" - "path/filepath" - "time" -) - -var ( - fileTimestamp = time.Now() -) - -// FakeFile implements os.FileInfo interface for a given path and size -type FakeFile struct { - // Path is the path of this file - Path string - // Dir marks of the path is a directory - Dir bool - // Len is the length of the fake file, zero if it is a directory - Len int64 -} - -func (f *FakeFile) Name() string { - _, name := filepath.Split(f.Path) - return name -} - -func (f *FakeFile) Mode() os.FileMode { - mode := os.FileMode(0644) - if f.Dir { - return mode | os.ModeDir - } - return mode -} - -func (f *FakeFile) ModTime() time.Time { - return fileTimestamp -} - -func (f *FakeFile) Size() int64 { - return f.Len -} - -func (f *FakeFile) IsDir() bool { - return f.Mode().IsDir() -} - -func (f *FakeFile) Sys() interface{} { - return nil -} - -// AssetFile implements http.File interface for a no-directory file with content -type AssetFile struct { - *bytes.Reader - io.Closer - FakeFile -} - -func NewAssetFile(name string, content []byte) *AssetFile { - return &AssetFile{ - bytes.NewReader(content), - ioutil.NopCloser(nil), - FakeFile{name, false, int64(len(content))}} -} - -func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) { - return nil, errors.New("not a directory") -} - -func (f *AssetFile) Size() int64 { - return f.FakeFile.Size() -} - -func (f *AssetFile) Stat() (os.FileInfo, error) { - return f, nil -} - -// AssetDirectory implements http.File interface for a directory -type AssetDirectory struct { - AssetFile - ChildrenRead int - Children []os.FileInfo -} - -func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory { - fileinfos := make([]os.FileInfo, 0, len(children)) - for _, child := range children { - _, err := fs.AssetDir(filepath.Join(name, child)) - fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0}) - } - return &AssetDirectory{ - AssetFile{ - bytes.NewReader(nil), - ioutil.NopCloser(nil), - FakeFile{name, true, 0}, - }, - 0, - fileinfos} -} - -func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) { - if count <= 0 { - return f.Children, nil - } - if f.ChildrenRead+count > len(f.Children) { - count = len(f.Children) - f.ChildrenRead - } - rv := f.Children[f.ChildrenRead : f.ChildrenRead+count] - f.ChildrenRead += count - return rv, nil -} - -func (f *AssetDirectory) Stat() (os.FileInfo, error) { - return f, nil -} - -// AssetFS implements http.FileSystem, allowing -// embedded files to be served from net/http package. 
-type AssetFS struct { - // Asset should return content of file in path if exists - Asset func(path string) ([]byte, error) - // AssetDir should return list of files in the path - AssetDir func(path string) ([]string, error) - // Prefix would be prepended to http requests - Prefix string -} - -func (fs *AssetFS) Open(name string) (http.File, error) { - name = path.Join(fs.Prefix, name) - if len(name) > 0 && name[0] == '/' { - name = name[1:] - } - if b, err := fs.Asset(name); err == nil { - return NewAssetFile(name, b), nil - } - if children, err := fs.AssetDir(name); err == nil { - return NewAssetDirectory(name, children, fs), nil - } else { - return nil, err - } -} diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/doc.go b/vendor/github.com/elazarl/go-bindata-assetfs/doc.go deleted file mode 100644 index a664249f342..00000000000 --- a/vendor/github.com/elazarl/go-bindata-assetfs/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// assetfs allows packages to serve static content embedded -// with the go-bindata tool with the standard net/http package. -// -// See https://github.com/jteeuwen/go-bindata for more information -// about embedding binary data with go-bindata. -// -// Usage example, after running -// $ go-bindata data/... -// use: -// http.Handle("/", -// http.FileServer( -// &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"})) -package assetfs diff --git a/vendor/github.com/emicklei/go-restful-swagger12/LICENSE b/vendor/github.com/emicklei/go-restful-swagger12/LICENSE deleted file mode 100644 index aeab5b440e8..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2017 Ernest Micklei - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go b/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go deleted file mode 100644 index 9f4c3690acb..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go +++ /dev/null @@ -1,64 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "encoding/json" -) - -// ApiDeclarationList maintains an ordered list of ApiDeclaration. 
-type ApiDeclarationList struct { - List []ApiDeclaration -} - -// At returns the ApiDeclaration by its path unless absent, then ok is false -func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) { - for _, each := range l.List { - if each.ResourcePath == path { - return each, true - } - } - return a, false -} - -// Put adds or replaces a ApiDeclaration with this name -func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) { - // maybe replace existing - for i, each := range l.List { - if each.ResourcePath == path { - // replace - l.List[i] = a - return - } - } - // add - l.List = append(l.List, a) -} - -// Do enumerates all the properties, each with its assigned name -func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) { - for _, each := range l.List { - block(each.ResourcePath, each) - } -} - -// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty -func (l ApiDeclarationList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.ResourcePath) - buf.WriteString("\": ") - encoder.Encode(each) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/config.go b/vendor/github.com/emicklei/go-restful-swagger12/config.go deleted file mode 100644 index 18f8e57d903..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/config.go +++ /dev/null @@ -1,46 +0,0 @@ -package swagger - -import ( - "net/http" - "reflect" - - "github.com/emicklei/go-restful" -) - -// PostBuildDeclarationMapFunc can be used to modify the api declaration map. -type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList) - -// MapSchemaFormatFunc can be used to modify typeName at definition time. -type MapSchemaFormatFunc func(typeName string) string - -// MapModelTypeNameFunc can be used to return the desired typeName for a given -// type. It will return false if the default name should be used. -type MapModelTypeNameFunc func(t reflect.Type) (string, bool) - -type Config struct { - // url where the services are available, e.g. http://localhost:8080 - // if left empty then the basePath of Swagger is taken from the actual request - WebServicesUrl string - // path where the JSON api is avaiable , e.g. /apidocs - ApiPath string - // [optional] path where the swagger UI will be served, e.g. /swagger - SwaggerPath string - // [optional] location of folder containing Swagger HTML5 application index.html - SwaggerFilePath string - // api listing is constructed from this list of restful WebServices. - WebServices []*restful.WebService - // will serve all static content (scripts,pages,images) - StaticHandler http.Handler - // [optional] on default CORS (Cross-Origin-Resource-Sharing) is enabled. - DisableCORS bool - // Top-level API version. Is reflected in the resource listing. - ApiVersion string - // If set then call this handler after building the complete ApiDeclaration Map - PostBuildHandler PostBuildDeclarationMapFunc - // Swagger global info struct - Info Info - // [optional] If set, model builder should call this handler to get addition typename-to-swagger-format-field conversion. - SchemaFormatHandler MapSchemaFormatFunc - // [optional] If set, model builder should call this handler to retrieve the name for a given type. 
- ModelTypeNameHandler MapModelTypeNameFunc -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go b/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go deleted file mode 100644 index d40786f251f..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_builder.go +++ /dev/null @@ -1,467 +0,0 @@ -package swagger - -import ( - "encoding/json" - "reflect" - "strings" -) - -// ModelBuildable is used for extending Structs that need more control over -// how the Model appears in the Swagger api declaration. -type ModelBuildable interface { - PostBuildModel(m *Model) *Model -} - -type modelBuilder struct { - Models *ModelList - Config *Config -} - -type documentable interface { - SwaggerDoc() map[string]string -} - -// Check if this structure has a method with signature func () SwaggerDoc() map[string]string -// If it exists, retrive the documentation and overwrite all struct tag descriptions -func getDocFromMethodSwaggerDoc2(model reflect.Type) map[string]string { - if docable, ok := reflect.New(model).Elem().Interface().(documentable); ok { - return docable.SwaggerDoc() - } - return make(map[string]string) -} - -// addModelFrom creates and adds a Model to the builder and detects and calls -// the post build hook for customizations -func (b modelBuilder) addModelFrom(sample interface{}) { - if modelOrNil := b.addModel(reflect.TypeOf(sample), ""); modelOrNil != nil { - // allow customizations - if buildable, ok := sample.(ModelBuildable); ok { - modelOrNil = buildable.PostBuildModel(modelOrNil) - b.Models.Put(modelOrNil.Id, *modelOrNil) - } - } -} - -func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model { - // Turn pointers into simpler types so further checks are - // correct. - if st.Kind() == reflect.Ptr { - st = st.Elem() - } - - modelName := b.keyFrom(st) - if nameOverride != "" { - modelName = nameOverride - } - // no models needed for primitive types - if b.isPrimitiveType(modelName) { - return nil - } - // golang encoding/json packages says array and slice values encode as - // JSON arrays, except that []byte encodes as a base64-encoded string. - // If we see a []byte here, treat it at as a primitive type (string) - // and deal with it in buildArrayTypeProperty. 
- if (st.Kind() == reflect.Slice || st.Kind() == reflect.Array) && - st.Elem().Kind() == reflect.Uint8 { - return nil - } - // see if we already have visited this model - if _, ok := b.Models.At(modelName); ok { - return nil - } - sm := Model{ - Id: modelName, - Required: []string{}, - Properties: ModelPropertyList{}} - - // reference the model before further initializing (enables recursive structs) - b.Models.Put(modelName, sm) - - // check for slice or array - if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { - b.addModel(st.Elem(), "") - return &sm - } - // check for structure or primitive type - if st.Kind() != reflect.Struct { - return &sm - } - - fullDoc := getDocFromMethodSwaggerDoc2(st) - modelDescriptions := []string{} - - for i := 0; i < st.NumField(); i++ { - field := st.Field(i) - jsonName, modelDescription, prop := b.buildProperty(field, &sm, modelName) - if len(modelDescription) > 0 { - modelDescriptions = append(modelDescriptions, modelDescription) - } - - // add if not omitted - if len(jsonName) != 0 { - // update description - if fieldDoc, ok := fullDoc[jsonName]; ok { - prop.Description = fieldDoc - } - // update Required - if b.isPropertyRequired(field) { - sm.Required = append(sm.Required, jsonName) - } - sm.Properties.Put(jsonName, prop) - } - } - - // We always overwrite documentation if SwaggerDoc method exists - // "" is special for documenting the struct itself - if modelDoc, ok := fullDoc[""]; ok { - sm.Description = modelDoc - } else if len(modelDescriptions) != 0 { - sm.Description = strings.Join(modelDescriptions, "\n") - } - - // update model builder with completed model - b.Models.Put(modelName, sm) - - return &sm -} - -func (b modelBuilder) isPropertyRequired(field reflect.StructField) bool { - required := true - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if len(s) > 1 && s[1] == "omitempty" { - return false - } - } - return required -} - -func (b modelBuilder) buildProperty(field reflect.StructField, model *Model, modelName string) (jsonName, modelDescription string, prop ModelProperty) { - jsonName = b.jsonNameOfField(field) - if len(jsonName) == 0 { - // empty name signals skip property - return "", "", prop - } - - if field.Name == "XMLName" && field.Type.String() == "xml.Name" { - // property is metadata for the xml.Name attribute, can be skipped - return "", "", prop - } - - if tag := field.Tag.Get("modelDescription"); tag != "" { - modelDescription = tag - } - - prop.setPropertyMetadata(field) - if prop.Type != nil { - return jsonName, modelDescription, prop - } - fieldType := field.Type - - // check if type is doing its own marshalling - marshalerType := reflect.TypeOf((*json.Marshaler)(nil)).Elem() - if fieldType.Implements(marshalerType) { - var pType = "string" - if prop.Type == nil { - prop.Type = &pType - } - if prop.Format == "" { - prop.Format = b.jsonSchemaFormat(b.keyFrom(fieldType)) - } - return jsonName, modelDescription, prop - } - - // check if annotation says it is a string - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if len(s) > 1 && s[1] == "string" { - stringt := "string" - prop.Type = &stringt - return jsonName, modelDescription, prop - } - } - - fieldKind := fieldType.Kind() - switch { - case fieldKind == reflect.Struct: - jsonName, prop := b.buildStructTypeProperty(field, jsonName, model) - return jsonName, modelDescription, prop - case fieldKind == reflect.Slice || fieldKind == reflect.Array: - jsonName, prop := 
b.buildArrayTypeProperty(field, jsonName, modelName) - return jsonName, modelDescription, prop - case fieldKind == reflect.Ptr: - jsonName, prop := b.buildPointerTypeProperty(field, jsonName, modelName) - return jsonName, modelDescription, prop - case fieldKind == reflect.String: - stringt := "string" - prop.Type = &stringt - return jsonName, modelDescription, prop - case fieldKind == reflect.Map: - // if it's a map, it's unstructured, and swagger 1.2 can't handle it - objectType := "object" - prop.Type = &objectType - return jsonName, modelDescription, prop - } - - fieldTypeName := b.keyFrom(fieldType) - if b.isPrimitiveType(fieldTypeName) { - mapped := b.jsonSchemaType(fieldTypeName) - prop.Type = &mapped - prop.Format = b.jsonSchemaFormat(fieldTypeName) - return jsonName, modelDescription, prop - } - modelType := b.keyFrom(fieldType) - prop.Ref = &modelType - - if fieldType.Name() == "" { // override type of anonymous structs - nestedTypeName := modelName + "." + jsonName - prop.Ref = &nestedTypeName - b.addModel(fieldType, nestedTypeName) - } - return jsonName, modelDescription, prop -} - -func hasNamedJSONTag(field reflect.StructField) bool { - parts := strings.Split(field.Tag.Get("json"), ",") - if len(parts) == 0 { - return false - } - for _, s := range parts[1:] { - if s == "inline" { - return false - } - } - return len(parts[0]) > 0 -} - -func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonName string, model *Model) (nameJson string, prop ModelProperty) { - prop.setPropertyMetadata(field) - // Check for type override in tag - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - // check for anonymous - if len(fieldType.Name()) == 0 { - // anonymous - anonType := model.Id + "." + jsonName - b.addModel(fieldType, anonType) - prop.Ref = &anonType - return jsonName, prop - } - - if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) { - // embedded struct - sub := modelBuilder{new(ModelList), b.Config} - sub.addModel(fieldType, "") - subKey := sub.keyFrom(fieldType) - // merge properties from sub - subModel, _ := sub.Models.At(subKey) - subModel.Properties.Do(func(k string, v ModelProperty) { - model.Properties.Put(k, v) - // if subModel says this property is required then include it - required := false - for _, each := range subModel.Required { - if k == each { - required = true - break - } - } - if required { - model.Required = append(model.Required, k) - } - }) - // add all new referenced models - sub.Models.Do(func(key string, sub Model) { - if key != subKey { - if _, ok := b.Models.At(key); !ok { - b.Models.Put(key, sub) - } - } - }) - // empty name signals skip property - return "", prop - } - // simple struct - b.addModel(fieldType, "") - var pType = b.keyFrom(fieldType) - prop.Ref = &pType - return jsonName, prop -} - -func (b modelBuilder) buildArrayTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) { - // check for type override in tags - prop.setPropertyMetadata(field) - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - if fieldType.Elem().Kind() == reflect.Uint8 { - stringt := "string" - prop.Type = &stringt - return jsonName, prop - } - var pType = "array" - prop.Type = &pType - isPrimitive := b.isPrimitiveType(fieldType.Elem().Name()) - elemTypeName := b.getElementTypeName(modelName, jsonName, fieldType.Elem()) - prop.Items = new(Item) - if isPrimitive { - mapped := b.jsonSchemaType(elemTypeName) - prop.Items.Type = 
&mapped - } else { - prop.Items.Ref = &elemTypeName - } - // add|overwrite model for element type - if fieldType.Elem().Kind() == reflect.Ptr { - fieldType = fieldType.Elem() - } - if !isPrimitive { - b.addModel(fieldType.Elem(), elemTypeName) - } - return jsonName, prop -} - -func (b modelBuilder) buildPointerTypeProperty(field reflect.StructField, jsonName, modelName string) (nameJson string, prop ModelProperty) { - prop.setPropertyMetadata(field) - // Check for type override in tags - if prop.Type != nil { - return jsonName, prop - } - fieldType := field.Type - - // override type of pointer to list-likes - if fieldType.Elem().Kind() == reflect.Slice || fieldType.Elem().Kind() == reflect.Array { - var pType = "array" - prop.Type = &pType - isPrimitive := b.isPrimitiveType(fieldType.Elem().Elem().Name()) - elemName := b.getElementTypeName(modelName, jsonName, fieldType.Elem().Elem()) - if isPrimitive { - primName := b.jsonSchemaType(elemName) - prop.Items = &Item{Ref: &primName} - } else { - prop.Items = &Item{Ref: &elemName} - } - if !isPrimitive { - // add|overwrite model for element type - b.addModel(fieldType.Elem().Elem(), elemName) - } - } else { - // non-array, pointer type - fieldTypeName := b.keyFrom(fieldType.Elem()) - var pType = b.jsonSchemaType(fieldTypeName) // no star, include pkg path - if b.isPrimitiveType(fieldTypeName) { - prop.Type = &pType - prop.Format = b.jsonSchemaFormat(fieldTypeName) - return jsonName, prop - } - prop.Ref = &pType - elemName := "" - if fieldType.Elem().Name() == "" { - elemName = modelName + "." + jsonName - prop.Ref = &elemName - } - b.addModel(fieldType.Elem(), elemName) - } - return jsonName, prop -} - -func (b modelBuilder) getElementTypeName(modelName, jsonName string, t reflect.Type) string { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.Name() == "" { - return modelName + "." 
+ jsonName - } - return b.keyFrom(t) -} - -func (b modelBuilder) keyFrom(st reflect.Type) string { - key := st.String() - if b.Config != nil && b.Config.ModelTypeNameHandler != nil { - if name, ok := b.Config.ModelTypeNameHandler(st); ok { - key = name - } - } - if len(st.Name()) == 0 { // unnamed type - // Swagger UI has special meaning for [ - key = strings.Replace(key, "[]", "||", -1) - } - return key -} - -// see also https://golang.org/ref/spec#Numeric_types -func (b modelBuilder) isPrimitiveType(modelName string) bool { - if len(modelName) == 0 { - return false - } - return strings.Contains("uint uint8 uint16 uint32 uint64 int int8 int16 int32 int64 float32 float64 bool string byte rune time.Time", modelName) -} - -// jsonNameOfField returns the name of the field as it should appear in JSON format -// An empty string indicates that this field is not part of the JSON representation -func (b modelBuilder) jsonNameOfField(field reflect.StructField) string { - if jsonTag := field.Tag.Get("json"); jsonTag != "" { - s := strings.Split(jsonTag, ",") - if s[0] == "-" { - // empty name signals skip property - return "" - } else if s[0] != "" { - return s[0] - } - } - return field.Name -} - -// see also http://json-schema.org/latest/json-schema-core.html#anchor8 -func (b modelBuilder) jsonSchemaType(modelName string) string { - schemaMap := map[string]string{ - "uint": "integer", - "uint8": "integer", - "uint16": "integer", - "uint32": "integer", - "uint64": "integer", - - "int": "integer", - "int8": "integer", - "int16": "integer", - "int32": "integer", - "int64": "integer", - - "byte": "integer", - "float64": "number", - "float32": "number", - "bool": "boolean", - "time.Time": "string", - } - mapped, ok := schemaMap[modelName] - if !ok { - return modelName // use as is (custom or struct) - } - return mapped -} - -func (b modelBuilder) jsonSchemaFormat(modelName string) string { - if b.Config != nil && b.Config.SchemaFormatHandler != nil { - if mapped := b.Config.SchemaFormatHandler(modelName); mapped != "" { - return mapped - } - } - schemaMap := map[string]string{ - "int": "int32", - "int32": "int32", - "int64": "int64", - "byte": "byte", - "uint": "integer", - "uint8": "byte", - "float64": "double", - "float32": "float", - "time.Time": "date-time", - "*time.Time": "date-time", - } - mapped, ok := schemaMap[modelName] - if !ok { - return "" // no format - } - return mapped -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_list.go b/vendor/github.com/emicklei/go-restful-swagger12/model_list.go deleted file mode 100644 index 9bb6cb67850..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_list.go +++ /dev/null @@ -1,86 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -import ( - "bytes" - "encoding/json" -) - -// NamedModel associates a name with a Model (not using its Id) -type NamedModel struct { - Name string - Model Model -} - -// ModelList encapsulates a list of NamedModel (association) -type ModelList struct { - List []NamedModel -} - -// Put adds or replaces a Model by its name -func (l *ModelList) Put(name string, model Model) { - for i, each := range l.List { - if each.Name == name { - // replace - l.List[i] = NamedModel{name, model} - return - } - } - // add - l.List = append(l.List, NamedModel{name, model}) -} - -// At returns a Model by its name, ok is false if absent -func (l *ModelList) At(name string) (m Model, ok bool) { - for _, each := range l.List { - if each.Name == name { - return each.Model, true - } - } - return m, false -} - -// Do enumerates all the models, each with its assigned name -func (l *ModelList) Do(block func(name string, value Model)) { - for _, each := range l.List { - block(each.Name, each.Model) - } -} - -// MarshalJSON writes the ModelList as if it was a map[string]Model -func (l ModelList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.Name) - buf.WriteString("\": ") - encoder.Encode(each.Model) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -// UnmarshalJSON reads back a ModelList. This is an expensive operation. -func (l *ModelList) UnmarshalJSON(data []byte) error { - raw := map[string]interface{}{} - json.NewDecoder(bytes.NewReader(data)).Decode(&raw) - for k, v := range raw { - // produces JSON bytes for each value - data, err := json.Marshal(v) - if err != nil { - return err - } - var m Model - json.NewDecoder(bytes.NewReader(data)).Decode(&m) - l.Put(k, m) - } - return nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go deleted file mode 100644 index a433b6b7027..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_property_ext.go +++ /dev/null @@ -1,81 +0,0 @@ -package swagger - -import ( - "reflect" - "strings" -) - -func (prop *ModelProperty) setDescription(field reflect.StructField) { - if tag := field.Tag.Get("description"); tag != "" { - prop.Description = tag - } -} - -func (prop *ModelProperty) setDefaultValue(field reflect.StructField) { - if tag := field.Tag.Get("default"); tag != "" { - prop.DefaultValue = Special(tag) - } -} - -func (prop *ModelProperty) setEnumValues(field reflect.StructField) { - // We use | to separate the enum values. This value is chosen - // since its unlikely to be useful in actual enumeration values. - if tag := field.Tag.Get("enum"); tag != "" { - prop.Enum = strings.Split(tag, "|") - } -} - -func (prop *ModelProperty) setMaximum(field reflect.StructField) { - if tag := field.Tag.Get("maximum"); tag != "" { - prop.Maximum = tag - } -} - -func (prop *ModelProperty) setType(field reflect.StructField) { - if tag := field.Tag.Get("type"); tag != "" { - // Check if the first two characters of the type tag are - // intended to emulate slice/array behaviour. 
- // - // If type is intended to be a slice/array then add the - // overriden type to the array item instead of the main property - if len(tag) > 2 && tag[0:2] == "[]" { - pType := "array" - prop.Type = &pType - prop.Items = new(Item) - - iType := tag[2:] - prop.Items.Type = &iType - return - } - - prop.Type = &tag - } -} - -func (prop *ModelProperty) setMinimum(field reflect.StructField) { - if tag := field.Tag.Get("minimum"); tag != "" { - prop.Minimum = tag - } -} - -func (prop *ModelProperty) setUniqueItems(field reflect.StructField) { - tag := field.Tag.Get("unique") - switch tag { - case "true": - v := true - prop.UniqueItems = &v - case "false": - v := false - prop.UniqueItems = &v - } -} - -func (prop *ModelProperty) setPropertyMetadata(field reflect.StructField) { - prop.setDescription(field) - prop.setEnumValues(field) - prop.setMinimum(field) - prop.setMaximum(field) - prop.setUniqueItems(field) - prop.setDefaultValue(field) - prop.setType(field) -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go b/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go deleted file mode 100644 index 3babb194489..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/model_property_list.go +++ /dev/null @@ -1,87 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "encoding/json" -) - -// NamedModelProperty associates a name to a ModelProperty -type NamedModelProperty struct { - Name string - Property ModelProperty -} - -// ModelPropertyList encapsulates a list of NamedModelProperty (association) -type ModelPropertyList struct { - List []NamedModelProperty -} - -// At returns the ModelPropety by its name unless absent, then ok is false -func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) { - for _, each := range l.List { - if each.Name == name { - return each.Property, true - } - } - return p, false -} - -// Put adds or replaces a ModelProperty with this name -func (l *ModelPropertyList) Put(name string, prop ModelProperty) { - // maybe replace existing - for i, each := range l.List { - if each.Name == name { - // replace - l.List[i] = NamedModelProperty{Name: name, Property: prop} - return - } - } - // add - l.List = append(l.List, NamedModelProperty{Name: name, Property: prop}) -} - -// Do enumerates all the properties, each with its assigned name -func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) { - for _, each := range l.List { - block(each.Name, each.Property) - } -} - -// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty -func (l ModelPropertyList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - buf.WriteString("{\n") - for i, each := range l.List { - buf.WriteString("\"") - buf.WriteString(each.Name) - buf.WriteString("\": ") - encoder.Encode(each.Property) - if i < len(l.List)-1 { - buf.WriteString(",\n") - } - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation. 
-func (l *ModelPropertyList) UnmarshalJSON(data []byte) error { - raw := map[string]interface{}{} - json.NewDecoder(bytes.NewReader(data)).Decode(&raw) - for k, v := range raw { - // produces JSON bytes for each value - data, err := json.Marshal(v) - if err != nil { - return err - } - var m ModelProperty - json.NewDecoder(bytes.NewReader(data)).Decode(&m) - l.Put(k, m) - } - return nil -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go b/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go deleted file mode 100644 index b33ccfbeb9e..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/ordered_route_map.go +++ /dev/null @@ -1,36 +0,0 @@ -package swagger - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "github.com/emicklei/go-restful" - -type orderedRouteMap struct { - elements map[string][]restful.Route - keys []string -} - -func newOrderedRouteMap() *orderedRouteMap { - return &orderedRouteMap{ - elements: map[string][]restful.Route{}, - keys: []string{}, - } -} - -func (o *orderedRouteMap) Add(key string, route restful.Route) { - routes, ok := o.elements[key] - if ok { - routes = append(routes, route) - o.elements[key] = routes - return - } - o.elements[key] = []restful.Route{route} - o.keys = append(o.keys, key) -} - -func (o *orderedRouteMap) Do(block func(key string, routes []restful.Route)) { - for _, k := range o.keys { - block(k, o.elements[k]) - } -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger.go deleted file mode 100644 index 9c40833e7da..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger.go +++ /dev/null @@ -1,185 +0,0 @@ -// Package swagger implements the structures of the Swagger -// https://github.com/wordnik/swagger-spec/blob/master/versions/1.2.md -package swagger - -const swaggerVersion = "1.2" - -// 4.3.3 Data Type Fields -type DataTypeFields struct { - Type *string `json:"type,omitempty"` // if Ref not used - Ref *string `json:"$ref,omitempty"` // if Type not used - Format string `json:"format,omitempty"` - DefaultValue Special `json:"defaultValue,omitempty"` - Enum []string `json:"enum,omitempty"` - Minimum string `json:"minimum,omitempty"` - Maximum string `json:"maximum,omitempty"` - Items *Item `json:"items,omitempty"` - UniqueItems *bool `json:"uniqueItems,omitempty"` -} - -type Special string - -// 4.3.4 Items Object -type Item struct { - Type *string `json:"type,omitempty"` - Ref *string `json:"$ref,omitempty"` - Format string `json:"format,omitempty"` -} - -// 5.1 Resource Listing -type ResourceListing struct { - SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2 - Apis []Resource `json:"apis"` - ApiVersion string `json:"apiVersion"` - Info Info `json:"info"` - Authorizations []Authorization `json:"authorizations,omitempty"` -} - -// 5.1.2 Resource Object -type Resource struct { - Path string `json:"path"` // relative or absolute, must start with / - Description string `json:"description"` -} - -// 5.1.3 Info Object -type Info struct { - Title string `json:"title"` - Description string `json:"description"` - TermsOfServiceUrl string `json:"termsOfServiceUrl,omitempty"` - Contact string `json:"contact,omitempty"` - License string `json:"license,omitempty"` - LicenseUrl string `json:"licenseUrl,omitempty"` -} - -// 5.1.5 -type Authorization struct { - Type string `json:"type"` - 
PassAs string `json:"passAs"` - Keyname string `json:"keyname"` - Scopes []Scope `json:"scopes"` - GrantTypes []GrantType `json:"grandTypes"` -} - -// 5.1.6, 5.2.11 -type Scope struct { - // Required. The name of the scope. - Scope string `json:"scope"` - // Recommended. A short description of the scope. - Description string `json:"description"` -} - -// 5.1.7 -type GrantType struct { - Implicit Implicit `json:"implicit"` - AuthorizationCode AuthorizationCode `json:"authorization_code"` -} - -// 5.1.8 Implicit Object -type Implicit struct { - // Required. The login endpoint definition. - loginEndpoint LoginEndpoint `json:"loginEndpoint"` - // An optional alternative name to standard "access_token" OAuth2 parameter. - TokenName string `json:"tokenName"` -} - -// 5.1.9 Authorization Code Object -type AuthorizationCode struct { - TokenRequestEndpoint TokenRequestEndpoint `json:"tokenRequestEndpoint"` - TokenEndpoint TokenEndpoint `json:"tokenEndpoint"` -} - -// 5.1.10 Login Endpoint Object -type LoginEndpoint struct { - // Required. The URL of the authorization endpoint for the implicit grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` -} - -// 5.1.11 Token Request Endpoint Object -type TokenRequestEndpoint struct { - // Required. The URL of the authorization endpoint for the authentication code grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` - // An optional alternative name to standard "client_id" OAuth2 parameter. - ClientIdName string `json:"clientIdName"` - // An optional alternative name to the standard "client_secret" OAuth2 parameter. - ClientSecretName string `json:"clientSecretName"` -} - -// 5.1.12 Token Endpoint Object -type TokenEndpoint struct { - // Required. The URL of the token endpoint for the authentication code grant flow. The value SHOULD be in a URL format. - Url string `json:"url"` - // An optional alternative name to standard "access_token" OAuth2 parameter. 
- TokenName string `json:"tokenName"` -} - -// 5.2 API Declaration -type ApiDeclaration struct { - SwaggerVersion string `json:"swaggerVersion"` - ApiVersion string `json:"apiVersion"` - BasePath string `json:"basePath"` - ResourcePath string `json:"resourcePath"` // must start with / - Info Info `json:"info"` - Apis []Api `json:"apis,omitempty"` - Models ModelList `json:"models,omitempty"` - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Authorizations []Authorization `json:"authorizations,omitempty"` -} - -// 5.2.2 API Object -type Api struct { - Path string `json:"path"` // relative or absolute, must start with / - Description string `json:"description"` - Operations []Operation `json:"operations,omitempty"` -} - -// 5.2.3 Operation Object -type Operation struct { - DataTypeFields - Method string `json:"method"` - Summary string `json:"summary,omitempty"` - Notes string `json:"notes,omitempty"` - Nickname string `json:"nickname"` - Authorizations []Authorization `json:"authorizations,omitempty"` - Parameters []Parameter `json:"parameters"` - ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Deprecated string `json:"deprecated,omitempty"` -} - -// 5.2.4 Parameter Object -type Parameter struct { - DataTypeFields - ParamType string `json:"paramType"` // path,query,body,header,form - Name string `json:"name"` - Description string `json:"description"` - Required bool `json:"required"` - AllowMultiple bool `json:"allowMultiple"` -} - -// 5.2.5 Response Message Object -type ResponseMessage struct { - Code int `json:"code"` - Message string `json:"message"` - ResponseModel string `json:"responseModel,omitempty"` -} - -// 5.2.6, 5.2.7 Models Object -type Model struct { - Id string `json:"id"` - Description string `json:"description,omitempty"` - Required []string `json:"required,omitempty"` - Properties ModelPropertyList `json:"properties"` - SubTypes []string `json:"subTypes,omitempty"` - Discriminator string `json:"discriminator,omitempty"` -} - -// 5.2.8 Properties Object -type ModelProperty struct { - DataTypeFields - Description string `json:"description,omitempty"` -} - -// 5.2.10 -type Authorizations map[string]Authorization diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go deleted file mode 100644 index 05a3c7e76f9..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger_builder.go +++ /dev/null @@ -1,21 +0,0 @@ -package swagger - -type SwaggerBuilder struct { - SwaggerService -} - -func NewSwaggerBuilder(config Config) *SwaggerBuilder { - return &SwaggerBuilder{*newSwaggerService(config)} -} - -func (sb SwaggerBuilder) ProduceListing() ResourceListing { - return sb.SwaggerService.produceListing() -} - -func (sb SwaggerBuilder) ProduceAllDeclarations() map[string]ApiDeclaration { - return sb.SwaggerService.produceAllDeclarations() -} - -func (sb SwaggerBuilder) ProduceDeclarations(route string) (*ApiDeclaration, bool) { - return sb.SwaggerService.produceDeclarations(route) -} diff --git a/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go b/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go deleted file mode 100644 index d90623120a6..00000000000 --- a/vendor/github.com/emicklei/go-restful-swagger12/swagger_webservice.go +++ /dev/null @@ -1,443 +0,0 @@ 
-package swagger - -import ( - "fmt" - - "github.com/emicklei/go-restful" - // "github.com/emicklei/hopwatch" - "net/http" - "reflect" - "sort" - "strings" - - "github.com/emicklei/go-restful/log" -) - -type SwaggerService struct { - config Config - apiDeclarationMap *ApiDeclarationList -} - -func newSwaggerService(config Config) *SwaggerService { - sws := &SwaggerService{ - config: config, - apiDeclarationMap: new(ApiDeclarationList)} - - // Build all ApiDeclarations - for _, each := range config.WebServices { - rootPath := each.RootPath() - // skip the api service itself - if rootPath != config.ApiPath { - if rootPath == "" || rootPath == "/" { - // use routes - for _, route := range each.Routes() { - entry := staticPathFromRoute(route) - _, exists := sws.apiDeclarationMap.At(entry) - if !exists { - sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry)) - } - } - } else { // use root path - sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath())) - } - } - } - - // if specified then call the PostBuilderHandler - if config.PostBuildHandler != nil { - config.PostBuildHandler(sws.apiDeclarationMap) - } - return sws -} - -// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf -var LogInfo = func(format string, v ...interface{}) { - // use the restful package-wide logger - log.Printf(format, v...) -} - -// InstallSwaggerService add the WebService that provides the API documentation of all services -// conform the Swagger documentation specifcation. (https://github.com/wordnik/swagger-core/wiki). -func InstallSwaggerService(aSwaggerConfig Config) { - RegisterSwaggerService(aSwaggerConfig, restful.DefaultContainer) -} - -// RegisterSwaggerService add the WebService that provides the API documentation of all services -// conform the Swagger documentation specifcation. (https://github.com/wordnik/swagger-core/wiki). 
-func RegisterSwaggerService(config Config, wsContainer *restful.Container) { - sws := newSwaggerService(config) - ws := new(restful.WebService) - ws.Path(config.ApiPath) - ws.Produces(restful.MIME_JSON) - if config.DisableCORS { - ws.Filter(enableCORS) - } - ws.Route(ws.GET("/").To(sws.getListing)) - ws.Route(ws.GET("/{a}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}").To(sws.getDeclarations)) - ws.Route(ws.GET("/{a}/{b}/{c}/{d}/{e}/{f}/{g}").To(sws.getDeclarations)) - LogInfo("[restful/swagger] listing is available at %v%v", config.WebServicesUrl, config.ApiPath) - wsContainer.Add(ws) - - // Check paths for UI serving - if config.StaticHandler == nil && config.SwaggerFilePath != "" && config.SwaggerPath != "" { - swaggerPathSlash := config.SwaggerPath - // path must end with slash / - if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] { - LogInfo("[restful/swagger] use corrected SwaggerPath ; must end with slash (/)") - swaggerPathSlash += "/" - } - - LogInfo("[restful/swagger] %v%v is mapped to folder %v", config.WebServicesUrl, swaggerPathSlash, config.SwaggerFilePath) - wsContainer.Handle(swaggerPathSlash, http.StripPrefix(swaggerPathSlash, http.FileServer(http.Dir(config.SwaggerFilePath)))) - - //if we define a custom static handler use it - } else if config.StaticHandler != nil && config.SwaggerPath != "" { - swaggerPathSlash := config.SwaggerPath - // path must end with slash / - if "/" != config.SwaggerPath[len(config.SwaggerPath)-1:] { - LogInfo("[restful/swagger] use corrected SwaggerFilePath ; must end with slash (/)") - swaggerPathSlash += "/" - - } - LogInfo("[restful/swagger] %v%v is mapped to custom Handler %T", config.WebServicesUrl, swaggerPathSlash, config.StaticHandler) - wsContainer.Handle(swaggerPathSlash, config.StaticHandler) - - } else { - LogInfo("[restful/swagger] Swagger(File)Path is empty ; no UI is served") - } -} - -func staticPathFromRoute(r restful.Route) string { - static := r.Path - bracket := strings.Index(static, "{") - if bracket <= 1 { // result cannot be empty - return static - } - if bracket != -1 { - static = r.Path[:bracket] - } - if strings.HasSuffix(static, "/") { - return static[:len(static)-1] - } else { - return static - } -} - -func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - if origin := req.HeaderParameter(restful.HEADER_Origin); origin != "" { - // prevent duplicate header - if len(resp.Header().Get(restful.HEADER_AccessControlAllowOrigin)) == 0 { - resp.AddHeader(restful.HEADER_AccessControlAllowOrigin, origin) - } - } - chain.ProcessFilter(req, resp) -} - -func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) { - listing := sws.produceListing() - resp.WriteAsJson(listing) -} - -func (sws SwaggerService) produceListing() ResourceListing { - listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion, Info: sws.config.Info} - sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { - ref := Resource{Path: k} - if len(v.Apis) > 0 { // use description of first (could still be empty) - ref.Description = v.Apis[0].Description - } - listing.Apis = append(listing.Apis, ref) - }) - return listing -} - -func (sws SwaggerService) getDeclarations(req *restful.Request, 
resp *restful.Response) { - decl, ok := sws.produceDeclarations(composeRootPath(req)) - if !ok { - resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found") - return - } - // unless WebServicesUrl is given - if len(sws.config.WebServicesUrl) == 0 { - // update base path from the actual request - // TODO how to detect https? assume http for now - var host string - // X-Forwarded-Host or Host or Request.Host - hostvalues, ok := req.Request.Header["X-Forwarded-Host"] // apache specific? - if !ok || len(hostvalues) == 0 { - forwarded, ok := req.Request.Header["Host"] // without reverse-proxy - if !ok || len(forwarded) == 0 { - // fallback to Host field - host = req.Request.Host - } else { - host = forwarded[0] - } - } else { - host = hostvalues[0] - } - // inspect Referer for the scheme (http vs https) - scheme := "http" - if referer := req.Request.Header["Referer"]; len(referer) > 0 { - if strings.HasPrefix(referer[0], "https") { - scheme = "https" - } - } - decl.BasePath = fmt.Sprintf("%s://%s", scheme, host) - } - resp.WriteAsJson(decl) -} - -func (sws SwaggerService) produceAllDeclarations() map[string]ApiDeclaration { - decls := map[string]ApiDeclaration{} - sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { - decls[k] = v - }) - return decls -} - -func (sws SwaggerService) produceDeclarations(route string) (*ApiDeclaration, bool) { - decl, ok := sws.apiDeclarationMap.At(route) - if !ok { - return nil, false - } - decl.BasePath = sws.config.WebServicesUrl - return &decl, true -} - -// composeDeclaration uses all routes and parameters to create a ApiDeclaration -func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix string) ApiDeclaration { - decl := ApiDeclaration{ - SwaggerVersion: swaggerVersion, - BasePath: sws.config.WebServicesUrl, - ResourcePath: pathPrefix, - Models: ModelList{}, - ApiVersion: ws.Version()} - - // collect any path parameters - rootParams := []Parameter{} - for _, param := range ws.PathParameters() { - rootParams = append(rootParams, asSwaggerParameter(param.Data())) - } - // aggregate by path - pathToRoutes := newOrderedRouteMap() - for _, other := range ws.Routes() { - if strings.HasPrefix(other.Path, pathPrefix) { - if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' { - continue - } - pathToRoutes.Add(other.Path, other) - } - } - pathToRoutes.Do(func(path string, routes []restful.Route) { - api := Api{Path: strings.TrimSuffix(withoutWildcard(path), "/"), Description: ws.Documentation()} - voidString := "void" - for _, route := range routes { - operation := Operation{ - Method: route.Method, - Summary: route.Doc, - Notes: route.Notes, - // Type gets overwritten if there is a write sample - DataTypeFields: DataTypeFields{Type: &voidString}, - Parameters: []Parameter{}, - Nickname: route.Operation, - ResponseMessages: composeResponseMessages(route, &decl, &sws.config)} - - operation.Consumes = route.Consumes - operation.Produces = route.Produces - - // share root params if any - for _, swparam := range rootParams { - operation.Parameters = append(operation.Parameters, swparam) - } - // route specific params - for _, param := range route.ParameterDocs { - operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data())) - } - - sws.addModelsFromRouteTo(&operation, route, &decl) - api.Operations = append(api.Operations, operation) - } - decl.Apis = append(decl.Apis, api) - }) - return decl -} - -func withoutWildcard(path string) string { - if 
strings.HasSuffix(path, ":*}") { - return path[0:len(path)-3] + "}" - } - return path -} - -// composeResponseMessages takes the ResponseErrors (if any) and creates ResponseMessages from them. -func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *Config) (messages []ResponseMessage) { - if route.ResponseErrors == nil { - return messages - } - // sort by code - codes := sort.IntSlice{} - for code := range route.ResponseErrors { - codes = append(codes, code) - } - codes.Sort() - for _, code := range codes { - each := route.ResponseErrors[code] - message := ResponseMessage{ - Code: code, - Message: each.Message, - } - if each.Model != nil { - st := reflect.TypeOf(each.Model) - isCollection, st := detectCollectionType(st) - // collection cannot be in responsemodel - if !isCollection { - modelName := modelBuilder{}.keyFrom(st) - modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "") - message.ResponseModel = modelName - } - } - messages = append(messages, message) - } - return -} - -// addModelsFromRoute takes any read or write sample from the Route and creates a Swagger model from it. -func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) { - if route.ReadSample != nil { - sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models) - } - if route.WriteSample != nil { - sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models) - } -} - -func detectCollectionType(st reflect.Type) (bool, reflect.Type) { - isCollection := false - if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { - st = st.Elem() - isCollection = true - } else { - if st.Kind() == reflect.Ptr { - if st.Elem().Kind() == reflect.Slice || st.Elem().Kind() == reflect.Array { - st = st.Elem().Elem() - isCollection = true - } - } - } - return isCollection, st -} - -// addModelFromSample creates and adds (or overwrites) a Model from a sample resource -func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) { - mb := modelBuilder{Models: models, Config: &sws.config} - if isResponse { - sampleType, items := asDataType(sample, &sws.config) - operation.Type = sampleType - operation.Items = items - } - mb.addModelFrom(sample) -} - -func asSwaggerParameter(param restful.ParameterData) Parameter { - return Parameter{ - DataTypeFields: DataTypeFields{ - Type: ¶m.DataType, - Format: asFormat(param.DataType, param.DataFormat), - DefaultValue: Special(param.DefaultValue), - }, - Name: param.Name, - Description: param.Description, - ParamType: asParamType(param.Kind), - - Required: param.Required} -} - -// Between 1..7 path parameters is supported -func composeRootPath(req *restful.Request) string { - path := "/" + req.PathParameter("a") - b := req.PathParameter("b") - if b == "" { - return path - } - path = path + "/" + b - c := req.PathParameter("c") - if c == "" { - return path - } - path = path + "/" + c - d := req.PathParameter("d") - if d == "" { - return path - } - path = path + "/" + d - e := req.PathParameter("e") - if e == "" { - return path - } - path = path + "/" + e - f := req.PathParameter("f") - if f == "" { - return path - } - path = path + "/" + f - g := req.PathParameter("g") - if g == "" { - return path - } - return path + "/" + g -} - -func asFormat(dataType string, dataFormat string) string { - if dataFormat != "" { - return dataFormat - } - return "" // TODO -} - -func asParamType(kind int) string { - switch { - case kind 
== restful.PathParameterKind: - return "path" - case kind == restful.QueryParameterKind: - return "query" - case kind == restful.BodyParameterKind: - return "body" - case kind == restful.HeaderParameterKind: - return "header" - case kind == restful.FormParameterKind: - return "form" - } - return "" -} - -func asDataType(any interface{}, config *Config) (*string, *Item) { - // If it's not a collection, return the suggested model name - st := reflect.TypeOf(any) - isCollection, st := detectCollectionType(st) - modelName := modelBuilder{}.keyFrom(st) - // if it's not a collection we are done - if !isCollection { - return &modelName, nil - } - - // XXX: This is not very elegant - // We create an Item object referring to the given model - models := ModelList{} - mb := modelBuilder{Models: &models, Config: config} - mb.addModelFrom(any) - - elemTypeName := mb.getElementTypeName(modelName, "", st) - item := new(Item) - if mb.isPrimitiveType(elemTypeName) { - mapped := mb.jsonSchemaType(elemTypeName) - item.Type = &mapped - } else { - item.Ref = &elemTypeName - } - tmp := "array" - return &tmp, item -} diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE deleted file mode 100644 index 0eb9b72d84d..00000000000 --- a/vendor/github.com/evanphx/json-patch/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
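Context for the vendored library removed in the next hunk: evanphx/json-patch implements JSON merge patches (the IETF merge-patch draft that became RFC 7386), and its exported CreateMergePatch and MergePatch helpers are shown below in a minimal sketch. The snippet is illustrative only, not part of this patch, and the JSON documents are made-up examples.

package main

import (
	"fmt"
	"log"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"name":"installer","replicas":1,"labels":{"tier":"infra"}}`)
	modified := []byte(`{"name":"installer","replicas":3}`)

	// CreateMergePatch produces a merge patch describing original -> modified.
	patch, err := jsonpatch.CreateMergePatch(original, modified)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("patch: %s\n", patch) // e.g. {"labels":null,"replicas":3}

	// MergePatch applies that patch back onto the original document.
	merged, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("merged: %s\n", merged)
}
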
diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go deleted file mode 100644 index b9af252fec3..00000000000 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ /dev/null @@ -1,305 +0,0 @@ -package jsonpatch - -import ( - "encoding/json" - "fmt" - "reflect" -) - -func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { - curDoc, err := cur.intoDoc() - - if err != nil { - pruneNulls(patch) - return patch - } - - patchDoc, err := patch.intoDoc() - - if err != nil { - return patch - } - - mergeDocs(curDoc, patchDoc, mergeMerge) - - return cur -} - -func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { - for k, v := range *patch { - if v == nil { - if mergeMerge { - (*doc)[k] = nil - } else { - delete(*doc, k) - } - } else { - cur, ok := (*doc)[k] - - if !ok || cur == nil { - pruneNulls(v) - (*doc)[k] = v - } else { - (*doc)[k] = merge(cur, v, mergeMerge) - } - } - } -} - -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() - - if err == nil { - pruneDocNulls(sub) - } else { - ary, err := n.intoAry() - - if err == nil { - pruneAryNulls(ary) - } - } -} - -func pruneDocNulls(doc *partialDoc) *partialDoc { - for k, v := range *doc { - if v == nil { - delete(*doc, k) - } else { - pruneNulls(v) - } - } - - return doc -} - -func pruneAryNulls(ary *partialArray) *partialArray { - newAry := []*lazyNode{} - - for _, v := range *ary { - if v != nil { - pruneNulls(v) - newAry = append(newAry, v) - } - } - - *ary = newAry - - return ary -} - -var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") - -// MergeMergePatches merges two merge patches together, such that -// applying this resulting merged merge patch to a document yields the same -// as merging each merge patch to the document in succession. -func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { - return doMergePatch(patch1Data, patch2Data, true) -} - -// MergePatch merges the patchData into the docData. -func MergePatch(docData, patchData []byte) ([]byte, error) { - return doMergePatch(docData, patchData, false) -} - -func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { - doc := &partialDoc{} - - docErr := json.Unmarshal(docData, doc) - - patch := &partialDoc{} - - patchErr := json.Unmarshal(patchData, patch) - - if _, ok := docErr.(*json.SyntaxError); ok { - return nil, errBadJSONDoc - } - - if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, errBadJSONPatch - } - - if docErr == nil && *doc == nil { - return nil, errBadJSONDoc - } - - if patchErr == nil && *patch == nil { - return nil, errBadJSONPatch - } - - if docErr != nil || patchErr != nil { - // Not an error, just not a doc, so we turn straight into the patch - if patchErr == nil { - if mergeMerge { - doc = patch - } else { - doc = pruneDocNulls(patch) - } - } else { - patchAry := &partialArray{} - patchErr = json.Unmarshal(patchData, patchAry) - - if patchErr != nil { - return nil, errBadJSONPatch - } - - pruneAryNulls(patchAry) - - out, patchErr := json.Marshal(patchAry) - - if patchErr != nil { - return nil, errBadJSONPatch - } - - return out, nil - } - } else { - mergeDocs(doc, patch, mergeMerge) - } - - return json.Marshal(doc) -} - -// CreateMergePatch creates a merge patch as specified in http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 -// -// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content. 
-// The function will return a mergeable json document with differences from a to b. -// -// An error will be returned if any of the two documents are invalid. -func CreateMergePatch(a, b []byte) ([]byte, error) { - aI := map[string]interface{}{} - bI := map[string]interface{}{} - err := json.Unmarshal(a, &aI) - if err != nil { - return nil, errBadJSONDoc - } - err = json.Unmarshal(b, &bI) - if err != nil { - return nil, errBadJSONDoc - } - dest, err := getDiff(aI, bI) - if err != nil { - return nil, err - } - return json.Marshal(dest) -} - -// Returns true if the array matches (must be json types). -// As is idiomatic for go, an empty array is not the same as a nil array. -func matchesArray(a, b []interface{}) bool { - if len(a) != len(b) { - return false - } - if (a == nil && b != nil) || (a != nil && b == nil) { - return false - } - for i := range a { - if !matchesValue(a[i], b[i]) { - return false - } - } - return true -} - -// Returns true if the values matches (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. -func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt := bv.(string) - if bt == at { - return true - } - case float64: - bt := bv.(float64) - if bt == at { - return true - } - case bool: - bt := bv.(bool) - if bt == at { - return true - } - case nil: - // Both nil, fine. - return true - case map[string]interface{}: - bt := bv.(map[string]interface{}) - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } - } - for key := range bt { - if !matchesValue(at[key], bt[key]) { - return false - } - } - return true - case []interface{}: - bt := bv.([]interface{}) - return matchesArray(at, bt) - } - return false -} - -// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. -func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { - into := map[string]interface{}{} - for key, bv := range b { - av, ok := a[key] - // value was added - if !ok { - into[key] = bv - continue - } - // If types have changed, replace completely - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - into[key] = bv - continue - } - // Types are the same, compare values - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - dst := make(map[string]interface{}, len(bt)) - dst, err := getDiff(at, bt) - if err != nil { - return nil, err - } - if len(dst) > 0 { - into[key] = dst - } - case string, float64, bool: - if !matchesValue(av, bv) { - into[key] = bv - } - case []interface{}: - bt := bv.([]interface{}) - if !matchesArray(at, bt) { - into[key] = bv - } - case nil: - switch bv.(type) { - case nil: - // Both nil, fine. 
- default: - into[key] = bv - } - default: - panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - into[key] = nil - } - } - return into, nil -} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go deleted file mode 100644 index de70dbca78e..00000000000 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ /dev/null @@ -1,643 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" -) - -const ( - eRaw = iota - eDoc - eAry -) - -type lazyNode struct { - raw *json.RawMessage - doc partialDoc - ary partialArray - which int -} - -type operation map[string]*json.RawMessage - -// Patch is an ordered collection of operations. -type Patch []operation - -type partialDoc map[string]*lazyNode -type partialArray []*lazyNode - -type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error -} - -func newLazyNode(raw *json.RawMessage) *lazyNode { - return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} -} - -func (n *lazyNode) MarshalJSON() ([]byte, error) { - switch n.which { - case eRaw: - return json.Marshal(n.raw) - case eDoc: - return json.Marshal(n.doc) - case eAry: - return json.Marshal(n.ary) - default: - return nil, fmt.Errorf("Unknown type") - } -} - -func (n *lazyNode) UnmarshalJSON(data []byte) error { - dest := make(json.RawMessage, len(data)) - copy(dest, data) - n.raw = &dest - n.which = eRaw - return nil -} - -func (n *lazyNode) intoDoc() (*partialDoc, error) { - if n.which == eDoc { - return &n.doc, nil - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return nil, err - } - - n.which = eDoc - return &n.doc, nil -} - -func (n *lazyNode) intoAry() (*partialArray, error) { - if n.which == eAry { - return &n.ary, nil - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return nil, err - } - - n.which = eAry - return &n.ary, nil -} - -func (n *lazyNode) compact() []byte { - buf := &bytes.Buffer{} - - err := json.Compact(buf, *n.raw) - - if err != nil { - return *n.raw - } - - return buf.Bytes() -} - -func (n *lazyNode) tryDoc() bool { - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return false - } - - n.which = eDoc - return true -} - -func (n *lazyNode) tryAry() bool { - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return false - } - - n.which = eAry - return true -} - -func (n *lazyNode) equal(o *lazyNode) bool { - if n.which == eRaw { - if !n.tryDoc() && !n.tryAry() { - if o.which != eRaw { - return false - } - - return bytes.Equal(n.compact(), o.compact()) - } - } - - if n.which == eDoc { - if o.which == eRaw { - if !o.tryDoc() { - return false - } - } - - if o.which != eDoc { - return false - } - - for k, v := range n.doc { - ov, ok := o.doc[k] - - if !ok { - return false - } - - if v == nil && ov == nil { - continue - } - - if !v.equal(ov) { - return false - } - } - - return true - } - - if o.which != eAry && !o.tryAry() { - return false - } - - if len(n.ary) != len(o.ary) { - return false - } - - for idx, val := range n.ary { - if !val.equal(o.ary[idx]) { - return false - } - } - - return true -} - -func (o operation) kind() string { - if obj, ok := o["op"]; ok { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o 
operation) path() string { - if obj, ok := o["path"]; ok { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o operation) from() string { - if obj, ok := o["from"]; ok { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o operation) value() *lazyNode { - if obj, ok := o["value"]; ok { - return newLazyNode(obj) - } - - return nil -} - -func isArray(buf []byte) bool { -Loop: - for _, c := range buf { - switch c { - case ' ': - case '\n': - case '\t': - continue - case '[': - return true - default: - break Loop - } - } - - return false -} - -func findObject(pd *container, path string) (container, string) { - doc := *pd - - split := strings.Split(path, "/") - - if len(split) < 2 { - return nil, "" - } - - parts := split[1 : len(split)-1] - - key := split[len(split)-1] - - var err error - - for _, part := range parts { - - next, ok := doc.get(decodePatchKey(part)) - - if next == nil || ok != nil { - return nil, "" - } - - if isArray(*next.raw) { - doc, err = next.intoAry() - - if err != nil { - return nil, "" - } - } else { - doc, err = next.intoDoc() - - if err != nil { - return nil, "" - } - } - } - - return doc, decodePatchKey(key) -} - -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil -} - -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] - if !ok { - return fmt.Errorf("Unable to remove nonexistent key: %s", key) - } - - delete(*d, key) - return nil -} - -func (d *partialArray) set(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - sz := len(*d) - if idx+1 > sz { - sz = idx + 1 - } - - ary := make([]*lazyNode, sz) - - cur := *d - - copy(ary, cur) - - if idx >= len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) - } - - ary[idx] = val - - *d = ary - return nil -} - -func (d *partialArray) add(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - ary := make([]*lazyNode, len(*d)+1) - - cur := *d - - if idx < 0 { - idx *= -1 - - if idx > len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) - } - idx = len(ary) - idx - } - - copy(ary[0:idx], cur[0:idx]) - ary[idx] = val - copy(ary[idx+1:], cur[idx:]) - - *d = ary - return nil -} - -func (d *partialArray) get(key string) (*lazyNode, error) { - idx, err := strconv.Atoi(key) - - if err != nil { - return nil, err - } - - if idx >= len(*d) { - return nil, fmt.Errorf("Unable to access invalid index: %d", idx) - } - - return (*d)[idx], nil -} - -func (d *partialArray) remove(key string) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - cur := *d - - if idx >= len(cur) { - return fmt.Errorf("Unable to remove invalid index: %d", idx) - } - - ary := make([]*lazyNode, len(cur)-1) - - copy(ary[0:idx], cur[0:idx]) - copy(ary[idx:], cur[idx+1:]) - - *d = ary - return nil - -} - -func (p Patch) add(doc *container, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return 
fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: %s", path) - } - - return con.add(key, op.value()) -} - -func (p Patch) remove(doc *container, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: %s", path) - } - - return con.remove(key) -} - -func (p Patch) replace(doc *container, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) - } - - val, ok := con.get(key) - if val == nil || ok != nil { - return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path) - } - - return con.set(key, op.value()) -} - -func (p Patch) move(doc *container, op operation) error { - from := op.from() - - con, key := findObject(doc, from) - - if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return err - } - - err = con.remove(key) - if err != nil { - return err - } - - path := op.path() - - con, key = findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) - } - - return con.set(key, val) -} - -func (p Patch) test(doc *container, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) - } - - val, err := con.get(key) - - if err != nil { - return err - } - - if val == nil { - if op.value().raw == nil { - return nil - } - return fmt.Errorf("Testing value %s failed", path) - } - - if val.equal(op.value()) { - return nil - } - - return fmt.Errorf("Testing value %s failed", path) -} - -func (p Patch) copy(doc *container, op operation) error { - from := op.from() - - con, key := findObject(doc, from) - - if con == nil { - return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return err - } - - path := op.path() - - con, key = findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path) - } - - return con.set(key, val) -} - -// Equal indicates if 2 JSON documents have the same structural equality. -func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) - - return la.equal(lb) -} - -// DecodePatch decodes the passed JSON document as an RFC 6902 patch. -func DecodePatch(buf []byte) (Patch, error) { - var p Patch - - err := json.Unmarshal(buf, &p) - - if err != nil { - return nil, err - } - - return p, nil -} - -// Apply mutates a JSON document according to the patch, and returns the new -// document. -func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") -} - -// ApplyIndent mutates a JSON document according to the patch, and returns the new -// document indented. 
-func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { - var pd container - if doc[0] == '[' { - pd = &partialArray{} - } else { - pd = &partialDoc{} - } - - err := json.Unmarshal(doc, pd) - - if err != nil { - return nil, err - } - - err = nil - - for _, op := range p { - switch op.kind() { - case "add": - err = p.add(&pd, op) - case "remove": - err = p.remove(&pd, op) - case "replace": - err = p.replace(&pd, op) - case "move": - err = p.move(&pd, op) - case "test": - err = p.test(&pd, op) - case "copy": - err = p.copy(&pd, op) - default: - err = fmt.Errorf("Unexpected kind: %s", op.kind()) - } - - if err != nil { - return nil, err - } - } - - if indent != "" { - return json.MarshalIndent(pd, "", indent) - } - - return json.Marshal(pd) -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. - -var ( - rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") -) - -func decodePatchKey(k string) string { - return rfc6901Decoder.Replace(k) -} diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b1921efa..00000000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go deleted file mode 100644 index 110ae138427..00000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ /dev/null @@ -1,1083 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. -It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. - -This package produces a different output than the standard "encoding/json" package, -which does not operate correctly on protocol buffers. -*/ -package jsonpb - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - - stpb "github.com/golang/protobuf/ptypes/struct" -) - -// Marshaler is a configurable object for converting between -// protocol buffer objects and a JSON representation for them. -type Marshaler struct { - // Whether to render enum values as integers, as opposed to string values. - EnumsAsInts bool - - // Whether to render fields with zero values. - EmitDefaults bool - - // A string to indent each level by. The presence of this field will - // also cause a space to appear between the field separator and - // value, and for newlines to be appear between fields and array - // elements. - Indent string - - // Whether to use the original (.proto) name for fields. - OrigName bool - - // A custom URL resolver to use when marshaling Any messages to JSON. - // If unset, the default resolution strategy is to extract the - // fully-qualified type name from the type URL and pass that to - // proto.MessageType(string). - AnyResolver AnyResolver -} - -// AnyResolver takes a type URL, present in an Any message, and resolves it into -// an instance of the associated message. -type AnyResolver interface { - Resolve(typeUrl string) (proto.Message, error) -} - -func defaultResolveAny(typeUrl string) (proto.Message, error) { - // Only the part of typeUrl after the last slash is relevant. 
- mname := typeUrl - if slash := strings.LastIndex(mname, "/"); slash >= 0 { - mname = mname[slash+1:] - } - mt := proto.MessageType(mname) - if mt == nil { - return nil, fmt.Errorf("unknown message type %q", mname) - } - return reflect.New(mt.Elem()).Interface().(proto.Message), nil -} - -// JSONPBMarshaler is implemented by protobuf messages that customize the -// way they are marshaled to JSON. Messages that implement this should -// also implement JSONPBUnmarshaler so that the custom format can be -// parsed. -type JSONPBMarshaler interface { - MarshalJSONPB(*Marshaler) ([]byte, error) -} - -// JSONPBUnmarshaler is implemented by protobuf messages that customize -// the way they are unmarshaled from JSON. Messages that implement this -// should also implement JSONPBMarshaler so that the custom format can be -// produced. -type JSONPBUnmarshaler interface { - UnmarshalJSONPB(*Unmarshaler, []byte) error -} - -// Marshal marshals a protocol buffer into JSON. -func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { - writer := &errWriter{writer: out} - return m.marshalObject(writer, pb, "", "") -} - -// MarshalToString converts a protocol buffer object to JSON string. -func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { - var buf bytes.Buffer - if err := m.Marshal(&buf, pb); err != nil { - return "", err - } - return buf.String(), nil -} - -type int32Slice []int32 - -var nonFinite = map[string]float64{ - `"NaN"`: math.NaN(), - `"Infinity"`: math.Inf(1), - `"-Infinity"`: math.Inf(-1), -} - -// For sorting extensions ids to ensure stable output. -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type wkt interface { - XXX_WellKnownType() string -} - -// marshalObject writes a struct to the Writer. -func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { - if jsm, ok := v.(JSONPBMarshaler); ok { - b, err := jsm.MarshalJSONPB(m) - if err != nil { - return err - } - if typeURL != "" { - // we are marshaling this object to an Any type - var js map[string]*json.RawMessage - if err = json.Unmarshal(b, &js); err != nil { - return fmt.Errorf("type %T produced invalid JSON: %v", v, err) - } - turl, err := json.Marshal(typeURL) - if err != nil { - return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) - } - js["@type"] = (*json.RawMessage)(&turl) - if b, err = json.Marshal(js); err != nil { - return err - } - } - - out.write(string(b)) - return out.err - } - - s := reflect.ValueOf(v).Elem() - - // Handle well-known types. - if wkt, ok := v.(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - // "Wrappers use the same representation in JSON - // as the wrapped primitive type, ..." - sprop := proto.GetProperties(s.Type()) - return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) - case "Any": - // Any is a bit more involved. - return m.marshalAny(out, v, indent) - case "Duration": - // "Generated output always contains 3, 6, or 9 fractional digits, - // depending on required precision." 
- s, ns := s.Field(0).Int(), s.Field(1).Int() - d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond - x := fmt.Sprintf("%.9f", d.Seconds()) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - out.write(`"`) - out.write(x) - out.write(`s"`) - return out.err - case "Struct", "ListValue": - // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. - // TODO: pass the correct Properties if needed. - return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 3, 6 or 9 fractional digits." - s, ns := s.Field(0).Int(), s.Field(1).Int() - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - out.write(`"`) - out.write(x) - out.write(`Z"`) - return out.err - case "Value": - // Value has a single oneof. - kind := s.Field(0) - if kind.IsNil() { - // "absence of any variant indicates an error" - return errors.New("nil Value") - } - // oneof -> *T -> T -> T.F - x := kind.Elem().Elem().Field(0) - // TODO: pass the correct Properties if needed. - return m.marshalValue(out, &proto.Properties{}, x, indent) - } - } - - out.write("{") - if m.Indent != "" { - out.write("\n") - } - - firstField := true - - if typeURL != "" { - if err := m.marshalTypeURL(out, indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < s.NumField(); i++ { - value := s.Field(i) - valueField := s.Type().Field(i) - if strings.HasPrefix(valueField.Name, "XXX_") { - continue - } - - // IsNil will panic on most value kinds. - switch value.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface: - if value.IsNil() { - continue - } - } - - if !m.EmitDefaults { - switch value.Kind() { - case reflect.Bool: - if !value.Bool() { - continue - } - case reflect.Int32, reflect.Int64: - if value.Int() == 0 { - continue - } - case reflect.Uint32, reflect.Uint64: - if value.Uint() == 0 { - continue - } - case reflect.Float32, reflect.Float64: - if value.Float() == 0 { - continue - } - case reflect.String: - if value.Len() == 0 { - continue - } - case reflect.Map, reflect.Ptr, reflect.Slice: - if value.IsNil() { - continue - } - } - } - - // Oneof fields need special handling. - if valueField.Tag.Get("protobuf_oneof") != "" { - // value is an interface containing &T{real_value}. - sv := value.Elem().Elem() // interface -> *T -> T - value = sv.Field(0) - valueField = sv.Type().Field(0) - } - prop := jsonProperties(valueField, m.OrigName) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, prop, value, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if ep, ok := v.(proto.Message); ok { - extensions := proto.RegisteredExtensions(v) - // Sort extensions for stable output. 
- ids := make([]int32, 0, len(extensions)) - for id, desc := range extensions { - if !proto.HasExtension(ep, desc) { - continue - } - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - for _, id := range ids { - desc := extensions[id] - if desc == nil { - // unknown extension - continue - } - ext, extErr := proto.GetExtension(ep, desc) - if extErr != nil { - return extErr - } - value := reflect.ValueOf(ext) - var prop proto.Properties - prop.Parse(desc.Tag) - prop.JSONName = fmt.Sprintf("[%s]", desc.Name) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, &prop, value, indent); err != nil { - return err - } - firstField = false - } - - } - - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err -} - -func (m *Marshaler) writeSep(out *errWriter) { - if m.Indent != "" { - out.write(",\n") - } else { - out.write(",") - } -} - -func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." - v := reflect.ValueOf(any).Elem() - turl := v.Field(0).String() - val := v.Field(1).Bytes() - - var msg proto.Message - var err error - if m.AnyResolver != nil { - msg, err = m.AnyResolver.Resolve(turl) - } else { - msg, err = defaultResolveAny(turl) - } - if err != nil { - return err - } - - if err := proto.Unmarshal(val, msg); err != nil { - return err - } - - if _, ok := msg.(wkt); ok { - out.write("{") - if m.Indent != "" { - out.write("\n") - } - if err := m.marshalTypeURL(out, indent, turl); err != nil { - return err - } - m.writeSep(out) - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - out.write(`"value": `) - } else { - out.write(`"value":`) - } - if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { - return err - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err - } - - return m.marshalObject(out, msg, indent, turl) -} - -func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"@type":`) - if m.Indent != "" { - out.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - out.write(string(b)) - return out.err -} - -// marshalField writes field description and value to the Writer. -func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"`) - out.write(prop.JSONName) - out.write(`":`) - if m.Indent != "" { - out.write(" ") - } - if err := m.marshalValue(out, prop, v, indent); err != nil { - return err - } - return nil -} - -// marshalValue writes the value to the Writer. -func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - var err error - v = reflect.Indirect(v) - - // Handle nil pointer - if v.Kind() == reflect.Invalid { - out.write("null") - return out.err - } - - // Handle repeated elements. 
- if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - out.write("[") - comma := "" - for i := 0; i < v.Len(); i++ { - sliceVal := v.Index(i) - out.write(comma) - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { - return err - } - comma = "," - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write("]") - return out.err - } - - // Handle well-known types. - // Most are handled up in marshalObject (because 99% are messages). - if wkt, ok := v.Interface().(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "NullValue": - out.write("null") - return out.err - } - } - - // Handle enumerations. - if !m.EnumsAsInts && prop.Enum != "" { - // Unknown enum values will are stringified by the proto library as their - // value. Such values should _not_ be quoted or they will be interpreted - // as an enum string instead of their value. - enumStr := v.Interface().(fmt.Stringer).String() - var valStr string - if v.Kind() == reflect.Ptr { - valStr = strconv.Itoa(int(v.Elem().Int())) - } else { - valStr = strconv.Itoa(int(v.Int())) - } - isKnownEnum := enumStr != valStr - if isKnownEnum { - out.write(`"`) - } - out.write(enumStr) - if isKnownEnum { - out.write(`"`) - } - return out.err - } - - // Handle nested messages. - if v.Kind() == reflect.Struct { - return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") - } - - // Handle maps. - // Since Go randomizes map iteration, we sort keys for stable output. - if v.Kind() == reflect.Map { - out.write(`{`) - keys := v.MapKeys() - sort.Sort(mapKeys(keys)) - for i, k := range keys { - if i > 0 { - out.write(`,`) - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - - b, err := json.Marshal(k.Interface()) - if err != nil { - return err - } - s := string(b) - - // If the JSON is not a string value, encode it again to make it one. - if !strings.HasPrefix(s, `"`) { - b, err := json.Marshal(s) - if err != nil { - return err - } - s = string(b) - } - - out.write(s) - out.write(`:`) - if m.Indent != "" { - out.write(` `) - } - - if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil { - return err - } - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write(`}`) - return out.err - } - - // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - f := v.Float() - var sval string - switch { - case math.IsInf(f, 1): - sval = `"Infinity"` - case math.IsInf(f, -1): - sval = `"-Infinity"` - case math.IsNaN(f): - sval = `"NaN"` - } - if sval != "" { - out.write(sval) - return out.err - } - } - - // Default handling defers to the encoding/json library. - b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) - if needToQuote { - out.write(`"`) - } - out.write(string(b)) - if needToQuote { - out.write(`"`) - } - return out.err -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // Whether to allow messages to contain unknown fields, as opposed to - // failing to unmarshal. 
- AllowUnknownFields bool - - // A custom URL resolver to use when unmarshaling Any messages from JSON. - // If unset, the default resolution strategy is to extract the - // fully-qualified type name from the type URL and pass that to - // proto.MessageType(string). - AnyResolver AnyResolver -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - inputValue := json.RawMessage{} - if err := dec.Decode(&inputValue); err != nil { - return err - } - return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { - dec := json.NewDecoder(r) - return u.UnmarshalNext(dec, pb) -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - return new(Unmarshaler).UnmarshalNext(dec, pb) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func Unmarshal(r io.Reader, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(r, pb) -} - -// UnmarshalString will populate the fields of a protocol buffer based -// on a JSON string. This function is lenient and will decode any options -// permutations of the related Marshaler. -func UnmarshalString(str string, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) -} - -// unmarshalValue converts/copies a value into the target. -// prop may be nil. -func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { - targetType := target.Type() - - // Allocate memory for pointer fields. - if targetType.Kind() == reflect.Ptr { - // If input value is "null" and target is a pointer type, then the field should be treated as not set - // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. - _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) - if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { - return nil - } - target.Set(reflect.New(targetType.Elem())) - - return u.unmarshalValue(target.Elem(), inputValue, prop) - } - - if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, []byte(inputValue)) - } - - // Handle well-known types that are not pointers. - if w, ok := target.Addr().Interface().(wkt); ok { - switch w.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - return u.unmarshalValue(target.Field(0), inputValue, prop) - case "Any": - // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
- // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see - // https://github.com/golang/go/issues/14493 - var jsonFields map[string]*json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - val, ok := jsonFields["@type"] - if !ok || val == nil { - return errors.New("Any JSON doesn't have '@type'") - } - - var turl string - if err := json.Unmarshal([]byte(*val), &turl); err != nil { - return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) - } - target.Field(0).SetString(turl) - - var m proto.Message - var err error - if u.AnyResolver != nil { - m, err = u.AnyResolver.Resolve(turl) - } else { - m, err = defaultResolveAny(turl) - } - if err != nil { - return err - } - - if _, ok := m.(wkt); ok { - val, ok := jsonFields["value"] - if !ok { - return errors.New("Any JSON doesn't have 'value'") - } - - if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) - } - } else { - delete(jsonFields, "@type") - nestedProto, err := json.Marshal(jsonFields) - if err != nil { - return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) - } - - if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) - } - } - - b, err := proto.Marshal(m) - if err != nil { - return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) - } - target.Field(1).SetBytes(b) - - return nil - case "Duration": - unq, err := strconv.Unquote(string(inputValue)) - if err != nil { - return err - } - - d, err := time.ParseDuration(unq) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - target.Field(0).SetInt(s) - target.Field(1).SetInt(ns) - return nil - case "Timestamp": - unq, err := strconv.Unquote(string(inputValue)) - if err != nil { - return err - } - - t, err := time.Parse(time.RFC3339Nano, unq) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - - target.Field(0).SetInt(t.Unix()) - target.Field(1).SetInt(int64(t.Nanosecond())) - return nil - case "Struct": - var m map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &m); err != nil { - return fmt.Errorf("bad StructValue: %v", err) - } - - target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) - for k, jv := range m { - pv := &stpb.Value{} - if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { - return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) - } - target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) - } - return nil - case "ListValue": - var s []json.RawMessage - if err := json.Unmarshal(inputValue, &s); err != nil { - return fmt.Errorf("bad ListValue: %v", err) - } - - target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s), len(s)))) - for i, sv := range s { - if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { - return err - } - } - return nil - case "Value": - ivStr := string(inputValue) - if ivStr == "null" { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) - } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) - } else if v, err := strconv.Unquote(ivStr); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) - } else if v, err := strconv.ParseBool(ivStr); 
err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) - } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { - lv := &stpb.ListValue{} - target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) - return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) - } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { - sv := &stpb.Struct{} - target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) - return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) - } else { - return fmt.Errorf("unrecognized type for Value %q", ivStr) - } - return nil - } - } - - // Handle enums, which have an underlying type of int32, - // and may appear as strings. - // The case of an enum appearing as a number is handled - // at the bottom of this function. - if inputValue[0] == '"' && prop != nil && prop.Enum != "" { - vmap := proto.EnumValueMap(prop.Enum) - // Don't need to do unquoting; valid enum names - // are from a limited character set. - s := inputValue[1 : len(inputValue)-1] - n, ok := vmap[string(s)] - if !ok { - return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) - } - if target.Kind() == reflect.Ptr { // proto2 - target.Set(reflect.New(targetType.Elem())) - target = target.Elem() - } - target.SetInt(int64(n)) - return nil - } - - // Handle nested messages. - if targetType.Kind() == reflect.Struct { - var jsonFields map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { - // Be liberal in what names we accept; both orig_name and camelName are okay. - fieldNames := acceptedJSONFieldNames(prop) - - vOrig, okOrig := jsonFields[fieldNames.orig] - vCamel, okCamel := jsonFields[fieldNames.camel] - if !okOrig && !okCamel { - return nil, false - } - // If, for some reason, both are present in the data, favour the camelName. - var raw json.RawMessage - if okOrig { - raw = vOrig - delete(jsonFields, fieldNames.orig) - } - if okCamel { - raw = vCamel - delete(jsonFields, fieldNames.camel) - } - return raw, true - } - - sprops := proto.GetProperties(targetType) - for i := 0; i < target.NumField(); i++ { - ft := target.Type().Field(i) - if strings.HasPrefix(ft.Name, "XXX_") { - continue - } - - valueForField, ok := consumeField(sprops.Prop[i]) - if !ok { - continue - } - - if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { - return err - } - } - // Check for any oneof fields. - if len(jsonFields) > 0 { - for _, oop := range sprops.OneofTypes { - raw, ok := consumeField(oop.Prop) - if !ok { - continue - } - nv := reflect.New(oop.Type.Elem()) - target.Field(oop.Field).Set(nv) - if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { - return err - } - } - } - // Handle proto2 extensions. - if len(jsonFields) > 0 { - if ep, ok := target.Addr().Interface().(proto.Message); ok { - for _, ext := range proto.RegisteredExtensions(ep) { - name := fmt.Sprintf("[%s]", ext.Name) - raw, ok := jsonFields[name] - if !ok { - continue - } - delete(jsonFields, name) - nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) - if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { - return err - } - if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { - return err - } - } - } - } - if !u.AllowUnknownFields && len(jsonFields) > 0 { - // Pick any field to be the scapegoat. 
- var f string - for fname := range jsonFields { - f = fname - break - } - return fmt.Errorf("unknown field %q in %v", f, targetType) - } - return nil - } - - // Handle arrays (which aren't encoded bytes) - if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { - var slc []json.RawMessage - if err := json.Unmarshal(inputValue, &slc); err != nil { - return err - } - if slc != nil { - l := len(slc) - target.Set(reflect.MakeSlice(targetType, l, l)) - for i := 0; i < l; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { - return err - } - } - } - return nil - } - - // Handle maps (whose keys are always strings) - if targetType.Kind() == reflect.Map { - var mp map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &mp); err != nil { - return err - } - if mp != nil { - target.Set(reflect.MakeMap(targetType)) - var keyprop, valprop *proto.Properties - if prop != nil { - // These could still be nil if the protobuf metadata is broken somehow. - // TODO: This won't work because the fields are unexported. - // We should probably just reparse them. - //keyprop, valprop = prop.mkeyprop, prop.mvalprop - } - for ks, raw := range mp { - // Unmarshal map key. The core json library already decoded the key into a - // string, so we handle that specially. Other types were quoted post-serialization. - var k reflect.Value - if targetType.Key().Kind() == reflect.String { - k = reflect.ValueOf(ks) - } else { - k = reflect.New(targetType.Key()).Elem() - if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { - return err - } - } - - // Unmarshal map value. - v := reflect.New(targetType.Elem()).Elem() - if err := u.unmarshalValue(v, raw, valprop); err != nil { - return err - } - target.SetMapIndex(k, v) - } - } - return nil - } - - // 64-bit integers can be encoded as strings. In this case we drop - // the quotes and proceed as normal. - isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 - if isNum && strings.HasPrefix(string(inputValue), `"`) { - inputValue = inputValue[1 : len(inputValue)-1] - } - - // Non-finite numbers can be encoded as strings. - isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 - if isFloat { - if num, ok := nonFinite[string(inputValue)]; ok { - target.SetFloat(num) - return nil - } - } - - // Use the encoding/json for parsing other value types. - return json.Unmarshal(inputValue, target.Addr().Interface()) -} - -// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. -func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { - var prop proto.Properties - prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) - if origName || prop.JSONName == "" { - prop.JSONName = prop.OrigName - } - return &prop -} - -type fieldNames struct { - orig, camel string -} - -func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { - opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} - if prop.JSONName != "" { - opts.camel = prop.JSONName - } - return opts -} - -// Writer wrapper inspired by https://blog.golang.org/errors-are-values -type errWriter struct { - writer io.Writer - err error -} - -func (w *errWriter) write(str string) { - if w.err != nil { - return - } - _, w.err = w.writer.Write([]byte(str)) -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. 
-// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. -// -// Numeric keys are sorted in numeric order per -// https://developers.google.com/protocol-buffers/docs/proto#maps. -type mapKeys []reflect.Value - -func (s mapKeys) Len() int { return len(s) } -func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s mapKeys) Less(i, j int) bool { - if k := s[i].Kind(); k == s[j].Kind() { - switch k { - case reflect.Int32, reflect.Int64: - return s[i].Int() < s[j].Int() - case reflect.Uint32, reflect.Uint64: - return s[i].Uint() < s[j].Uint() - } - } - return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) -} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index e392575b353..00000000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,229 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. 
-func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := extendable(in.Addr().Interface()); ok { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index aa207298f99..00000000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,970 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. 
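For reference, the Clone and Merge helpers removed in clone.go above can be exercised without protoc; a minimal sketch, assuming a hypothetical hand-written Example type that supplies the struct tags and methods generated code normally provides (proto.String and proto.CompactTextString come from elsewhere in this package, outside the hunks shown):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is a hypothetical stand-in for a protoc-generated message.
type Example struct {
	Name *string  `protobuf:"bytes,1,opt,name=name"`
	Tags []string `protobuf:"bytes,2,rep,name=tags"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	dst := &Example{Name: proto.String("a"), Tags: []string{"x"}}
	src := &Example{Tags: []string{"y"}}

	proto.Merge(dst, src)             // repeated fields are appended: Tags == ["x", "y"]
	cp := proto.Clone(dst).(*Example) // deep copy: cp.Name is a fresh pointer
	*cp.Name = "b"                    // so dst.Name is still "a"
	fmt.Println(dst, cp)
}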
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. 
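The varint layout documented above (seven payload bits per byte, high bit as a continuation flag) can be tried directly through the package-level DecodeVarint above and its counterpart EncodeVarint, removed later in this patch from encode.go; a minimal sketch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 needs two varint bytes: 0xac (low seven bits plus continuation bit)
	// followed by 0x02 (the remaining bits).
	buf := proto.EncodeVarint(300)
	fmt.Printf("% x\n", buf) // ac 02

	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2
}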
-func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
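DecodeZigzag64 and DecodeZigzag32 above undo the zigzag mapping used for sint fields (0 maps to 0, -1 to 1, 1 to 2, -2 to 3, and so on), which keeps small negative values short on the wire. A minimal sketch of the transform and of a Buffer round trip; a freshly written Buffer can be read back immediately because its read index starts at zero:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func zigzag(n int64) uint64   { return uint64((n << 1) ^ (n >> 63)) }
func unzigzag(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

func main() {
	fmt.Println(zigzag(-1), zigzag(1), zigzag(-2)) // 1 2 3
	fmt.Println(unzigzag(3))                       // -2

	b := proto.NewBuffer(nil)
	n := int64(-2)
	b.EncodeZigzag64(uint64(n)) // writes the single byte 0x03
	v, _ := b.DecodeZigzag64()
	fmt.Println(int64(v)) // -2
}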
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err - } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. 
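Every value decoded by unmarshalType below is preceded by a varint key that packs the field number into the upper bits and the wire type into the low three bits, which is what skip above switches on; a minimal sketch of that split using the exported Wire* constants:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Key for field number 2 carrying a length-delimited (bytes) value.
	key := uint64(2<<3 | proto.WireBytes)

	fieldNum := key >> 3
	wireType := key & 0x7
	fmt.Println(fieldNum, wireType == uint64(proto.WireBytes)) // 2 true
}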
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. 
-const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. 
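The paired decoders above (dec_bool next to dec_proto3_bool, and so on) reflect the proto2/proto3 split: proto2 optional scalars decode into pointers so that presence is observable, while proto3 scalars decode into plain values whose zero is indistinguishable from absent. A small sketch, assuming hypothetical hand-written types and the scalar pointer helpers (proto.Bool and friends) declared elsewhere in this package:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Proto2Msg is a hypothetical proto2-style message: dec_bool stores into *bool.
type Proto2Msg struct {
	Enabled *bool `protobuf:"varint,1,opt,name=enabled"`
}

// Proto3Msg is the proto3-style shape: dec_proto3_bool stores into bool.
type Proto3Msg struct {
	Enabled bool `protobuf:"varint,1,opt,name=enabled"`
}

func main() {
	set := Proto2Msg{Enabled: proto.Bool(false)} // present and explicitly false
	unset := Proto2Msg{}                         // never set
	fmt.Println(set.Enabled != nil, unset.Enabled != nil) // true false

	fmt.Println(Proto3Msg{}.Enabled) // false, whether or not the field was sent
}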
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. 
- tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 8b84d1b22d4..00000000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,1362 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - -// EncodeVarint returns the varint encoding of x. 
-// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. 
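The field encoders above all compose the same primitives: a varint key followed by the value in its wire form. A minimal sketch writing one string field by hand and reading it back with the Buffer decoders from the removed decode.go (a freshly written Buffer decodes from index zero):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)

	// Field 1, wire type "bytes": key 0x0a, then a length-prefixed payload.
	b.EncodeVarint(uint64(1<<3 | proto.WireBytes))
	b.EncodeStringBytes("hi") // writes 0x02 'h' 'i'

	key, _ := b.DecodeVarint()
	s, _ := b.DecodeStringBytes()
	fmt.Println(key>>3, key&0x7, s) // 1 2 hi
}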
-type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. 
-func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
- - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). 
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index 2ed1cf59666..00000000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index eaad2183126..00000000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,587 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. 
- p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. 
-type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok = extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. 
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. 
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 1c225504a01..00000000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,897 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. 
Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m 
*Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. 
-func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index fd982decd66..00000000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,311 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. 
- */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() - case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err - } - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. 
- ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. 
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index fb512e2e16d..00000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,484 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. 
-// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. 
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. 
-func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index 6b5567d47cd..00000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,270 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. 
-func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. 
-type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index ec2289c0058..00000000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,872 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. 
-type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding.
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. 
-func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. 
- p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 965876bf033..00000000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,854 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. 
because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := tm.writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index 5e14513f28c..00000000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,895 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func 
isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) 
- s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. 
- if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". 
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index b2af97f4a98..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,139 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - -import ( - "fmt" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" -) - -const googleApis = "type.googleapis.com/" - -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return any.TypeUrl[slash+1:], nil -} - -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) - if err != nil { - return nil, err - } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message -} - -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) - if err != nil { - return nil, err - } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) - } - return reflect.New(t.Elem()).Interface().(proto.Message), nil -} - -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. -// -// pb can be a proto.Message, or a *DynamicAny. -func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { - var err error - d.Message, err = Empty(any) - if err != nil { - return err - } - } - return UnmarshalAny(any, d.Message) - } - - aname, err := AnyMessageName(any) - if err != nil { - return err - } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) - } - return proto.Unmarshal(any.Value, pb) -} - -// Is returns true if any value contains a given message type. 
-func Is(any *any.Any, pb proto.Message) bool { - aname, err := AnyMessageName(any) - if err != nil { - return false - } - - return aname == proto.MessageName(pb) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index f34601723de..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,178 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto - -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/any.proto - -It has these top-level messages: - Any -*/ -package any - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. 
- // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: - // - // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } - -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl - } - return "" -} - -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index c0d595da7ab..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package ptypes contains code for interacting with well-known types. -*/ -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index 65cb0f8eb5f..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,102 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" - - durpb "github.com/golang/protobuf/ptypes/duration" -) - -const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index b2410a098eb..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,144 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto - -/* -Package duration is a generated protocol buffer package. 
- -It is generated from these files: - google/protobuf/duration.proto - -It has these top-level messages: - Duration -*/ -package duration - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go deleted file mode 100644 index 4cfe6081879..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ /dev/null @@ -1,380 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/struct.proto - -/* -Package structpb is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/struct.proto - -It has these top-level messages: - Struct - Value - ListValue -*/ -package structpb - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// `NullValue` is a singleton enumeration to represent the null value for the -// `Value` type union. -// -// The JSON representation for `NullValue` is JSON `null`. -type NullValue int32 - -const ( - // Null value. 
- NullValue_NULL_VALUE NullValue = 0 -) - -var NullValue_name = map[int32]string{ - 0: "NULL_VALUE", -} -var NullValue_value = map[string]int32{ - "NULL_VALUE": 0, -} - -func (x NullValue) String() string { - return proto.EnumName(NullValue_name, int32(x)) -} -func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (NullValue) XXX_WellKnownType() string { return "NullValue" } - -// `Struct` represents a structured data value, consisting of fields -// which map to dynamically typed values. In some languages, `Struct` -// might be supported by a native representation. For example, in -// scripting languages like JS a struct is represented as an -// object. The details of that representation are described together -// with the proto support for the language. -// -// The JSON representation for `Struct` is JSON object. -type Struct struct { - // Unordered map of dynamically typed values. - Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` -} - -func (m *Struct) Reset() { *m = Struct{} } -func (m *Struct) String() string { return proto.CompactTextString(m) } -func (*Struct) ProtoMessage() {} -func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Struct) XXX_WellKnownType() string { return "Struct" } - -func (m *Struct) GetFields() map[string]*Value { - if m != nil { - return m.Fields - } - return nil -} - -// `Value` represents a dynamically typed value which can be either -// null, a number, a string, a boolean, a recursive struct value, or a -// list of values. A producer of value is expected to set one of that -// variants, absence of any variant indicates an error. -// -// The JSON representation for `Value` is JSON value. -type Value struct { - // The kind of value. 
- // - // Types that are valid to be assigned to Kind: - // *Value_NullValue - // *Value_NumberValue - // *Value_StringValue - // *Value_BoolValue - // *Value_StructValue - // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` -} - -func (m *Value) Reset() { *m = Value{} } -func (m *Value) String() string { return proto.CompactTextString(m) } -func (*Value) ProtoMessage() {} -func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (*Value) XXX_WellKnownType() string { return "Value" } - -type isValue_Kind interface { - isValue_Kind() -} - -type Value_NullValue struct { - NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` -} -type Value_NumberValue struct { - NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"` -} -type Value_StringValue struct { - StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` -} -type Value_BoolValue struct { - BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` -} -type Value_StructValue struct { - StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` -} -type Value_ListValue struct { - ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` -} - -func (*Value_NullValue) isValue_Kind() {} -func (*Value_NumberValue) isValue_Kind() {} -func (*Value_StringValue) isValue_Kind() {} -func (*Value_BoolValue) isValue_Kind() {} -func (*Value_StructValue) isValue_Kind() {} -func (*Value_ListValue) isValue_Kind() {} - -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind - } - return nil -} - -func (m *Value) GetNullValue() NullValue { - if x, ok := m.GetKind().(*Value_NullValue); ok { - return x.NullValue - } - return NullValue_NULL_VALUE -} - -func (m *Value) GetNumberValue() float64 { - if x, ok := m.GetKind().(*Value_NumberValue); ok { - return x.NumberValue - } - return 0 -} - -func (m *Value) GetStringValue() string { - if x, ok := m.GetKind().(*Value_StringValue); ok { - return x.StringValue - } - return "" -} - -func (m *Value) GetBoolValue() bool { - if x, ok := m.GetKind().(*Value_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (m *Value) GetStructValue() *Struct { - if x, ok := m.GetKind().(*Value_StructValue); ok { - return x.StructValue - } - return nil -} - -func (m *Value) GetListValue() *ListValue { - if x, ok := m.GetKind().(*Value_ListValue); ok { - return x.ListValue - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ - (*Value_NullValue)(nil), - (*Value_NumberValue)(nil), - (*Value_StringValue)(nil), - (*Value_BoolValue)(nil), - (*Value_StructValue)(nil), - (*Value_ListValue)(nil), - } -} - -func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Value) - // kind - switch x := m.Kind.(type) { - case *Value_NullValue: - b.EncodeVarint(1<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.NullValue)) - case *Value_NumberValue: - b.EncodeVarint(2<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.NumberValue)) - case *Value_StringValue: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.StringValue) - case *Value_BoolValue: - t := uint64(0) - if x.BoolValue { - t = 1 - } - b.EncodeVarint(4<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *Value_StructValue: - b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.StructValue); err != nil { - return err - } - case *Value_ListValue: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ListValue); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Value.Kind has unexpected type %T", x) - } - return nil -} - -func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Value) - switch tag { - case 1: // kind.null_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Kind = &Value_NullValue{NullValue(x)} - return true, err - case 2: // kind.number_value - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Kind = &Value_NumberValue{math.Float64frombits(x)} - return true, err - case 3: // kind.string_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Kind = &Value_StringValue{x} - return true, err - case 4: // kind.bool_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Kind = &Value_BoolValue{x != 0} - return true, err - case 5: // kind.struct_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Struct) - err := b.DecodeMessage(msg) - m.Kind = &Value_StructValue{msg} - return true, err - case 6: // kind.list_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ListValue) - err := b.DecodeMessage(msg) - m.Kind = &Value_ListValue{msg} - return true, err - default: - return false, nil - } -} - -func _Value_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Value) - // kind - switch x := m.Kind.(type) { - case *Value_NullValue: - n += proto.SizeVarint(1<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.NullValue)) - case *Value_NumberValue: - n += proto.SizeVarint(2<<3 | proto.WireFixed64) - n += 8 - case *Value_StringValue: - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.StringValue))) - n += len(x.StringValue) - case *Value_BoolValue: - n += proto.SizeVarint(4<<3 | proto.WireVarint) - n += 1 - case *Value_StructValue: - s := proto.Size(x.StructValue) - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) 
- n += s - case *Value_ListValue: - s := proto.Size(x.ListValue) - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// `ListValue` is a wrapper around a repeated field of values. -// -// The JSON representation for `ListValue` is JSON array. -type ListValue struct { - // Repeated field of dynamically typed values. - Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` -} - -func (m *ListValue) Reset() { *m = ListValue{} } -func (m *ListValue) String() string { return proto.CompactTextString(m) } -func (*ListValue) ProtoMessage() {} -func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (*ListValue) XXX_WellKnownType() string { return "ListValue" } - -func (m *ListValue) GetValues() []*Value { - if m != nil { - return m.Values - } - return nil -} - -func init() { - proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") - proto.RegisterType((*Value)(nil), "google.protobuf.Value") - proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") - proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) -} - -func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 417 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, - 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, - 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, - 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, - 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, - 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, - 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, - 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, - 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, - 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, - 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, - 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, - 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, - 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, - 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, - 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, - 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, - 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, - 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, - 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, - 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, - 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 
0x28, 0x4a, 0xfe, 0x55, 0xa4, - 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, - 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, - 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, - 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, - 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 47f10dbc2cc..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,134 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" - - tspb "github.com/golang/protobuf/ptypes/timestamp" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &tspb.Timestamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index e23e4a25daf..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,160 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto - -/* -Package timestamp is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/timestamp.proto - -It has these top-level messages: - Timestamp -*/ -package timestamp - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
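(Editorial aside, not part of the vendored file or of this patch: a minimal sketch of the timestamp helpers deleted above, ptypes.TimestampProto and ptypes.Timestamp, again assuming the vendored github.com/golang/protobuf/ptypes import path; the example instant is arbitrary.)

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// TimestampProto converts a time.Time to a Timestamp message and returns
	// an error for instants outside [0001-01-01, 10000-01-01).
	ts, err := ptypes.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 0, time.UTC))
	if err != nil {
		fmt.Println("unrepresentable time:", err)
		return
	}

	// Timestamp converts back to a time.Time in UTC, validating the message.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		fmt.Println("invalid timestamp:", err)
		return
	}
	fmt.Println(t.Format(time.RFC3339)) // 2017-01-15T01:30:15Z
}
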
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. 
Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) -// to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE deleted file mode 100644 index 6b0b1270ff0..00000000000 --- a/vendor/github.com/googleapis/gnostic/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go deleted file mode 100644 index 0e32451a320..00000000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ /dev/null @@ -1,8728 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -package openapi_v2 - -import ( - "fmt" - "github.com/googleapis/gnostic/compiler" - "gopkg.in/yaml.v2" - "regexp" - "strings" -) - -// Version returns the package name (and OpenAPI version). -func Version() string { - return "openapi_v2" -} - -// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. -func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { - errors := make([]error, 0) - x := &AdditionalPropertiesItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) - if matchingError == nil { - x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // bool boolean = 2; - boolValue, ok := in.(bool) - if ok { - x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewAny creates an object of type Any if possible, returning an error if not. -func NewAny(in interface{}, context *compiler.Context) (*Any, error) { - errors := make([]error, 0) - x := &Any{} - bytes, _ := yaml.Marshal(in) - x.Yaml = string(bytes) - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. 
-func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { - errors := make([]error, 0) - x := &ApiKeySecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [apiKey] - if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header query] - if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// 
NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. -func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { - errors := make([]error, 0) - x := &BasicAuthenticationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [basic] - if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
-func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { - errors := make([]error, 0) - x := &BodyParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "schema"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "required", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [body] - if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool required = 4; - v4 := compiler.MapValueForKey(m, "required") - if v4 != nil { - x.Required, ok = v4.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema schema = 5; - v5 := compiler.MapValueForKey(m, "schema") - if v5 != nil { - var err error - x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewContact creates an object of type Contact if 
possible, returning an error if not. -func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { - errors := make([]error, 0) - x := &Contact{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"email", "name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string email = 3; - v3 := compiler.MapValueForKey(m, "email") - if v3 != nil { - x.Email, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefault creates an object of type Default if possible, returning an error if not. 
-func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { - errors := make([]error, 0) - x := &Default{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefinitions creates an object of type Definitions if possible, returning an error if not. -func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { - errors := make([]error, 0) - x := &Definitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDocument creates an object of type Document if possible, returning an error if not. 
-func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { - errors := make([]error, 0) - x := &Document{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"info", "paths", "swagger"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string swagger = 1; - v1 := compiler.MapValueForKey(m, "swagger") - if v1 != nil { - x.Swagger, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [2.0] - if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Info info = 2; - v2 := compiler.MapValueForKey(m, "info") - if v2 != nil { - var err error - x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) - if err != nil { - errors = append(errors, err) - } - } - // string host = 3; - v3 := compiler.MapValueForKey(m, "host") - if v3 != nil { - x.Host, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string base_path = 4; - v4 := compiler.MapValueForKey(m, "basePath") - if v4 != nil { - x.BasePath, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string schemes = 5; - v5 := compiler.MapValueForKey(m, "schemes") - if v5 != nil { - v, ok := v5.([]interface{}) - if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 6; - v6 := compiler.MapValueForKey(m, "consumes") - if v6 != nil { - v, ok := v6.([]interface{}) - if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // 
repeated string produces = 7; - v7 := compiler.MapValueForKey(m, "produces") - if v7 != nil { - v, ok := v7.([]interface{}) - if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Paths paths = 8; - v8 := compiler.MapValueForKey(m, "paths") - if v8 != nil { - var err error - x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) - if err != nil { - errors = append(errors, err) - } - } - // Definitions definitions = 9; - v9 := compiler.MapValueForKey(m, "definitions") - if v9 != nil { - var err error - x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) - if err != nil { - errors = append(errors, err) - } - } - // ParameterDefinitions parameters = 10; - v10 := compiler.MapValueForKey(m, "parameters") - if v10 != nil { - var err error - x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - } - // ResponseDefinitions responses = 11; - v11 := compiler.MapValueForKey(m, "responses") - if v11 != nil { - var err error - x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // SecurityDefinitions security_definitions = 13; - v13 := compiler.MapValueForKey(m, "securityDefinitions") - if v13 != nil { - var err error - x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Tag tags = 14; - v14 := compiler.MapValueForKey(m, "tags") - if v14 != nil { - // repeated Tag - x.Tags = make([]*Tag, 0) - a, ok := v14.([]interface{}) - if ok { - for _, item := range a { - y, err := NewTag(item, compiler.NewContext("tags", context)) - if err != nil { - errors = append(errors, err) - } - x.Tags = append(x.Tags, y) - } - } - } - // ExternalDocs external_docs = 15; - v15 := compiler.MapValueForKey(m, "externalDocs") - if v15 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 16; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - 
return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExamples creates an object of type Examples if possible, returning an error if not. -func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { - errors := make([]error, 0) - x := &Examples{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. -func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { - errors := make([]error, 0) - x := &ExternalDocs{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"url"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. -func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { - errors := make([]error, 0) - x := &FileSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string format = 1; - v1 := compiler.MapValueForKey(m, "format") - if v1 != nil { - x.Format, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 2; - v2 := compiler.MapValueForKey(m, "title") - if v2 != nil { - x.Title, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 4; - v4 := compiler.MapValueForKey(m, "default") - if v4 != nil { - var err error - x.Default, err = NewAny(v4, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string required = 5; - v5 := compiler.MapValueForKey(m, "required") - if v5 != nil { - v, ok := v5.([]interface{}) - if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [file] - if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 7; - v7 := compiler.MapValueForKey(m, "readOnly") - if v7 != nil { - x.ReadOnly, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has 
unexpected value for readOnly: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 8; - v8 := compiler.MapValueForKey(m, "externalDocs") - if v8 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 9; - v9 := compiler.MapValueForKey(m, "example") - if v9 != nil { - var err error - x.Example, err = NewAny(v9, compiler.NewContext("example", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. -func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { - errors := make([]error, 0) - x := &FormDataParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [formData] - if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has 
unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array file] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := compiler.MapValueForKey(m, "format") - if v7 != nil { - x.Format, ok = v7.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = v9.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) - errors = 
append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := v16.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; - v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = v17.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := v19.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = v20.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - 
x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeader creates an object of type Header if possible, returning an error if not. -func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { - errors := make([]error, 0) - x := &Header{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 
!= nil { - x.CollectionFormat, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.Maximum = v6 - case float32: - x.Maximum = float64(v6) - case uint64: - x.Maximum = float64(v6) - case uint32: - x.Maximum = float64(v6) - case int64: - x.Maximum = float64(v6) - case int32: - x.Maximum = float64(v6) - case int: - x.Maximum = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v7 != nil { - x.ExclusiveMaximum, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - switch v8 := v8.(type) { - case float64: - x.Minimum = v8 - case float32: - x.Minimum = float64(v8) - case uint64: - x.Minimum = float64(v8) - case uint32: - x.Minimum = float64(v8) - case int64: - x.Minimum = float64(v8) - case int32: - x.Minimum = float64(v8) - case int: - x.Minimum = float64(v8) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = v9.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := v10.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = v12.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := v13.(int) - if ok { - x.MaxItems = 
int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = v15.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v16.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - switch v17 := v17.(type) { - case float64: - x.MultipleOf = v17 - case float32: - x.MultipleOf = float64(v17) - case uint64: - x.MultipleOf = float64(v17) - case uint32: - x.MultipleOf = float64(v17) - case int64: - x.MultipleOf = float64(v17) - case int32: - x.MultipleOf = float64(v17) - case int: - x.MultipleOf = float64(v17) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 18; - v18 := compiler.MapValueForKey(m, "description") - if v18 != nil { - x.Description, ok = v18.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 19; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. 
-func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { - errors := make([]error, 0) - x := &HeaderParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header] - if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 5; - v5 := compiler.MapValueForKey(m, "type") - if v5 != nil { - x.Type, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 6; - v6 := compiler.MapValueForKey(m, "format") - if v6 != nil { - x.Format, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, 
"collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = v8.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - switch v10 := v10.(type) { - case float64: - x.Maximum = v10 - case float32: - x.Maximum = float64(v10) - case uint64: - x.Maximum = float64(v10) - case uint32: - x.Maximum = float64(v10) - case int64: - x.Maximum = float64(v10) - case int32: - x.Maximum = float64(v10) - case int: - x.Maximum = float64(v10) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - switch v12 := v12.(type) { - case float64: - x.Minimum = v12 - case float32: - x.Minimum = float64(v12) - case uint64: - x.Minimum = float64(v12) - case uint32: - x.Minimum = float64(v12) - case int64: - x.Minimum = float64(v12) - case int32: - x.Minimum = float64(v12) - case int: - x.Minimum = float64(v12) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = v13.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = v16.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - 
if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = v19.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - switch v21 := v21.(type) { - case float64: - x.MultipleOf = v21 - case float32: - x.MultipleOf = float64(v21) - case uint64: - x.MultipleOf = float64(v21) - case uint32: - x.MultipleOf = float64(v21) - case int64: - x.MultipleOf = float64(v21) - case int32: - x.MultipleOf = float64(v21) - case int: - x.MultipleOf = float64(v21) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeaders creates an object of type Headers if possible, returning an error if not. 
-func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { - errors := make([]error, 0) - x := &Headers{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedHeader additional_properties = 1; - // MAP: Header - x.AdditionalProperties = make([]*NamedHeader, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedHeader{} - pair.Name = k - var err error - pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewInfo creates an object of type Info if possible, returning an error if not. -func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { - errors := make([]error, 0) - x := &Info{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"title", "version"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string title = 1; - v1 := compiler.MapValueForKey(m, "title") - if v1 != nil { - x.Title, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string version = 2; - v2 := compiler.MapValueForKey(m, "version") - if v2 != nil { - x.Version, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string terms_of_service = 4; - v4 := compiler.MapValueForKey(m, "termsOfService") - if v4 != nil { - x.TermsOfService, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Contact contact = 5; - v5 := compiler.MapValueForKey(m, "contact") - if v5 != nil { - var err error - x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) - if err != nil { - errors = append(errors, err) - } - } - // License license = 6; - v6 := compiler.MapValueForKey(m, "license") - if v6 != nil { - var err error - x.License, err = NewLicense(v6, 
compiler.NewContext("license", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 7; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. -func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { - errors := make([]error, 0) - x := &ItemsItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.Schema = make([]*Schema, 0) - y, err := NewSchema(m, compiler.NewContext("", context)) - if err != nil { - return nil, err - } - x.Schema = append(x.Schema, y) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. -func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { - errors := make([]error, 0) - x := &JsonReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"$ref"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"$ref", "description"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewLicense creates an object of type License if possible, returning an error if not. 
-func NewLicense(in interface{}, context *compiler.Context) (*License, error) { - errors := make([]error, 0) - x := &License{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. 
-func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { - errors := make([]error, 0) - x := &NamedAny{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewAny(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. -func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { - errors := make([]error, 0) - x := &NamedHeader{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Header value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
-func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { - errors := make([]error, 0) - x := &NamedParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Parameter value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. -func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { - errors := make([]error, 0) - x := &NamedPathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PathItem value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
-func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) { - errors := make([]error, 0) - x := &NamedResponse{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Response value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. -func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { - errors := make([]error, 0) - x := &NamedResponseValue{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ResponseValue value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
-func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { - errors := make([]error, 0) - x := &NamedSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. -func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { - errors := make([]error, 0) - x := &NamedSecurityDefinitionsItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SecurityDefinitionsItem value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
-func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { - errors := make([]error, 0) - x := &NamedString{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - x.Value, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. -func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { - errors := make([]error, 0) - x := &NamedStringArray{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // StringArray value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
-func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { - errors := make([]error, 0) - x := &NonBodyParameter{} - matched := false - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // HeaderParameterSubSchema header_parameter_sub_schema = 1; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // QueryParameterSubSchema query_parameter_sub_schema = 3; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // PathParameterSubSchema path_parameter_sub_schema = 4; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
-func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { - errors := make([]error, 0) - x := &Oauth2AccessCodeSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [accessCode] - if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string authorization_url = 4; - v4 := compiler.MapValueForKey(m, "authorizationUrl") - if v4 != nil { - x.AuthorizationUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string token_url = 5; - v5 := compiler.MapValueForKey(m, "tokenUrl") - if v5 != nil { - x.TokenUrl, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 6; - v6 := compiler.MapValueForKey(m, "description") - if v6 != nil { - x.Description, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 7; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := 
compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. -func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { - errors := make([]error, 0) - x := &Oauth2ApplicationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [application] - if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string token_url = 4; - v4 := compiler.MapValueForKey(m, "tokenUrl") - if v4 != nil { - x.TokenUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string 
description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. -func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { - errors := make([]error, 0) - x := &Oauth2ImplicitSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"authorizationUrl", "flow", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [implicit] - if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = 
NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string authorization_url = 4; - v4 := compiler.MapValueForKey(m, "authorizationUrl") - if v4 != nil { - x.AuthorizationUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. -func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { - errors := make([]error, 0) - x := &Oauth2PasswordSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, 
compiler.NewError(context, message)) - } - // check for valid enum values - // [password] - if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string token_url = 4; - v4 := compiler.MapValueForKey(m, "tokenUrl") - if v4 != nil { - x.TokenUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. -func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { - errors := make([]error, 0) - x := &Oauth2Scopes{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedString additional_properties = 1; - // MAP: string - x.AdditionalProperties = make([]*NamedString, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedString{} - pair.Name = k - pair.Value = v.(string) - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOperation creates an object of type Operation if possible, returning an error if not. 
-func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { - errors := make([]error, 0) - x := &Operation{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"responses"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated string tags = 1; - v1 := compiler.MapValueForKey(m, "tags") - if v1 != nil { - v, ok := v1.([]interface{}) - if ok { - x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string summary = 2; - v2 := compiler.MapValueForKey(m, "summary") - if v2 != nil { - x.Summary, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 4; - v4 := compiler.MapValueForKey(m, "externalDocs") - if v4 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // string operation_id = 5; - v5 := compiler.MapValueForKey(m, "operationId") - if v5 != nil { - x.OperationId, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string produces = 6; - v6 := compiler.MapValueForKey(m, "produces") - if v6 != nil { - v, ok := v6.([]interface{}) - if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 7; - v7 := compiler.MapValueForKey(m, "consumes") - if v7 != nil { - v, ok := v7.([]interface{}) - if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated ParametersItem parameters = 8; - v8 := compiler.MapValueForKey(m, "parameters") - if v8 != nil { - // repeated ParametersItem - x.Parameters = 
make([]*ParametersItem, 0) - a, ok := v8.([]interface{}) - if ok { - for _, item := range a { - y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // Responses responses = 9; - v9 := compiler.MapValueForKey(m, "responses") - if v9 != nil { - var err error - x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string schemes = 10; - v10 := compiler.MapValueForKey(m, "schemes") - if v10 != nil { - v, ok := v10.([]interface{}) - if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool deprecated = 11; - v11 := compiler.MapValueForKey(m, "deprecated") - if v11 != nil { - x.Deprecated, ok = v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // repeated NamedAny vendor_extension = 13; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameter creates an object of type Parameter if possible, returning an error if not. 
-func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { - errors := make([]error, 0) - x := &Parameter{} - matched := false - // BodyParameter body_parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) - if matchingError == nil { - x.Oneof = &Parameter_BodyParameter{BodyParameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // NonBodyParameter non_body_parameter = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) - if matchingError == nil { - x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. -func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { - errors := make([]error, 0) - x := &ParameterDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedParameter additional_properties = 1; - // MAP: Parameter - x.AdditionalProperties = make([]*NamedParameter, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedParameter{} - pair.Name = k - var err error - pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
-func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { - errors := make([]error, 0) - x := &ParametersItem{} - matched := false - // Parameter parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) - if matchingError == nil { - x.Oneof = &ParametersItem_Parameter{Parameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) - if matchingError == nil { - x.Oneof = &ParametersItem_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathItem creates an object of type PathItem if possible, returning an error if not. -func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { - errors := make([]error, 0) - x := &PathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Operation get = 2; - v2 := compiler.MapValueForKey(m, "get") - if v2 != nil { - var err error - x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation put = 3; - v3 := compiler.MapValueForKey(m, "put") - if v3 != nil { - var err error - x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation post = 4; - v4 := compiler.MapValueForKey(m, "post") - if v4 != nil { - var err error - x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation delete = 5; - v5 := compiler.MapValueForKey(m, "delete") - if v5 != nil { - var err error - x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation options = 6; - v6 := compiler.MapValueForKey(m, "options") - if v6 != nil { - var err error - x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation head = 7; - v7 := compiler.MapValueForKey(m, "head") - if v7 != nil { - var err error 
- x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation patch = 8; - v8 := compiler.MapValueForKey(m, "patch") - if v8 != nil { - var err error - x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated ParametersItem parameters = 9; - v9 := compiler.MapValueForKey(m, "parameters") - if v9 != nil { - // repeated ParametersItem - x.Parameters = make([]*ParametersItem, 0) - a, ok := v9.([]interface{}) - if ok { - for _, item := range a { - y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. -func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { - errors := make([]error, 0) - x := &PathParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"required"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, 
compiler.NewError(context, message)) - } - // check for valid enum values - // [path] - if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 5; - v5 := compiler.MapValueForKey(m, "type") - if v5 != nil { - x.Type, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 6; - v6 := compiler.MapValueForKey(m, "format") - if v6 != nil { - x.Format, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, "collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = v8.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - switch v10 := v10.(type) { - case float64: - x.Maximum = v10 - case float32: - x.Maximum = float64(v10) - case uint64: - x.Maximum = float64(v10) - case uint32: - x.Maximum = float64(v10) - case int64: - x.Maximum = float64(v10) - case int32: - x.Maximum = float64(v10) - case int: - x.Maximum = float64(v10) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = 
v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - switch v12 := v12.(type) { - case float64: - x.Minimum = v12 - case float32: - x.Minimum = float64(v12) - case uint64: - x.Minimum = float64(v12) - case uint32: - x.Minimum = float64(v12) - case int64: - x.Minimum = float64(v12) - case int32: - x.Minimum = float64(v12) - case int: - x.Minimum = float64(v12) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = v13.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = v16.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = v19.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - switch v21 := v21.(type) { - case float64: - x.MultipleOf = v21 - case float32: - x.MultipleOf = float64(v21) - case uint64: - x.MultipleOf = float64(v21) - case uint32: - x.MultipleOf = 
float64(v21) - case int64: - x.MultipleOf = float64(v21) - case int32: - x.MultipleOf = float64(v21) - case int: - x.MultipleOf = float64(v21) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPaths creates an object of type Paths if possible, returning an error if not. -func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { - errors := make([]error, 0) - x := &Paths{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern0, pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedAny vendor_extension = 1; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - // repeated NamedPathItem path = 2; - // MAP: PathItem ^/ - x.Path = make([]*NamedPathItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "/") { - pair := &NamedPathItem{} - pair.Name = k - var err error - pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.Path = append(x.Path, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
-func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { - errors := make([]error, 0) - x := &PrimitivesItems{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 != nil { - x.CollectionFormat, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.Maximum = v6 - case float32: - x.Maximum = float64(v6) - case uint64: - x.Maximum = float64(v6) - case uint32: - x.Maximum = float64(v6) - case int64: - x.Maximum = float64(v6) - case int32: - x.Maximum = float64(v6) - case int: - x.Maximum = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - 
if v7 != nil { - x.ExclusiveMaximum, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - switch v8 := v8.(type) { - case float64: - x.Minimum = v8 - case float32: - x.Minimum = float64(v8) - case uint64: - x.Minimum = float64(v8) - case uint32: - x.Minimum = float64(v8) - case int64: - x.Minimum = float64(v8) - case int32: - x.Minimum = float64(v8) - case int: - x.Minimum = float64(v8) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = v9.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := v10.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = v12.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := v13.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = v15.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v16.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - switch v17 := v17.(type) { - case float64: - x.MultipleOf = v17 - case float32: - x.MultipleOf = float64(v17) - case uint64: - x.MultipleOf = float64(v17) - case uint32: - 
x.MultipleOf = float64(v17) - case int64: - x.MultipleOf = float64(v17) - case int32: - x.MultipleOf = float64(v17) - case int: - x.MultipleOf = float64(v17) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 18; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewProperties creates an object of type Properties if possible, returning an error if not. -func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { - errors := make([]error, 0) - x := &Properties{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
-func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { - errors := make([]error, 0) - x := &QueryParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [query] - if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := compiler.MapValueForKey(m, "format") - if v7 != nil { - x.Format, ok = v7.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - 
} - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = v9.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := v16.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; 
- v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = v17.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := v19.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = v20.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponse creates an object of type Response if possible, returning an error if not. 
-func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { - errors := make([]error, 0) - x := &Response{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"description"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "examples", "headers", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SchemaItem schema = 2; - v2 := compiler.MapValueForKey(m, "schema") - if v2 != nil { - var err error - x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) - if err != nil { - errors = append(errors, err) - } - } - // Headers headers = 3; - v3 := compiler.MapValueForKey(m, "headers") - if v3 != nil { - var err error - x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) - if err != nil { - errors = append(errors, err) - } - } - // Examples examples = 4; - v4 := compiler.MapValueForKey(m, "examples") - if v4 != nil { - var err error - x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
-func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { - errors := make([]error, 0) - x := &ResponseDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedResponse additional_properties = 1; - // MAP: Response - x.AdditionalProperties = make([]*NamedResponse, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedResponse{} - pair.Name = k - var err error - pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. -func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { - errors := make([]error, 0) - x := &ResponseValue{} - matched := false - // Response response = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewResponse(m, compiler.NewContext("response", context)) - if matchingError == nil { - x.Oneof = &ResponseValue_Response{Response: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) - if matchingError == nil { - x.Oneof = &ResponseValue_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponses creates an object of type Responses if possible, returning an error if not. 
-func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { - errors := make([]error, 0) - x := &Responses{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern2, pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedResponseValue response_code = 1; - // MAP: ResponseValue ^([0-9]{3})$|^(default)$ - x.ResponseCode = make([]*NamedResponseValue, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if pattern2.MatchString(k) { - pair := &NamedResponseValue{} - pair.Name = k - var err error - pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.ResponseCode = append(x.ResponseCode, pair) - } - } - } - // repeated NamedAny vendor_extension = 2; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchema creates an object of type Schema if possible, returning an error if not. 
-func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { - errors := make([]error, 0) - x := &Schema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 3; - v3 := compiler.MapValueForKey(m, "title") - if v3 != nil { - x.Title, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float multiple_of = 6; - v6 := compiler.MapValueForKey(m, "multipleOf") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.MultipleOf = v6 - case float32: - x.MultipleOf = float64(v6) - case uint64: - x.MultipleOf = float64(v6) - case uint32: - x.MultipleOf = float64(v6) - case int64: - x.MultipleOf = float64(v6) - case int32: - x.MultipleOf = float64(v6) - case int: - x.MultipleOf = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float maximum = 7; - v7 := compiler.MapValueForKey(m, "maximum") - if v7 != nil { - switch v7 := v7.(type) { - case float64: - x.Maximum = v7 - case float32: - x.Maximum = float64(v7) - case uint64: - x.Maximum = float64(v7) - case uint32: - x.Maximum = float64(v7) - case int64: - x.Maximum = float64(v7) - case int32: - x.Maximum = float64(v7) - case int: - x.Maximum = float64(v7) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool 
exclusive_maximum = 8; - v8 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v8 != nil { - x.ExclusiveMaximum, ok = v8.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 9; - v9 := compiler.MapValueForKey(m, "minimum") - if v9 != nil { - switch v9 := v9.(type) { - case float64: - x.Minimum = v9 - case float32: - x.Minimum = float64(v9) - case uint64: - x.Minimum = float64(v9) - case uint32: - x.Minimum = float64(v9) - case int64: - x.Minimum = float64(v9) - case int32: - x.Minimum = float64(v9) - case int: - x.Minimum = float64(v9) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 10; - v10 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v10 != nil { - x.ExclusiveMinimum, ok = v10.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 11; - v11 := compiler.MapValueForKey(m, "maxLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 12; - v12 := compiler.MapValueForKey(m, "minLength") - if v12 != nil { - t, ok := v12.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 13; - v13 := compiler.MapValueForKey(m, "pattern") - if v13 != nil { - x.Pattern, ok = v13.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 14; - v14 := compiler.MapValueForKey(m, "maxItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 15; - v15 := compiler.MapValueForKey(m, "minItems") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 16; - v16 := compiler.MapValueForKey(m, "uniqueItems") - if v16 != nil { - x.UniqueItems, ok = v16.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_properties = 17; - v17 := compiler.MapValueForKey(m, "maxProperties") - if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_properties = 18; - v18 := compiler.MapValueForKey(m, "minProperties") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinProperties = int64(t) - } else { - message := fmt.Sprintf("has 
unexpected value for minProperties: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string required = 19; - v19 := compiler.MapValueForKey(m, "required") - if v19 != nil { - v, ok := v19.([]interface{}) - if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // AdditionalPropertiesItem additional_properties = 21; - v21 := compiler.MapValueForKey(m, "additionalProperties") - if v21 != nil { - var err error - x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) - if err != nil { - errors = append(errors, err) - } - } - // TypeItem type = 22; - v22 := compiler.MapValueForKey(m, "type") - if v22 != nil { - var err error - x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) - if err != nil { - errors = append(errors, err) - } - } - // ItemsItem items = 23; - v23 := compiler.MapValueForKey(m, "items") - if v23 != nil { - var err error - x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Schema all_of = 24; - v24 := compiler.MapValueForKey(m, "allOf") - if v24 != nil { - // repeated Schema - x.AllOf = make([]*Schema, 0) - a, ok := v24.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSchema(item, compiler.NewContext("allOf", context)) - if err != nil { - errors = append(errors, err) - } - x.AllOf = append(x.AllOf, y) - } - } - } - // Properties properties = 25; - v25 := compiler.MapValueForKey(m, "properties") - if v25 != nil { - var err error - x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) - if err != nil { - errors = append(errors, err) - } - } - // string discriminator = 26; - v26 := compiler.MapValueForKey(m, "discriminator") - if v26 != nil { - x.Discriminator, ok = v26.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 27; - v27 := compiler.MapValueForKey(m, "readOnly") - if v27 != nil { - x.ReadOnly, ok = v27.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Xml xml = 28; - v28 := compiler.MapValueForKey(m, "xml") - if v28 != nil { - var err error - x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) - if err != nil { - errors = append(errors, err) - } - } - // ExternalDocs external_docs = 29; - v29 := compiler.MapValueForKey(m, "externalDocs") - if v29 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 30; - v30 := compiler.MapValueForKey(m, "example") - if v30 != nil { - var err error - x.Example, err = NewAny(v30, compiler.NewContext("example", context)) - 
if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 31; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. -func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { - errors := make([]error, 0) - x := &SchemaItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) - if matchingError == nil { - x.Oneof = &SchemaItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // FileSchema file_schema = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) - if matchingError == nil { - x.Oneof = &SchemaItem_FileSchema{FileSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. -func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { - errors := make([]error, 0) - x := &SecurityDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSecurityDefinitionsItem additional_properties = 1; - // MAP: SecurityDefinitionsItem - x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSecurityDefinitionsItem{} - pair.Name = k - var err error - pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
-func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { - errors := make([]error, 0) - x := &SecurityDefinitionsItem{} - matched := false - // BasicAuthenticationSecurity basic_authentication_security = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // ApiKeySecurity api_key_security = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ImplicitSecurity oauth2_implicit_security = 3; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2PasswordSecurity oauth2_password_security = 4; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ApplicationSecurity oauth2_application_security = 5; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
-func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { - errors := make([]error, 0) - x := &SecurityRequirement{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedStringArray additional_properties = 1; - // MAP: StringArray - x.AdditionalProperties = make([]*NamedStringArray, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedStringArray{} - pair.Name = k - var err error - pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewStringArray creates an object of type StringArray if possible, returning an error if not. -func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { - errors := make([]error, 0) - x := &StringArray{} - a, ok := in.([]interface{}) - if !ok { - message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.Value = make([]string, 0) - for _, s := range a { - x.Value = append(x.Value, s.(string)) - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTag creates an object of type Tag if possible, returning an error if not. -func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { - errors := make([]error, 0) - x := &Tag{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "externalDocs", "name"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 3; - v3 := compiler.MapValueForKey(m, "externalDocs") - if v3 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := 
range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. -func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { - errors := make([]error, 0) - x := &TypeItem{} - switch in := in.(type) { - case string: - x.Value = make([]string, 0) - x.Value = append(x.Value, in) - case []interface{}: - x.Value = make([]string, 0) - for _, v := range in { - value, ok := v.(string) - if ok { - x.Value = append(x.Value, value) - } else { - message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) - errors = append(errors, compiler.NewError(context, message)) - } - } - default: - message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. -func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { - errors := make([]error, 0) - x := &VendorExtension{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewXml creates an object of type Xml if possible, returning an error if not. 
-func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { - errors := make([]error, 0) - x := &Xml{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string namespace = 2; - v2 := compiler.MapValueForKey(m, "namespace") - if v2 != nil { - x.Namespace, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string prefix = 3; - v3 := compiler.MapValueForKey(m, "prefix") - if v3 != nil { - x.Prefix, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool attribute = 4; - v4 := compiler.MapValueForKey(m, "attribute") - if v4 != nil { - x.Attribute, ok = v4.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool wrapped = 5; - v5 := compiler.MapValueForKey(m, "wrapped") - if v5 != nil { - x.Wrapped, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. -func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) - if ok { - _, err := p.Schema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Any objects. 
-func (m *Any) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ApiKeySecurity objects. -func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. -func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BodyParameter objects. -func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Contact objects. -func (m *Contact) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Default objects. -func (m *Default) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Definitions objects. -func (m *Definitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Document objects. 
-func (m *Document) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Info != nil { - _, err := m.Info.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Paths != nil { - _, err := m.Paths.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Definitions != nil { - _, err := m.Definitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Parameters != nil { - _, err := m.Parameters.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.SecurityDefinitions != nil { - _, err := m.SecurityDefinitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Tags { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Examples objects. -func (m *Examples) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ExternalDocs objects. -func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FileSchema objects. -func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
-func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Header objects. -func (m *Header) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. -func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Headers objects. -func (m *Headers) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Info objects. -func (m *Info) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Contact != nil { - _, err := m.Contact.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.License != nil { - _, err := m.License.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ItemsItem objects. 
-func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.Schema { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside JsonReference objects. -func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewJsonReference(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside License objects. -func (m *License) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedAny objects. -func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedHeader objects. -func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedParameter objects. -func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedPathItem objects. -func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponse objects. -func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponseValue objects. -func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSchema objects. 
-func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. -func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedString objects. -func (m *NamedString) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedStringArray objects. -func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NonBodyParameter objects. -func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) - if ok { - _, err := p.HeaderParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) - if ok { - _, err := p.FormDataParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) - if ok { - _, err := p.QueryParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) - if ok { - _, err := p.PathParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. -func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. -func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
-func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. -func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2Scopes objects. -func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Operation objects. -func (m *Operation) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Parameter objects. -func (m *Parameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*Parameter_BodyParameter) - if ok { - _, err := p.BodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*Parameter_NonBodyParameter) - if ok { - _, err := p.NonBodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParameterDefinitions objects. -func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParametersItem objects. 
-func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ParametersItem_Parameter) - if ok { - _, err := p.Parameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ParametersItem_JsonReference) - if ok { - info, err := p.JsonReference.ResolveReferences(root) - if err != nil { - return nil, err - } else if info != nil { - n, err := NewParametersItem(info, nil) - if err != nil { - return nil, err - } else if n != nil { - *m = *n - return nil, nil - } - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathItem objects. -func (m *PathItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewPathItem(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - if m.Get != nil { - _, err := m.Get.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Put != nil { - _, err := m.Put.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Post != nil { - _, err := m.Post.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Delete != nil { - _, err := m.Delete.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Options != nil { - _, err := m.Options.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Head != nil { - _, err := m.Head.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Patch != nil { - _, err := m.Patch.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathParameterSubSchema objects. -func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Paths objects. 
-func (m *Paths) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.Path { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PrimitivesItems objects. -func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Properties objects. -func (m *Properties) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside QueryParameterSubSchema objects. -func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Response objects. -func (m *Response) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Headers != nil { - _, err := m.Headers.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Examples != nil { - _, err := m.Examples.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ResponseDefinitions objects. 
-func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside ResponseValue objects.
-func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*ResponseValue_Response)
-		if ok {
-			_, err := p.Response.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*ResponseValue_JsonReference)
-		if ok {
-			info, err := p.JsonReference.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			} else if info != nil {
-				n, err := NewResponseValue(info, nil)
-				if err != nil {
-					return nil, err
-				} else if n != nil {
-					*m = *n
-					return nil, nil
-				}
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Responses objects.
-func (m *Responses) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.ResponseCode {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Schema objects.
-func (m *Schema) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	if m.XRef != "" {
-		info, err := compiler.ReadInfoForRef(root, m.XRef)
-		if err != nil {
-			return nil, err
-		}
-		if info != nil {
-			replacement, err := NewSchema(info, nil)
-			if err == nil {
-				*m = *replacement
-				return m.ResolveReferences(root)
-			}
-		}
-		return info, nil
-	}
-	if m.Default != nil {
-		_, err := m.Default.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Enum {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	if m.AdditionalProperties != nil {
-		_, err := m.AdditionalProperties.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Type != nil {
-		_, err := m.Type.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Items != nil {
-		_, err := m.Items.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.AllOf {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	if m.Properties != nil {
-		_, err := m.Properties.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Xml != nil {
-		_, err := m.Xml.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.ExternalDocs != nil {
-		_, err := m.ExternalDocs.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Example != nil {
-		_, err := m.Example.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SchemaItem objects.
-func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*SchemaItem_Schema)
-		if ok {
-			_, err := p.Schema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SchemaItem_FileSchema)
-		if ok {
-			_, err := p.FileSchema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SecurityDefinitions objects.
-func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SecurityDefinitionsItem objects.
-func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity)
-		if ok {
-			_, err := p.BasicAuthenticationSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity)
-		if ok {
-			_, err := p.ApiKeySecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)
-		if ok {
-			_, err := p.Oauth2ImplicitSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity)
-		if ok {
-			_, err := p.Oauth2PasswordSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity)
-		if ok {
-			_, err := p.Oauth2ApplicationSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)
-		if ok {
-			_, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SecurityRequirement objects.
-func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside StringArray objects.
-func (m *StringArray) ResolveReferences(root string) (interface{}, error) {
-	errors := make([]error, 0)
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Tag objects.
-func (m *Tag) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside TypeItem objects. -func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside VendorExtension objects. -func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Xml objects. -func (m *Xml) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. -func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // AdditionalPropertiesItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return v1.Boolean - } - return nil -} - -// ToRawInfo returns a description of Any suitable for JSON or YAML export. -func (m *Any) ToRawInfo() interface{} { - var err error - var info1 []yaml.MapSlice - err = yaml.Unmarshal([]byte(m.Yaml), &info1) - if err == nil { - return info1 - } - var info2 yaml.MapSlice - err = yaml.Unmarshal([]byte(m.Yaml), &info2) - if err == nil { - return info2 - } - var info3 interface{} - err = yaml.Unmarshal([]byte(m.Yaml), &info3) - if err == nil { - return info3 - } - return nil -} - -// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. -func (m *ApiKeySecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. 
-func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. -func (m *BodyParameter) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.Schema != nil { - info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) - } - // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Contact suitable for JSON or YAML export. -func (m *Contact) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) - } - if m.Email != "" { - info = append(info, yaml.MapItem{"email", m.Email}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Default suitable for JSON or YAML export. -func (m *Default) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} - return info -} - -// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. -func (m *Definitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Document suitable for JSON or YAML export. 
-func (m *Document) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Swagger != "" { - info = append(info, yaml.MapItem{"swagger", m.Swagger}) - } - if m.Info != nil { - info = append(info, yaml.MapItem{"info", m.Info.ToRawInfo()}) - } - // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Host != "" { - info = append(info, yaml.MapItem{"host", m.Host}) - } - if m.BasePath != "" { - info = append(info, yaml.MapItem{"basePath", m.BasePath}) - } - if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{"schemes", m.Schemes}) - } - if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{"consumes", m.Consumes}) - } - if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{"produces", m.Produces}) - } - if m.Paths != nil { - info = append(info, yaml.MapItem{"paths", m.Paths.ToRawInfo()}) - } - // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Definitions != nil { - info = append(info, yaml.MapItem{"definitions", m.Definitions.ToRawInfo()}) - } - // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Parameters != nil { - info = append(info, yaml.MapItem{"parameters", m.Parameters.ToRawInfo()}) - } - // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Responses != nil { - info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) - } - // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Security) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Security { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"security", items}) - } - // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.SecurityDefinitions != nil { - info = append(info, yaml.MapItem{"securityDefinitions", m.SecurityDefinitions.ToRawInfo()}) - } - // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Tags) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Tags { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"tags", items}) - } - // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Examples suitable for JSON or YAML export. 
-func (m *Examples) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. -func (m *ExternalDocs) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. -func (m *FileSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Required) != 0 { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.ReadOnly != false { - info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) - } - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Example != nil { - info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) - } - // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
-func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Header suitable for JSON or YAML export. 
-func (m *Header) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
-func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Headers suitable for JSON or YAML export. -func (m *Headers) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Info suitable for JSON or YAML export. 
-func (m *Info) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) - } - if m.Version != "" { - info = append(info, yaml.MapItem{"version", m.Version}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.TermsOfService != "" { - info = append(info, yaml.MapItem{"termsOfService", m.TermsOfService}) - } - if m.Contact != nil { - info = append(info, yaml.MapItem{"contact", m.Contact.ToRawInfo()}) - } - // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.License != nil { - info = append(info, yaml.MapItem{"license", m.License.ToRawInfo()}) - } - // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. -func (m *ItemsItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if len(m.Schema) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Schema { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"schema", items}) - } - // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - return info -} - -// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. -func (m *JsonReference) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - return info -} - -// ToRawInfo returns a description of License suitable for JSON or YAML export. -func (m *License) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. -func (m *NamedAny) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. -func (m *NamedHeader) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. 
-func (m *NamedParameter) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. -func (m *NamedPathItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. -func (m *NamedResponse) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. -func (m *NamedResponseValue) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. -func (m *NamedSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. -func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. -func (m *NamedString) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Value != "" { - info = append(info, yaml.MapItem{"value", m.Value}) - } - return info -} - -// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. -func (m *NamedStringArray) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. 
-func (m *NonBodyParameter) ToRawInfo() interface{} { - // ONE OF WRAPPER - // NonBodyParameter - // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetHeaderParameterSubSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFormDataParameterSubSchema() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetQueryParameterSubSchema() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetPathParameterSubSchema() - if v3 != nil { - return v3.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. -func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) - } - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. -func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. 
-func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. -func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. -func (m *Oauth2Scopes) ToRawInfo() interface{} { - info := yaml.MapSlice{} - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Operation suitable for JSON or YAML export. 
-func (m *Operation) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if len(m.Tags) != 0 { - info = append(info, yaml.MapItem{"tags", m.Tags}) - } - if m.Summary != "" { - info = append(info, yaml.MapItem{"summary", m.Summary}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.OperationId != "" { - info = append(info, yaml.MapItem{"operationId", m.OperationId}) - } - if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{"produces", m.Produces}) - } - if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{"consumes", m.Consumes}) - } - if len(m.Parameters) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Parameters { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"parameters", items}) - } - // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} - if m.Responses != nil { - info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) - } - // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{"schemes", m.Schemes}) - } - if m.Deprecated != false { - info = append(info, yaml.MapItem{"deprecated", m.Deprecated}) - } - if len(m.Security) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Security { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"security", items}) - } - // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. -func (m *Parameter) ToRawInfo() interface{} { - // ONE OF WRAPPER - // Parameter - // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBodyParameter() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetNonBodyParameter() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. -func (m *ParameterDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. 
-func (m *ParametersItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // ParametersItem - // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetParameter() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. -func (m *PathItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) - } - if m.Get != nil { - info = append(info, yaml.MapItem{"get", m.Get.ToRawInfo()}) - } - // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Put != nil { - info = append(info, yaml.MapItem{"put", m.Put.ToRawInfo()}) - } - // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Post != nil { - info = append(info, yaml.MapItem{"post", m.Post.ToRawInfo()}) - } - // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Delete != nil { - info = append(info, yaml.MapItem{"delete", m.Delete.ToRawInfo()}) - } - // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Options != nil { - info = append(info, yaml.MapItem{"options", m.Options.ToRawInfo()}) - } - // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Head != nil { - info = append(info, yaml.MapItem{"head", m.Head.ToRawInfo()}) - } - // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Patch != nil { - info = append(info, yaml.MapItem{"patch", m.Patch.ToRawInfo()}) - } - // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Parameters) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Parameters { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"parameters", items}) - } - // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. 
-func (m *PathParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Paths suitable for JSON or YAML export. -func (m *Paths) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - if m.Path != nil { - for _, item := range m.Path { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. 
-func (m *PrimitivesItems) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Properties suitable for JSON or YAML export. -func (m *Properties) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. 
-func (m *QueryParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Response suitable for JSON or YAML export. 
-func (m *Response) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Schema != nil { - info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) - } - // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Headers != nil { - info = append(info, yaml.MapItem{"headers", m.Headers.ToRawInfo()}) - } - // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Examples != nil { - info = append(info, yaml.MapItem{"examples", m.Examples.ToRawInfo()}) - } - // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. -func (m *ResponseDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. -func (m *ResponseValue) ToRawInfo() interface{} { - // ONE OF WRAPPER - // ResponseValue - // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetResponse() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of Responses suitable for JSON or YAML export. -func (m *Responses) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.ResponseCode != nil { - for _, item := range m.ResponseCode { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
-func (m *Schema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) - } - if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) - } - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) - } - if m.MaxProperties != 0 { - info = append(info, yaml.MapItem{"maxProperties", m.MaxProperties}) - } - if m.MinProperties != 0 { - info = append(info, yaml.MapItem{"minProperties", m.MinProperties}) - } - if len(m.Required) != 0 { - info = append(info, yaml.MapItem{"required", m.Required}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"enum", items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.AdditionalProperties != nil { - info = append(info, yaml.MapItem{"additionalProperties", m.AdditionalProperties.ToRawInfo()}) - } - // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Type != nil { - if len(m.Type.Value) == 1 { - info = append(info, yaml.MapItem{"type", m.Type.Value[0]}) - } else { - info = append(info, yaml.MapItem{"type", m.Type.Value}) - } - } - // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Items != nil { - items := make([]interface{}, 0) - for _, item := range m.Items.Schema { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"items", items[0]}) - } - // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.AllOf) != 0 { - items := make([]interface{}, 0) - for _, item := range m.AllOf { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{"allOf", items}) - } - // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.Properties != nil { - info = append(info, yaml.MapItem{"properties", m.Properties.ToRawInfo()}) - } 
- // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Discriminator != "" { - info = append(info, yaml.MapItem{"discriminator", m.Discriminator}) - } - if m.ReadOnly != false { - info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) - } - if m.Xml != nil { - info = append(info, yaml.MapItem{"xml", m.Xml.ToRawInfo()}) - } - // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Example != nil { - info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) - } - // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. -func (m *SchemaItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // SchemaItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFileSchema() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. -func (m *SecurityDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
-func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // SecurityDefinitionsItem - // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBasicAuthenticationSecurity() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetApiKeySecurity() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetOauth2ImplicitSecurity() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetOauth2PasswordSecurity() - if v3 != nil { - return v3.ToRawInfo() - } - // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v4 := m.GetOauth2ApplicationSecurity() - if v4 != nil { - return v4.ToRawInfo() - } - // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v5 := m.GetOauth2AccessCodeSecurity() - if v5 != nil { - return v5.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. -func (m *SecurityRequirement) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. -func (m *StringArray) ToRawInfo() interface{} { - return m.Value -} - -// ToRawInfo returns a description of Tag suitable for JSON or YAML export. -func (m *Tag) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) - } - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. -func (m *TypeItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if len(m.Value) != 0 { - info = append(info, yaml.MapItem{"value", m.Value}) - } - return info -} - -// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
-func (m *VendorExtension) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Xml suitable for JSON or YAML export. -func (m *Xml) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) - } - if m.Namespace != "" { - info = append(info, yaml.MapItem{"namespace", m.Namespace}) - } - if m.Prefix != "" { - info = append(info, yaml.MapItem{"prefix", m.Prefix}) - } - if m.Attribute != false { - info = append(info, yaml.MapItem{"attribute", m.Attribute}) - } - if m.Wrapped != false { - info = append(info, yaml.MapItem{"wrapped", m.Wrapped}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -var ( - pattern0 = regexp.MustCompile("^x-") - pattern1 = regexp.MustCompile("^/") - pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") -) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go deleted file mode 100644 index 37da7df256f..00000000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ /dev/null @@ -1,4456 +0,0 @@ -// Code generated by protoc-gen-go. -// source: OpenAPIv2/OpenAPIv2.proto -// DO NOT EDIT! - -/* -Package openapi_v2 is a generated protocol buffer package. - -It is generated from these files: - OpenAPIv2/OpenAPIv2.proto - -It has these top-level messages: - AdditionalPropertiesItem - Any - ApiKeySecurity - BasicAuthenticationSecurity - BodyParameter - Contact - Default - Definitions - Document - Examples - ExternalDocs - FileSchema - FormDataParameterSubSchema - Header - HeaderParameterSubSchema - Headers - Info - ItemsItem - JsonReference - License - NamedAny - NamedHeader - NamedParameter - NamedPathItem - NamedResponse - NamedResponseValue - NamedSchema - NamedSecurityDefinitionsItem - NamedString - NamedStringArray - NonBodyParameter - Oauth2AccessCodeSecurity - Oauth2ApplicationSecurity - Oauth2ImplicitSecurity - Oauth2PasswordSecurity - Oauth2Scopes - Operation - Parameter - ParameterDefinitions - ParametersItem - PathItem - PathParameterSubSchema - Paths - PrimitivesItems - Properties - QueryParameterSubSchema - Response - ResponseDefinitions - ResponseValue - Responses - Schema - SchemaItem - SecurityDefinitions - SecurityDefinitionsItem - SecurityRequirement - StringArray - Tag - TypeItem - VendorExtension - Xml -*/ -package openapi_v2 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type AdditionalPropertiesItem struct { - // Types that are valid to be assigned to Oneof: - // *AdditionalPropertiesItem_Schema - // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } -func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } -func (*AdditionalPropertiesItem) ProtoMessage() {} -func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type isAdditionalPropertiesItem_Oneof interface { - isAdditionalPropertiesItem_Oneof() -} - -type AdditionalPropertiesItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` -} -type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` -} - -func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} -func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} - -func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *AdditionalPropertiesItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { - return x.Schema - } - return nil -} - -func (m *AdditionalPropertiesItem) GetBoolean() bool { - if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return x.Boolean - } - return false -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ - (*AdditionalPropertiesItem_Schema)(nil), - (*AdditionalPropertiesItem_Boolean)(nil), - } -} - -func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*AdditionalPropertiesItem) - // oneof - switch x := m.Oneof.(type) { - case *AdditionalPropertiesItem_Schema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Schema); err != nil { - return err - } - case *AdditionalPropertiesItem_Boolean: - t := uint64(0) - if x.Boolean { - t = 1 - } - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(t) - case nil: - default: - return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*AdditionalPropertiesItem) - switch tag { - case 1: // oneof.schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Schema) - err := b.DecodeMessage(msg) - m.Oneof = &AdditionalPropertiesItem_Schema{msg} - return true, err - case 2: // oneof.boolean - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} - return true, err - default: - return false, nil - } -} - -func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*AdditionalPropertiesItem) - // oneof - switch x := m.Oneof.(type) { - case *AdditionalPropertiesItem_Schema: 
- s := proto.Size(x.Schema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *AdditionalPropertiesItem_Boolean: - n += proto.SizeVarint(2<<3 | proto.WireVarint) - n += 1 - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Any struct { - Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *Any) GetValue() *google_protobuf.Any { - if m != nil { - return m.Value - } - return nil -} - -func (m *Any) GetYaml() string { - if m != nil { - return m.Yaml - } - return "" -} - -type ApiKeySecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } -func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } -func (*ApiKeySecurity) ProtoMessage() {} -func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *ApiKeySecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *ApiKeySecurity) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *ApiKeySecurity) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *ApiKeySecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type BasicAuthenticationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } -func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } -func (*BasicAuthenticationSecurity) ProtoMessage() {} -func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *BasicAuthenticationSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *BasicAuthenticationSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type BodyParameter struct { - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` - Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *BodyParameter) Reset() { *m = BodyParameter{} } -func (m *BodyParameter) String() string { return proto.CompactTextString(m) } -func (*BodyParameter) ProtoMessage() {} -func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *BodyParameter) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *BodyParameter) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *BodyParameter) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *BodyParameter) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *BodyParameter) GetSchema() *Schema { - if m != nil { - return m.Schema - } - return nil -} - -func (m *BodyParameter) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Contact information for the owners of the API. -type Contact struct { - // The identifying name of the contact person/organization. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The URL pointing to the contact information. - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - // The email address of the contact person/organization. - Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Contact) Reset() { *m = Contact{} } -func (m *Contact) String() string { return proto.CompactTextString(m) } -func (*Contact) ProtoMessage() {} -func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *Contact) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Contact) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *Contact) GetEmail() string { - if m != nil { - return m.Email - } - return "" -} - -func (m *Contact) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Default struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Default) Reset() { *m = Default{} } -func (m *Default) String() string { return proto.CompactTextString(m) } -func (*Default) ProtoMessage() {} -func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *Default) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. 
-type Definitions struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Definitions) Reset() { *m = Definitions{} } -func (m *Definitions) String() string { return proto.CompactTextString(m) } -func (*Definitions) ProtoMessage() {} -func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *Definitions) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Document struct { - // The Swagger version of this document. - Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` - Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` - // The host (name or ip) of the API. Example: 'swagger.io' - Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` - // The base path to the API. Example: '/api'. - BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` - // A list of MIME types accepted by the API. - Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` - Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` - Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` - Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` - Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` - SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` - Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *Document) GetSwagger() string { - if m != nil { - return m.Swagger - } - return "" -} - -func (m *Document) GetInfo() *Info { - if m != nil { - return m.Info - } - return nil -} - -func (m *Document) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *Document) GetBasePath() string { - if m != nil { - return m.BasePath - } - return "" -} - -func (m *Document) GetSchemes() []string { - if m != nil { - return m.Schemes - } - return nil -} - -func (m *Document) GetConsumes() []string { - if m != nil { - return m.Consumes - } - return nil -} - -func (m *Document) GetProduces() []string { - if m != nil { - return m.Produces - } - return nil -} - -func (m *Document) GetPaths() *Paths { - if m != nil { - return m.Paths - } - return nil -} - -func (m *Document) GetDefinitions() *Definitions { - if m != nil { - return m.Definitions - } 
- return nil -} - -func (m *Document) GetParameters() *ParameterDefinitions { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *Document) GetResponses() *ResponseDefinitions { - if m != nil { - return m.Responses - } - return nil -} - -func (m *Document) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security - } - return nil -} - -func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { - if m != nil { - return m.SecurityDefinitions - } - return nil -} - -func (m *Document) GetTags() []*Tag { - if m != nil { - return m.Tags - } - return nil -} - -func (m *Document) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Document) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Examples struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Examples) Reset() { *m = Examples{} } -func (m *Examples) String() string { return proto.CompactTextString(m) } -func (*Examples) ProtoMessage() {} -func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -func (m *Examples) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// information about external documentation -type ExternalDocs struct { - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } -func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } -func (*ExternalDocs) ProtoMessage() {} -func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -func (m *ExternalDocs) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ExternalDocs) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *ExternalDocs) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. 
-type FileSchema struct { - Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` - Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` - Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *FileSchema) Reset() { *m = FileSchema{} } -func (m *FileSchema) String() string { return proto.CompactTextString(m) } -func (*FileSchema) ProtoMessage() {} -func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -func (m *FileSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *FileSchema) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *FileSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *FileSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *FileSchema) GetRequired() []string { - if m != nil { - return m.Required - } - return nil -} - -func (m *FileSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *FileSchema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly - } - return false -} - -func (m *FileSchema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *FileSchema) GetExample() *Any { - if m != nil { - return m.Example - } - return nil -} - -func (m *FileSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type FormDataParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. 
- AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } -func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*FormDataParameterSubSchema) ProtoMessage() {} -func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -func (m *FormDataParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *FormDataParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *FormDataParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *FormDataParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue - } - return false -} - -func (m *FormDataParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *FormDataParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *FormDataParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *FormDataParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *FormDataParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 
-} - -func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *FormDataParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *FormDataParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *FormDataParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *FormDataParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Header struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{13} } - -func (m *Header) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Header) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *Header) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *Header) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *Header) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *Header) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *Header) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *Header) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *Header) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *Header) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *Header) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *Header) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *Header) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *Header) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *Header) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *Header) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *Header) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *Header) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Header) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type HeaderParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } -func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*HeaderParameterSubSchema) ProtoMessage() {} -func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -func (m *HeaderParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *HeaderParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *HeaderParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *HeaderParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *HeaderParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *HeaderParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *HeaderParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *HeaderParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *HeaderParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *HeaderParameterSubSchema) GetMinimum() float64 { - if m != nil { - 
return m.Minimum - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *HeaderParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *HeaderParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *HeaderParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Headers struct { - AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Headers) Reset() { *m = Headers{} } -func (m *Headers) String() string { return proto.CompactTextString(m) } -func (*Headers) ProtoMessage() {} -func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -func (m *Headers) GetAdditionalProperties() []*NamedHeader { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// General information about the API. -type Info struct { - // A unique and precise title of the API. - Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` - // A semantic version number of the API. - Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The terms of service for the API. 
- TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` - License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -func (m *Info) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *Info) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Info) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Info) GetTermsOfService() string { - if m != nil { - return m.TermsOfService - } - return "" -} - -func (m *Info) GetContact() *Contact { - if m != nil { - return m.Contact - } - return nil -} - -func (m *Info) GetLicense() *License { - if m != nil { - return m.License - } - return nil -} - -func (m *Info) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type ItemsItem struct { - Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` -} - -func (m *ItemsItem) Reset() { *m = ItemsItem{} } -func (m *ItemsItem) String() string { return proto.CompactTextString(m) } -func (*ItemsItem) ProtoMessage() {} -func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -func (m *ItemsItem) GetSchema() []*Schema { - if m != nil { - return m.Schema - } - return nil -} - -type JsonReference struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` -} - -func (m *JsonReference) Reset() { *m = JsonReference{} } -func (m *JsonReference) String() string { return proto.CompactTextString(m) } -func (*JsonReference) ProtoMessage() {} -func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -func (m *JsonReference) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *JsonReference) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -type License struct { - // The name of the license type. It's encouraged to use an OSI compatible license. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The URL pointing to the license. 
- Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *License) Reset() { *m = License{} } -func (m *License) String() string { return proto.CompactTextString(m) } -func (*License) ProtoMessage() {} -func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *License) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *License) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *License) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -type NamedAny struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedAny) Reset() { *m = NamedAny{} } -func (m *NamedAny) String() string { return proto.CompactTextString(m) } -func (*NamedAny) ProtoMessage() {} -func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -func (m *NamedAny) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedAny) GetValue() *Any { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -type NamedHeader struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedHeader) Reset() { *m = NamedHeader{} } -func (m *NamedHeader) String() string { return proto.CompactTextString(m) } -func (*NamedHeader) ProtoMessage() {} -func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } - -func (m *NamedHeader) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedHeader) GetValue() *Header { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -type NamedParameter struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedParameter) Reset() { *m = NamedParameter{} } -func (m *NamedParameter) String() string { return proto.CompactTextString(m) } -func (*NamedParameter) ProtoMessage() {} -func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } - -func (m *NamedParameter) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedParameter) GetValue() *Parameter { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-type NamedPathItem struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } -func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } -func (*NamedPathItem) ProtoMessage() {} -func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } - -func (m *NamedPathItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedPathItem) GetValue() *PathItem { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -type NamedResponse struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedResponse) Reset() { *m = NamedResponse{} } -func (m *NamedResponse) String() string { return proto.CompactTextString(m) } -func (*NamedResponse) ProtoMessage() {} -func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } - -func (m *NamedResponse) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedResponse) GetValue() *Response { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. -type NamedResponseValue struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } -func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } -func (*NamedResponseValue) ProtoMessage() {} -func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } - -func (m *NamedResponseValue) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedResponseValue) GetValue() *ResponseValue { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -type NamedSchema struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedSchema) Reset() { *m = NamedSchema{} } -func (m *NamedSchema) String() string { return proto.CompactTextString(m) } -func (*NamedSchema) ProtoMessage() {} -func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } - -func (m *NamedSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedSchema) GetValue() *Schema { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
-type NamedSecurityDefinitionsItem struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } -func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*NamedSecurityDefinitionsItem) ProtoMessage() {} -func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } - -func (m *NamedSecurityDefinitionsItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -type NamedString struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedString) Reset() { *m = NamedString{} } -func (m *NamedString) String() string { return proto.CompactTextString(m) } -func (*NamedString) ProtoMessage() {} -func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } - -func (m *NamedString) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedString) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. -type NamedStringArray struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } -func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } -func (*NamedStringArray) ProtoMessage() {} -func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } - -func (m *NamedStringArray) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedStringArray) GetValue() *StringArray { - if m != nil { - return m.Value - } - return nil -} - -type NonBodyParameter struct { - // Types that are valid to be assigned to Oneof: - // *NonBodyParameter_HeaderParameterSubSchema - // *NonBodyParameter_FormDataParameterSubSchema - // *NonBodyParameter_QueryParameterSubSchema - // *NonBodyParameter_PathParameterSubSchema - Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } -func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } -func (*NonBodyParameter) ProtoMessage() {} -func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } - -type isNonBodyParameter_Oneof interface { - isNonBodyParameter_Oneof() -} - -type NonBodyParameter_HeaderParameterSubSchema struct { - HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"` -} -type NonBodyParameter_FormDataParameterSubSchema struct { - FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"` -} -type 
NonBodyParameter_QueryParameterSubSchema struct { - QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"` -} -type NonBodyParameter_PathParameterSubSchema struct { - PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"` -} - -func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} -func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} -func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} -func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { - return x.HeaderParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { - return x.FormDataParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { - return x.QueryParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { - return x.PathParameterSubSchema - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{ - (*NonBodyParameter_HeaderParameterSubSchema)(nil), - (*NonBodyParameter_FormDataParameterSubSchema)(nil), - (*NonBodyParameter_QueryParameterSubSchema)(nil), - (*NonBodyParameter_PathParameterSubSchema)(nil), - } -} - -func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*NonBodyParameter) - // oneof - switch x := m.Oneof.(type) { - case *NonBodyParameter_HeaderParameterSubSchema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil { - return err - } - case *NonBodyParameter_FormDataParameterSubSchema: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil { - return err - } - case *NonBodyParameter_QueryParameterSubSchema: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil { - return err - } - case *NonBodyParameter_PathParameterSubSchema: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x) - } - return nil -} - -func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*NonBodyParameter) - switch tag { - case 1: // oneof.header_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(HeaderParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg} - return true, err - case 2: // oneof.form_data_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(FormDataParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg} - return true, err - case 3: // oneof.query_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(QueryParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg} - return true, err - case 4: // oneof.path_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PathParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg} - return true, err - default: - return false, nil - } -} - -func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) { - m := msg.(*NonBodyParameter) - // oneof - switch x := m.Oneof.(type) { - case *NonBodyParameter_HeaderParameterSubSchema: - s := proto.Size(x.HeaderParameterSubSchema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *NonBodyParameter_FormDataParameterSubSchema: - s := proto.Size(x.FormDataParameterSubSchema) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *NonBodyParameter_QueryParameterSubSchema: - s := proto.Size(x.QueryParameterSubSchema) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case 
*NonBodyParameter_PathParameterSubSchema: - s := proto.Size(x.PathParameterSubSchema) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Oauth2AccessCodeSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` - TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } -func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2AccessCodeSecurity) ProtoMessage() {} -func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } - -func (m *Oauth2AccessCodeSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2ApplicationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } -func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ApplicationSecurity) ProtoMessage() {} -func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } - -func (m *Oauth2ApplicationSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} 
- -func (m *Oauth2ApplicationSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2ImplicitSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } -func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ImplicitSecurity) ProtoMessage() {} -func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } - -func (m *Oauth2ImplicitSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2PasswordSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } -func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2PasswordSecurity) ProtoMessage() {} -func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } - -func (m *Oauth2PasswordSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2PasswordSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2Scopes struct { - 
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } -func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } -func (*Oauth2Scopes) ProtoMessage() {} -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } - -func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Operation struct { - Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` - // A brief summary of the operation. - Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - // A longer description of the operation, GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - // A unique identifier of the operation. - OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` - // A list of MIME types the API can consume. - Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` - // The parameters needed to send a valid API call. - Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` - Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` - Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Operation) Reset() { *m = Operation{} } -func (m *Operation) String() string { return proto.CompactTextString(m) } -func (*Operation) ProtoMessage() {} -func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } - -func (m *Operation) GetTags() []string { - if m != nil { - return m.Tags - } - return nil -} - -func (m *Operation) GetSummary() string { - if m != nil { - return m.Summary - } - return "" -} - -func (m *Operation) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Operation) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Operation) GetOperationId() string { - if m != nil { - return m.OperationId - } - return "" -} - -func (m *Operation) GetProduces() []string { - if m != nil { - return m.Produces - } - return nil -} - -func (m *Operation) GetConsumes() []string { - if m != nil { - return m.Consumes - } - return nil -} - -func (m *Operation) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *Operation) GetResponses() *Responses { - if m != nil { - return m.Responses - } - return nil -} - -func (m *Operation) GetSchemes() []string { - if m != nil { - return m.Schemes - } - return nil -} - -func (m *Operation) GetDeprecated() bool { - if m != nil { - 
return m.Deprecated - } - return false -} - -func (m *Operation) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security - } - return nil -} - -func (m *Operation) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Parameter struct { - // Types that are valid to be assigned to Oneof: - // *Parameter_BodyParameter - // *Parameter_NonBodyParameter - Oneof isParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (m *Parameter) Reset() { *m = Parameter{} } -func (m *Parameter) String() string { return proto.CompactTextString(m) } -func (*Parameter) ProtoMessage() {} -func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } - -type isParameter_Oneof interface { - isParameter_Oneof() -} - -type Parameter_BodyParameter struct { - BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` -} -type Parameter_NonBodyParameter struct { - NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` -} - -func (*Parameter_BodyParameter) isParameter_Oneof() {} -func (*Parameter_NonBodyParameter) isParameter_Oneof() {} - -func (m *Parameter) GetOneof() isParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *Parameter) GetBodyParameter() *BodyParameter { - if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { - return x.BodyParameter - } - return nil -} - -func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { - if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { - return x.NonBodyParameter - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ - (*Parameter_BodyParameter)(nil), - (*Parameter_NonBodyParameter)(nil), - } -} - -func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Parameter) - // oneof - switch x := m.Oneof.(type) { - case *Parameter_BodyParameter: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BodyParameter); err != nil { - return err - } - case *Parameter_NonBodyParameter: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.NonBodyParameter); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) - } - return nil -} - -func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Parameter) - switch tag { - case 1: // oneof.body_parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BodyParameter) - err := b.DecodeMessage(msg) - m.Oneof = &Parameter_BodyParameter{msg} - return true, err - case 2: // oneof.non_body_parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(NonBodyParameter) - err := b.DecodeMessage(msg) - m.Oneof = &Parameter_NonBodyParameter{msg} - return true, err - default: - return false, nil - } -} - -func _Parameter_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Parameter) - // oneof - switch x := m.Oneof.(type) { - case *Parameter_BodyParameter: - s := proto.Size(x.BodyParameter) - n += proto.SizeVarint(1<<3 | 
proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Parameter_NonBodyParameter: - s := proto.Size(x.NonBodyParameter) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// One or more JSON representations for parameters -type ParameterDefinitions struct { - AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } -func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } -func (*ParameterDefinitions) ProtoMessage() {} -func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } - -func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type ParametersItem struct { - // Types that are valid to be assigned to Oneof: - // *ParametersItem_Parameter - // *ParametersItem_JsonReference - Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *ParametersItem) Reset() { *m = ParametersItem{} } -func (m *ParametersItem) String() string { return proto.CompactTextString(m) } -func (*ParametersItem) ProtoMessage() {} -func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } - -type isParametersItem_Oneof interface { - isParametersItem_Oneof() -} - -type ParametersItem_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` -} -type ParametersItem_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` -} - -func (*ParametersItem_Parameter) isParametersItem_Oneof() {} -func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} - -func (m *ParametersItem) GetOneof() isParametersItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *ParametersItem) GetParameter() *Parameter { - if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { - return x.Parameter - } - return nil -} - -func (m *ParametersItem) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { - return x.JsonReference - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ - (*ParametersItem_Parameter)(nil), - (*ParametersItem_JsonReference)(nil), - } -} - -func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ParametersItem) - // oneof - switch x := m.Oneof.(type) { - case *ParametersItem_Parameter: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Parameter); err != nil { - return err - } - case *ParametersItem_JsonReference: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.JsonReference); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ParametersItem) - switch tag { - case 1: // oneof.parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Parameter) - err := b.DecodeMessage(msg) - m.Oneof = &ParametersItem_Parameter{msg} - return true, err - case 2: // oneof.json_reference - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(JsonReference) - err := b.DecodeMessage(msg) - m.Oneof = &ParametersItem_JsonReference{msg} - return true, err - default: - return false, nil - } -} - -func _ParametersItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ParametersItem) - // oneof - switch x := m.Oneof.(type) { - case *ParametersItem_Parameter: - s := proto.Size(x.Parameter) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ParametersItem_JsonReference: - s := proto.Size(x.JsonReference) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type PathItem struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` - Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` - Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` - Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` - Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` - Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` - Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` - // The parameters needed to send a valid API call. 
- Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PathItem) Reset() { *m = PathItem{} } -func (m *PathItem) String() string { return proto.CompactTextString(m) } -func (*PathItem) ProtoMessage() {} -func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } - -func (m *PathItem) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *PathItem) GetGet() *Operation { - if m != nil { - return m.Get - } - return nil -} - -func (m *PathItem) GetPut() *Operation { - if m != nil { - return m.Put - } - return nil -} - -func (m *PathItem) GetPost() *Operation { - if m != nil { - return m.Post - } - return nil -} - -func (m *PathItem) GetDelete() *Operation { - if m != nil { - return m.Delete - } - return nil -} - -func (m *PathItem) GetOptions() *Operation { - if m != nil { - return m.Options - } - return nil -} - -func (m *PathItem) GetHead() *Operation { - if m != nil { - return m.Head - } - return nil -} - -func (m *PathItem) GetPatch() *Operation { - if m != nil { - return m.Patch - } - return nil -} - -func (m *PathItem) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *PathItem) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type PathParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } -func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*PathParameterSubSchema) ProtoMessage() {} -func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } - -func (m *PathParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *PathParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *PathParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PathParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PathParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PathParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *PathParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *PathParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *PathParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *PathParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - 
-func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *PathParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *PathParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *PathParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *PathParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *PathParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *PathParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *PathParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *PathParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. -type Paths struct { - VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` - Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` -} - -func (m *Paths) Reset() { *m = Paths{} } -func (m *Paths) String() string { return proto.CompactTextString(m) } -func (*Paths) ProtoMessage() {} -func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } - -func (m *Paths) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -func (m *Paths) GetPath() []*NamedPathItem { - if m != nil { - return m.Path - } - return nil -} - -type PrimitivesItems struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } -func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } -func (*PrimitivesItems) ProtoMessage() {} -func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } - -func (m *PrimitivesItems) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PrimitivesItems) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *PrimitivesItems) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *PrimitivesItems) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *PrimitivesItems) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *PrimitivesItems) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *PrimitivesItems) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *PrimitivesItems) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *PrimitivesItems) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *PrimitivesItems) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *PrimitivesItems) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *PrimitivesItems) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *PrimitivesItems) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *PrimitivesItems) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *PrimitivesItems) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *PrimitivesItems) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *PrimitivesItems) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Properties struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Properties) Reset() { *m = Properties{} } -func (m *Properties) String() string { return proto.CompactTextString(m) } -func (*Properties) ProtoMessage() {} -func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } - -func (m *Properties) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type QueryParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
- Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } -func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*QueryParameterSubSchema) ProtoMessage() {} -func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } - -func (m *QueryParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *QueryParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *QueryParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *QueryParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue - } - return false -} - -func (m *QueryParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *QueryParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *QueryParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - 
-func (m *QueryParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *QueryParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *QueryParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *QueryParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *QueryParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *QueryParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *QueryParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *QueryParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *QueryParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *QueryParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *QueryParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Response struct { - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` - Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` - Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } - -func (m *Response) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Response) GetSchema() *SchemaItem { - if m != nil { - return m.Schema - } - return nil -} - -func (m *Response) GetHeaders() *Headers { - if m != nil { - return m.Headers - } - return nil -} - -func (m *Response) GetExamples() *Examples { - if m != nil { - return m.Examples - } - return nil -} - -func (m *Response) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// One or more JSON representations for parameters -type ResponseDefinitions struct { - AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } -func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } -func (*ResponseDefinitions) ProtoMessage() {} -func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } - -func (m *ResponseDefinitions) GetAdditionalProperties() 
[]*NamedResponse { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type ResponseValue struct { - // Types that are valid to be assigned to Oneof: - // *ResponseValue_Response - // *ResponseValue_JsonReference - Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` -} - -func (m *ResponseValue) Reset() { *m = ResponseValue{} } -func (m *ResponseValue) String() string { return proto.CompactTextString(m) } -func (*ResponseValue) ProtoMessage() {} -func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } - -type isResponseValue_Oneof interface { - isResponseValue_Oneof() -} - -type ResponseValue_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` -} -type ResponseValue_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` -} - -func (*ResponseValue_Response) isResponseValue_Oneof() {} -func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} - -func (m *ResponseValue) GetOneof() isResponseValue_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *ResponseValue) GetResponse() *Response { - if x, ok := m.GetOneof().(*ResponseValue_Response); ok { - return x.Response - } - return nil -} - -func (m *ResponseValue) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { - return x.JsonReference - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ - (*ResponseValue_Response)(nil), - (*ResponseValue_JsonReference)(nil), - } -} - -func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ResponseValue) - // oneof - switch x := m.Oneof.(type) { - case *ResponseValue_Response: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Response); err != nil { - return err - } - case *ResponseValue_JsonReference: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.JsonReference); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) - } - return nil -} - -func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ResponseValue) - switch tag { - case 1: // oneof.response - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Response) - err := b.DecodeMessage(msg) - m.Oneof = &ResponseValue_Response{msg} - return true, err - case 2: // oneof.json_reference - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(JsonReference) - err := b.DecodeMessage(msg) - m.Oneof = &ResponseValue_JsonReference{msg} - return true, err - default: - return false, nil - } -} - -func _ResponseValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ResponseValue) - // oneof - switch x := m.Oneof.(type) { - case *ResponseValue_Response: - s := proto.Size(x.Response) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseValue_JsonReference: - s := proto.Size(x.JsonReference) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += 
proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Response objects names can either be any valid HTTP status code or 'default'. -type Responses struct { - ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Responses) Reset() { *m = Responses{} } -func (m *Responses) String() string { return proto.CompactTextString(m) } -func (*Responses) ProtoMessage() {} -func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } - -func (m *Responses) GetResponseCode() []*NamedResponseValue { - if m != nil { - return m.ResponseCode - } - return nil -} - -func (m *Responses) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. -type Schema struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` - MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` - Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` - Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` - Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` - AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` - Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` - Discriminator string 
`protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` - ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Schema) Reset() { *m = Schema{} } -func (m *Schema) String() string { return proto.CompactTextString(m) } -func (*Schema) ProtoMessage() {} -func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } - -func (m *Schema) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *Schema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *Schema) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *Schema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Schema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *Schema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *Schema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *Schema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *Schema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *Schema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *Schema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *Schema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *Schema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *Schema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *Schema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *Schema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *Schema) GetMaxProperties() int64 { - if m != nil { - return m.MaxProperties - } - return 0 -} - -func (m *Schema) GetMinProperties() int64 { - if m != nil { - return m.MinProperties - } - return 0 -} - -func (m *Schema) GetRequired() []string { - if m != nil { - return m.Required - } - return nil -} - -func (m *Schema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -func (m *Schema) GetType() *TypeItem { - if m != nil { - return m.Type - } - return nil -} - -func (m *Schema) GetItems() *ItemsItem { - if m != nil { - return m.Items - } - return nil -} - -func (m *Schema) GetAllOf() []*Schema { - if m != nil { - return m.AllOf - } - return nil -} - -func (m *Schema) GetProperties() *Properties { - if m != nil { - return m.Properties - } - return nil -} - -func (m *Schema) GetDiscriminator() string { - if m != nil { - return m.Discriminator - } - return "" -} - -func (m *Schema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly - } - return false -} - 
-func (m *Schema) GetXml() *Xml { - if m != nil { - return m.Xml - } - return nil -} - -func (m *Schema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Schema) GetExample() *Any { - if m != nil { - return m.Example - } - return nil -} - -func (m *Schema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type SchemaItem struct { - // Types that are valid to be assigned to Oneof: - // *SchemaItem_Schema - // *SchemaItem_FileSchema - Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *SchemaItem) Reset() { *m = SchemaItem{} } -func (m *SchemaItem) String() string { return proto.CompactTextString(m) } -func (*SchemaItem) ProtoMessage() {} -func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } - -type isSchemaItem_Oneof interface { - isSchemaItem_Oneof() -} - -type SchemaItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` -} -type SchemaItem_FileSchema struct { - FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` -} - -func (*SchemaItem_Schema) isSchemaItem_Oneof() {} -func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} - -func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *SchemaItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { - return x.Schema - } - return nil -} - -func (m *SchemaItem) GetFileSchema() *FileSchema { - if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { - return x.FileSchema - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ - (*SchemaItem_Schema)(nil), - (*SchemaItem_FileSchema)(nil), - } -} - -func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*SchemaItem) - // oneof - switch x := m.Oneof.(type) { - case *SchemaItem_Schema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Schema); err != nil { - return err - } - case *SchemaItem_FileSchema: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.FileSchema); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*SchemaItem) - switch tag { - case 1: // oneof.schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Schema) - err := b.DecodeMessage(msg) - m.Oneof = &SchemaItem_Schema{msg} - return true, err - case 2: // oneof.file_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(FileSchema) - err := b.DecodeMessage(msg) - m.Oneof = &SchemaItem_FileSchema{msg} - return true, err - default: - return false, nil - } -} - -func _SchemaItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*SchemaItem) - // oneof - switch x := m.Oneof.(type) { - case *SchemaItem_Schema: - s := proto.Size(x.Schema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case 
*SchemaItem_FileSchema: - s := proto.Size(x.FileSchema) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type SecurityDefinitions struct { - AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } -func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitions) ProtoMessage() {} -func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } - -func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type SecurityDefinitionsItem struct { - // Types that are valid to be assigned to Oneof: - // *SecurityDefinitionsItem_BasicAuthenticationSecurity - // *SecurityDefinitionsItem_ApiKeySecurity - // *SecurityDefinitionsItem_Oauth2ImplicitSecurity - // *SecurityDefinitionsItem_Oauth2PasswordSecurity - // *SecurityDefinitionsItem_Oauth2ApplicationSecurity - // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity - Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } -func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitionsItem) ProtoMessage() {} -func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } - -type isSecurityDefinitionsItem_Oneof interface { - isSecurityDefinitionsItem_Oneof() -} - -type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { - BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` -} -type SecurityDefinitionsItem_ApiKeySecurity struct { - ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { - Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { - Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { - Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { - Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` -} - -func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} -func 
(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { - return x.BasicAuthenticationSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { - return x.ApiKeySecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { - return x.Oauth2ImplicitSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { - return x.Oauth2PasswordSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { - return x.Oauth2ApplicationSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { - return x.Oauth2AccessCodeSecurity - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ - (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), - (*SecurityDefinitionsItem_ApiKeySecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), - } -} - -func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*SecurityDefinitionsItem) - // oneof - switch x := m.Oneof.(type) { - case *SecurityDefinitionsItem_BasicAuthenticationSecurity: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_ApiKeySecurity: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2PasswordSecurity: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: - b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - b.EncodeVarint(6<<3 | 
proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*SecurityDefinitionsItem) - switch tag { - case 1: // oneof.basic_authentication_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BasicAuthenticationSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} - return true, err - case 2: // oneof.api_key_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ApiKeySecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} - return true, err - case 3: // oneof.oauth2_implicit_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2ImplicitSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} - return true, err - case 4: // oneof.oauth2_password_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2PasswordSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} - return true, err - case 5: // oneof.oauth2_application_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2ApplicationSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} - return true, err - case 6: // oneof.oauth2_access_code_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2AccessCodeSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} - return true, err - default: - return false, nil - } -} - -func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*SecurityDefinitionsItem) - // oneof - switch x := m.Oneof.(type) { - case *SecurityDefinitionsItem_BasicAuthenticationSecurity: - s := proto.Size(x.BasicAuthenticationSecurity) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_ApiKeySecurity: - s := proto.Size(x.ApiKeySecurity) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: - s := proto.Size(x.Oauth2ImplicitSecurity) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2PasswordSecurity: - s := proto.Size(x.Oauth2PasswordSecurity) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: - s := proto.Size(x.Oauth2ApplicationSecurity) - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - s := proto.Size(x.Oauth2AccessCodeSecurity) - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type SecurityRequirement struct { - 
AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } -func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } -func (*SecurityRequirement) ProtoMessage() {} -func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } - -func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type StringArray struct { - Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` -} - -func (m *StringArray) Reset() { *m = StringArray{} } -func (m *StringArray) String() string { return proto.CompactTextString(m) } -func (*StringArray) ProtoMessage() {} -func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } - -func (m *StringArray) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Tag struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Tag) Reset() { *m = Tag{} } -func (m *Tag) String() string { return proto.CompactTextString(m) } -func (*Tag) ProtoMessage() {} -func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } - -func (m *Tag) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Tag) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Tag) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Tag) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type TypeItem struct { - Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` -} - -func (m *TypeItem) Reset() { *m = TypeItem{} } -func (m *TypeItem) String() string { return proto.CompactTextString(m) } -func (*TypeItem) ProtoMessage() {} -func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } - -func (m *TypeItem) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -// Any property starting with x- is valid. 
-type VendorExtension struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *VendorExtension) Reset() { *m = VendorExtension{} } -func (m *VendorExtension) String() string { return proto.CompactTextString(m) } -func (*VendorExtension) ProtoMessage() {} -func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } - -func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Xml struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Xml) Reset() { *m = Xml{} } -func (m *Xml) String() string { return proto.CompactTextString(m) } -func (*Xml) ProtoMessage() {} -func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } - -func (m *Xml) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Xml) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func (m *Xml) GetPrefix() string { - if m != nil { - return m.Prefix - } - return "" -} - -func (m *Xml) GetAttribute() bool { - if m != nil { - return m.Attribute - } - return false -} - -func (m *Xml) GetWrapped() bool { - if m != nil { - return m.Wrapped - } - return false -} - -func (m *Xml) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -func init() { - proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") - proto.RegisterType((*Any)(nil), "openapi.v2.Any") - proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") - proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") - proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") - proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") - proto.RegisterType((*Default)(nil), "openapi.v2.Default") - proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") - proto.RegisterType((*Document)(nil), "openapi.v2.Document") - proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") - proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") - proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") - proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") - proto.RegisterType((*Header)(nil), "openapi.v2.Header") - proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") - proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") - proto.RegisterType((*Info)(nil), "openapi.v2.Info") - proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") - proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") - proto.RegisterType((*License)(nil), "openapi.v2.License") - proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") - proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") - proto.RegisterType((*NamedParameter)(nil), 
"openapi.v2.NamedParameter") - proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") - proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") - proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") - proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") - proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") - proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") - proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") - proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") - proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") - proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") - proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") - proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") - proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") - proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") - proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") - proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") - proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") - proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") - proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") - proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") - proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") - proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") - proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") - proto.RegisterType((*Response)(nil), "openapi.v2.Response") - proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") - proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") - proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") - proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") - proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") - proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") - proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") - proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") - proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") - proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") - proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") - proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") - proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") -} - -func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 3129 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, - 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, - 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, - 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, - 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, - 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, - 
0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, - 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, - 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, - 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, - 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, - 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, - 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, - 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, - 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, - 0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, - 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, - 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, - 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, - 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, - 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, - 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, - 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, - 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, - 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, - 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, - 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, - 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, - 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, - 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, - 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, - 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, - 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, - 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, - 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, - 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, - 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, - 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, - 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, - 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, - 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, - 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, - 0x03, 0x5b, 0x79, 0x25, 
0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, - 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, - 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, - 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, - 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, - 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, - 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, - 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, - 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, - 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, - 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, - 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, - 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, - 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, - 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, - 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, - 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, - 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, - 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, - 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, - 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, - 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, - 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, - 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, - 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, - 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, - 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, - 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, - 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, - 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, - 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, - 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, - 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, - 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, - 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, - 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, - 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 
0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, - 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, - 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, - 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, - 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, - 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, - 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, - 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, - 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, - 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0, - 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, - 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, - 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, - 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, - 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, - 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, - 0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, - 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, - 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, - 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, - 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, - 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, - 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, - 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, - 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, - 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, - 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, - 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, - 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, - 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, - 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, - 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, - 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, - 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, - 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, - 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, - 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 
0x9a, 0x35, 0xd1, 0x69, - 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, - 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, - 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, - 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, - 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, - 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, - 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, - 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, - 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b, - 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, - 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, - 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, - 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, - 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, - 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, - 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, - 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, - 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, - 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, - 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, - 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, - 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, - 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, - 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, - 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, - 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, - 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, - 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, - 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, - 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, - 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, - 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, - 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, - 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, - 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, - 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, - 
0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, - 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, - 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, - 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, - 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, - 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, - 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, - 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, - 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, - 0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, - 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, - 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, - 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, - 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, - 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, - 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, - 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, - 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, - 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, - 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, - 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, - 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, - 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, - 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, - 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, - 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, - 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, - 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, - 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, - 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, - 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, - 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, - 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, - 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, - 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, - 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, - 0xb1, 0x5b, 0xc6, 0xb3, 
0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, - 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, - 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, - 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, - 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, - 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, - 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, - 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, - 0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00, -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go deleted file mode 100644 index a64c1b75db4..00000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -// Context contains state of the compiler as it traverses a document. -type Context struct { - Parent *Context - Name string - ExtensionHandlers *[]ExtensionHandler -} - -// NewContextWithExtensions returns a new object representing the compiler state -func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { - return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} -} - -// NewContext returns a new object representing the compiler state -func NewContext(name string, parent *Context) *Context { - if parent != nil { - return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} - } - return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} -} - -// Description returns a text description of the compiler state -func (context *Context) Description() string { - if context.Parent != nil { - return context.Parent.Description() + "." + context.Name - } - return context.Name -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go deleted file mode 100644 index d8672c1008b..00000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/error.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -// Error represents compiler errors and their location in the document. -type Error struct { - Context *Context - Message string -} - -// NewError creates an Error. -func NewError(context *Context, message string) *Error { - return &Error{Context: context, Message: message} -} - -// Error returns the string value of an Error. -func (err *Error) Error() string { - if err.Context == nil { - return "ERROR " + err.Message - } - return "ERROR " + err.Context.Description() + " " + err.Message -} - -// ErrorGroup is a container for groups of Error values. -type ErrorGroup struct { - Errors []error -} - -// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. -func NewErrorGroupOrNil(errors []error) error { - if len(errors) == 0 { - return nil - } else if len(errors) == 1 { - return errors[0] - } else { - return &ErrorGroup{Errors: errors} - } -} - -func (group *ErrorGroup) Error() string { - result := "" - for i, err := range group.Errors { - if i > 0 { - result += "\n" - } - result += err.Error() - } - return result -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go deleted file mode 100644 index 1f85b650e81..00000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "bytes" - "fmt" - "os/exec" - - "strings" - - "errors" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - ext_plugin "github.com/googleapis/gnostic/extensions" - yaml "gopkg.in/yaml.v2" -) - -// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. -type ExtensionHandler struct { - Name string -} - -// HandleExtension calls a binary extension handler. 
-func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { - handled := false - var errFromPlugin error - var outFromPlugin *any.Any - - if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { - for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { - outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) - if outFromPlugin == nil { - continue - } else { - handled = true - break - } - } - } - return handled, outFromPlugin, errFromPlugin -} - -func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { - if extensionHandlers.Name != "" { - binary, _ := yaml.Marshal(in) - - request := &ext_plugin.ExtensionHandlerRequest{} - - version := &ext_plugin.Version{} - version.Major = 0 - version.Minor = 1 - version.Patch = 0 - request.CompilerVersion = version - - request.Wrapper = &ext_plugin.Wrapper{} - - request.Wrapper.Version = "v2" - request.Wrapper.Yaml = string(binary) - request.Wrapper.ExtensionName = extensionName - - requestBytes, _ := proto.Marshal(request) - cmd := exec.Command(extensionHandlers.Name) - cmd.Stdin = bytes.NewReader(requestBytes) - output, err := cmd.Output() - - if err != nil { - fmt.Printf("Error: %+v\n", err) - return nil, err - } - response := &ext_plugin.ExtensionHandlerResponse{} - err = proto.Unmarshal(output, response) - if err != nil { - fmt.Printf("Error: %+v\n", err) - fmt.Printf("%s\n", string(output)) - return nil, err - } - if !response.Handled { - return nil, nil - } - if len(response.Error) != 0 { - message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) - return nil, errors.New(message) - } - return response.Value, nil - } - return nil, nil -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go deleted file mode 100644 index 76df635ff9b..00000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/helpers.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "gopkg.in/yaml.v2" - "regexp" - "sort" - "strconv" -) - -// compiler helper functions, usually called from generated code - -// UnpackMap gets a yaml.MapSlice if possible. -func UnpackMap(in interface{}) (yaml.MapSlice, bool) { - m, ok := in.(yaml.MapSlice) - if ok { - return m, true - } - // do we have an empty array? - a, ok := in.([]interface{}) - if ok && len(a) == 0 { - // if so, return an empty map - return yaml.MapSlice{}, true - } - return nil, false -} - -// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. 
-func SortedKeysForMap(m yaml.MapSlice) []string { - keys := make([]string, 0) - for _, item := range m { - keys = append(keys, item.Key.(string)) - } - sort.Strings(keys) - return keys -} - -// MapHasKey returns true if a yaml.MapSlice contains a specified key. -func MapHasKey(m yaml.MapSlice, key string) bool { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return true - } - } - return false -} - -// MapValueForKey gets the value of a map value for a specified key. -func MapValueForKey(m yaml.MapSlice, key string) interface{} { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return item.Value - } - } - return nil -} - -// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. -func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { - stringArray := make([]string, 0) - for _, item := range interfaceArray { - v, ok := item.(string) - if ok { - stringArray = append(stringArray, v) - } - } - return stringArray -} - -// MissingKeysInMap identifies which keys from a list of required keys are not in a map. -func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { - missingKeys := make([]string, 0) - for _, k := range requiredKeys { - if !MapHasKey(m, k) { - missingKeys = append(missingKeys, k) - } - } - return missingKeys -} - -// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. -func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { - invalidKeys := make([]string, 0) - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok { - key := itemKey - found := false - // does the key match an allowed key? - for _, allowedKey := range allowedKeys { - if key == allowedKey { - found = true - break - } - } - if !found { - // does the key match an allowed pattern? - for _, allowedPattern := range allowedPatterns { - if allowedPattern.MatchString(key) { - found = true - break - } - } - if !found { - invalidKeys = append(invalidKeys, key) - } - } - } - } - return invalidKeys -} - -// DescribeMap describes a map (for debugging purposes). -func DescribeMap(in interface{}, indent string) string { - description := "" - m, ok := in.(map[string]interface{}) - if ok { - keys := make([]string, 0) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := m[k] - description += fmt.Sprintf("%s%s:\n", indent, k) - description += DescribeMap(v, indent+" ") - } - return description - } - a, ok := in.([]interface{}) - if ok { - for i, v := range a { - description += fmt.Sprintf("%s%d:\n", indent, i) - description += DescribeMap(v, indent+" ") - } - return description - } - description += fmt.Sprintf("%s%+v\n", indent, in) - return description -} - -// PluralProperties returns the string "properties" pluralized. -func PluralProperties(count int) string { - if count == 1 { - return "property" - } - return "properties" -} - -// StringArrayContainsValue returns true if a string array contains a specified value. -func StringArrayContainsValue(array []string, value string) bool { - for _, item := range array { - if item == value { - return true - } - } - return false -} - -// StringArrayContainsValues returns true if a string array contains all of a list of specified values. 
-func StringArrayContainsValues(array []string, values []string) bool { - for _, value := range values { - if !StringArrayContainsValue(array, value) { - return false - } - } - return true -} - -// StringValue returns the string value of an item. -func StringValue(item interface{}) (value string, ok bool) { - value, ok = item.(string) - if ok { - return value, ok - } - intValue, ok := item.(int) - if ok { - return strconv.Itoa(intValue), true - } - return "", false -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go deleted file mode 100644 index 9713a21cca2..00000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/main.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package compiler provides support functions to generated compiler code. -package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go deleted file mode 100644 index 604a46a6a1e..00000000000 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "gopkg.in/yaml.v2" - "io/ioutil" - "log" - "net/http" - "net/url" - "path/filepath" - "strings" -) - -var fileCache map[string][]byte -var infoCache map[string]interface{} -var count int64 - -var verboseReader = false - -func initializeFileCache() { - if fileCache == nil { - fileCache = make(map[string][]byte, 0) - } -} - -func initializeInfoCache() { - if infoCache == nil { - infoCache = make(map[string]interface{}, 0) - } -} - -// FetchFile gets a specified file from the local filesystem or a remote location. -func FetchFile(fileurl string) ([]byte, error) { - initializeFileCache() - bytes, ok := fileCache[fileurl] - if ok { - if verboseReader { - log.Printf("Cache hit %s", fileurl) - } - return bytes, nil - } - log.Printf("Fetching %s", fileurl) - response, err := http.Get(fileurl) - if err != nil { - return nil, err - } - defer response.Body.Close() - bytes, err = ioutil.ReadAll(response.Body) - if err == nil { - fileCache[fileurl] = bytes - } - return bytes, err -} - -// ReadBytesForFile reads the bytes of a file. -func ReadBytesForFile(filename string) ([]byte, error) { - // is the filename a url? 
- fileurl, _ := url.Parse(filename) - if fileurl.Scheme != "" { - // yes, fetch it - bytes, err := FetchFile(filename) - if err != nil { - return nil, err - } - return bytes, nil - } - // no, it's a local filename - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - return bytes, nil -} - -// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. -func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { - initializeInfoCache() - cachedInfo, ok := infoCache[filename] - if ok { - if verboseReader { - log.Printf("Cache hit info for file %s", filename) - } - return cachedInfo, nil - } - if verboseReader { - log.Printf("Reading info for file %s", filename) - } - var info yaml.MapSlice - err := yaml.Unmarshal(bytes, &info) - if err != nil { - return nil, err - } - infoCache[filename] = info - return info, nil -} - -// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. -func ReadInfoForRef(basefile string, ref string) (interface{}, error) { - initializeInfoCache() - { - info, ok := infoCache[ref] - if ok { - if verboseReader { - log.Printf("Cache hit for ref %s#%s", basefile, ref) - } - return info, nil - } - } - if verboseReader { - log.Printf("Reading info for ref %s#%s", basefile, ref) - } - count = count + 1 - basedir, _ := filepath.Split(basefile) - parts := strings.Split(ref, "#") - var filename string - if parts[0] != "" { - filename = basedir + parts[0] - } else { - filename = basefile - } - bytes, err := ReadBytesForFile(filename) - if err != nil { - return nil, err - } - info, err := ReadInfoFromBytes(filename, bytes) - if err != nil { - log.Printf("File error: %v\n", err) - } else { - if len(parts) > 1 { - path := strings.Split(parts[1], "/") - for i, key := range path { - if i > 0 { - m, ok := info.(yaml.MapSlice) - if ok { - found := false - for _, section := range m { - if section.Key == key { - info = section.Value - found = true - } - } - if !found { - infoCache[ref] = nil - return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) - } - } - } - } - } - } - infoCache[ref] = info - return info, nil -} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go deleted file mode 100644 index b14f1f945fc..00000000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ /dev/null @@ -1,219 +0,0 @@ -// Code generated by protoc-gen-go. -// source: extension.proto -// DO NOT EDIT! - -/* -Package openapiextension_v1 is a generated protocol buffer package. - -It is generated from these files: - extension.proto - -It has these top-level messages: - Version - ExtensionHandlerRequest - ExtensionHandlerResponse - Wrapper -*/ -package openapiextension_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The version number of OpenAPI compiler. 
-type Version struct { - Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Version) GetMajor() int32 { - if m != nil { - return m.Major - } - return 0 -} - -func (m *Version) GetMinor() int32 { - if m != nil { - return m.Minor - } - return 0 -} - -func (m *Version) GetPatch() int32 { - if m != nil { - return m.Patch - } - return 0 -} - -func (m *Version) GetSuffix() string { - if m != nil { - return m.Suffix - } - return "" -} - -// An encoded Request is written to the ExtensionHandler's stdin. -type ExtensionHandlerRequest struct { - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to openapic. - Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` - // The version number of openapi compiler. - CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` -} - -func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } -func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerRequest) ProtoMessage() {} -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { - if m != nil { - return m.Wrapper - } - return nil -} - -func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion - } - return nil -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -type ExtensionHandlerResponse struct { - // true if the extension is handled by the extension handler; false otherwise - Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` - // Error message. If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. 
- Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` - // text output - Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` -} - -func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } -func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerResponse) ProtoMessage() {} -func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *ExtensionHandlerResponse) GetHandled() bool { - if m != nil { - return m.Handled - } - return false -} - -func (m *ExtensionHandlerResponse) GetError() []string { - if m != nil { - return m.Error - } - return nil -} - -func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { - if m != nil { - return m.Value - } - return nil -} - -type Wrapper struct { - // version of the OpenAPI specification in which this extension was written. - Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` - // Name of the extension - ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` - // Must be a valid yaml for the proto - Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` -} - -func (m *Wrapper) Reset() { *m = Wrapper{} } -func (m *Wrapper) String() string { return proto.CompactTextString(m) } -func (*Wrapper) ProtoMessage() {} -func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *Wrapper) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Wrapper) GetExtensionName() string { - if m != nil { - return m.ExtensionName - } - return "" -} - -func (m *Wrapper) GetYaml() string { - if m != nil { - return m.Yaml - } - return "" -} - -func init() { - proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") - proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") - proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") - proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") -} - -func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, - 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, - 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, - 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, - 0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, - 0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, - 0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, - 0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, - 0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a, - 0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, - 0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, - 0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, - 0x6c, 0x3c, 0xd4, 
0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, - 0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, - 0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, - 0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, - 0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, - 0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, - 0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, - 0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, - 0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, - 0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, - 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go deleted file mode 100644 index 94a8e62a776..00000000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extensions.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openapiextension_v1 - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" -) - -type documentHandler func(version string, extensionName string, document string) -type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) - -func forInputYamlFromOpenapic(handler documentHandler) { - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - fmt.Println("File error:", err.Error()) - os.Exit(1) - } - if len(data) == 0 { - fmt.Println("No input data.") - os.Exit(1) - } - request := &ExtensionHandlerRequest{} - err = proto.Unmarshal(data, request) - if err != nil { - fmt.Println("Input error:", err.Error()) - os.Exit(1) - } - handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) -} - -// ProcessExtension calles the handler for a specified extension. 
-func ProcessExtension(handleExtension extensionHandler) { - response := &ExtensionHandlerResponse{} - forInputYamlFromOpenapic( - func(version string, extensionName string, yamlInput string) { - var newObject proto.Message - var err error - - handled, newObject, err := handleExtension(extensionName, yamlInput) - if !handled { - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - - // If we reach here, then the extension is handled - response.Handled = true - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - response.Value, err = ptypes.MarshalAny(newObject) - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - }) - - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) -} diff --git a/vendor/github.com/googleapis/gnostic/gnostic.go b/vendor/github.com/googleapis/gnostic/gnostic.go deleted file mode 100644 index d2670cdea91..00000000000 --- a/vendor/github.com/googleapis/gnostic/gnostic.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate ./COMPILE-PROTOS.sh - -// Gnostic is a tool for building better REST APIs through knowledge. -// -// Gnostic reads declarative descriptions of REST APIs that conform -// to the OpenAPI Specification, reports errors, resolves internal -// dependencies, and puts the results in a binary form that can -// be used in any language that is supported by the Protocol Buffer -// tools. -// -// Gnostic models are validated and typed. This allows API tool -// developers to focus on their product and not worry about input -// validation and type checking. -// -// Gnostic calls plugins that implement a variety of API implementation -// and support features including generation of client and server -// support code. -package main - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path" - "path/filepath" - "regexp" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/googleapis/gnostic/OpenAPIv2" - "github.com/googleapis/gnostic/OpenAPIv3" - "github.com/googleapis/gnostic/compiler" - "github.com/googleapis/gnostic/jsonwriter" - plugins "github.com/googleapis/gnostic/plugins" - "gopkg.in/yaml.v2" -) - -const ( // OpenAPI Version - openAPIvUnknown = 0 - openAPIv2 = 2 - openAPIv3 = 3 -) - -// Determine the version of an OpenAPI description read from JSON or YAML. 
-func getOpenAPIVersionFromInfo(info interface{}) int { - m, ok := compiler.UnpackMap(info) - if !ok { - return openAPIvUnknown - } - swagger, ok := compiler.MapValueForKey(m, "swagger").(string) - if ok && strings.HasPrefix(swagger, "2.0") { - return openAPIv2 - } - openapi, ok := compiler.MapValueForKey(m, "openapi").(string) - if ok && strings.HasPrefix(openapi, "3.0") { - return openAPIv3 - } - return openAPIvUnknown -} - -const ( - pluginPrefix = "gnostic-" - extensionPrefix = "gnostic-x-" -) - -type pluginCall struct { - Name string - Invocation string -} - -// Invokes a plugin. -func (p *pluginCall) perform(document proto.Message, openAPIVersion int, sourceName string) error { - if p.Name != "" { - request := &plugins.Request{} - - // Infer the name of the executable by adding the prefix. - executableName := pluginPrefix + p.Name - - // Validate invocation string with regular expression. - invocation := p.Invocation - - // - // Plugin invocations must consist of - // zero or more comma-separated key=value pairs followed by a path. - // If pairs are present, a colon separates them from the path. - // Keys and values must be alphanumeric strings and may contain - // dashes, underscores, periods, or forward slashes. - // A path can contain any characters other than the separators ',', ':', and '='. - // - invocationRegex := regexp.MustCompile(`^([\w-_\/\.]+=[\w-_\/\.]+(,[\w-_\/\.]+=[\w-_\/\.]+)*:)?[^,:=]+$`) - if !invocationRegex.Match([]byte(p.Invocation)) { - return fmt.Errorf("Invalid invocation of %s: %s", executableName, invocation) - } - - invocationParts := strings.Split(p.Invocation, ":") - var outputLocation string - switch len(invocationParts) { - case 1: - outputLocation = invocationParts[0] - case 2: - parameters := strings.Split(invocationParts[0], ",") - for _, keyvalue := range parameters { - pair := strings.Split(keyvalue, "=") - if len(pair) == 2 { - request.Parameters = append(request.Parameters, &plugins.Parameter{Name: pair[0], Value: pair[1]}) - } - } - outputLocation = invocationParts[1] - default: - // badly-formed request - outputLocation = invocationParts[len(invocationParts)-1] - } - - version := &plugins.Version{} - version.Major = 0 - version.Minor = 1 - version.Patch = 0 - request.CompilerVersion = version - - request.OutputPath = outputLocation - - wrapper := &plugins.Wrapper{} - wrapper.Name = sourceName - switch openAPIVersion { - case openAPIv2: - wrapper.Version = "v2" - case openAPIv3: - wrapper.Version = "v3" - default: - wrapper.Version = "unknown" - } - protoBytes, _ := proto.Marshal(document) - wrapper.Value = protoBytes - request.Wrapper = wrapper - requestBytes, _ := proto.Marshal(request) - - cmd := exec.Command(executableName) - cmd.Stdin = bytes.NewReader(requestBytes) - cmd.Stderr = os.Stderr - output, err := cmd.Output() - if err != nil { - return err - } - response := &plugins.Response{} - err = proto.Unmarshal(output, response) - if err != nil { - return err - } - - if response.Errors != nil { - return fmt.Errorf("Plugin error: %+v", response.Errors) - } - - // Write files to the specified directory. - var writer io.Writer - switch { - case outputLocation == "!": - // Write nothing. 
- case outputLocation == "-": - writer = os.Stdout - for _, file := range response.Files { - writer.Write([]byte("\n\n" + file.Name + " -------------------- \n")) - writer.Write(file.Data) - } - case isFile(outputLocation): - return fmt.Errorf("unable to overwrite %s", outputLocation) - default: // write files into a directory named by outputLocation - if !isDirectory(outputLocation) { - os.Mkdir(outputLocation, 0755) - } - for _, file := range response.Files { - p := outputLocation + "/" + file.Name - dir := path.Dir(p) - os.MkdirAll(dir, 0755) - f, _ := os.Create(p) - defer f.Close() - f.Write(file.Data) - } - } - } - return nil -} - -func isFile(path string) bool { - fileInfo, err := os.Stat(path) - if err != nil { - return false - } - return !fileInfo.IsDir() -} - -func isDirectory(path string) bool { - fileInfo, err := os.Stat(path) - if err != nil { - return false - } - return fileInfo.IsDir() -} - -// Write bytes to a named file. -// Certain names have special meaning: -// ! writes nothing -// - writes to stdout -// = writes to stderr -// If a directory name is given, the file is written there with -// a name derived from the source and extension arguments. -func writeFile(name string, bytes []byte, source string, extension string) { - var writer io.Writer - if name == "!" { - return - } else if name == "-" { - writer = os.Stdout - } else if name == "=" { - writer = os.Stderr - } else if isDirectory(name) { - base := filepath.Base(source) - // Remove the original source extension. - base = base[0 : len(base)-len(filepath.Ext(base))] - // Build the path that puts the result in the passed-in directory. - filename := name + "/" + base + "." + extension - file, _ := os.Create(filename) - defer file.Close() - writer = file - } else { - file, _ := os.Create(name) - defer file.Close() - writer = file - } - writer.Write(bytes) - if name == "-" || name == "=" { - writer.Write([]byte("\n")) - } -} - -// The Gnostic structure holds global state information for gnostic. -type Gnostic struct { - usage string - sourceName string - binaryOutputPath string - textOutputPath string - yamlOutputPath string - jsonOutputPath string - errorOutputPath string - resolveReferences bool - pluginCalls []*pluginCall - extensionHandlers []compiler.ExtensionHandler - openAPIVersion int -} - -// Initialize a structure to store global application state. -func newGnostic() *Gnostic { - g := &Gnostic{} - // Option fields initialize to their default values. - g.usage = ` -Usage: gnostic OPENAPI_SOURCE [OPTIONS] - OPENAPI_SOURCE is the filename or URL of an OpenAPI description to read. -Options: - --pb-out=PATH Write a binary proto to the specified location. - --text-out=PATH Write a text proto to the specified location. - --json-out=PATH Write a json API description to the specified location. - --yaml-out=PATH Write a yaml API description to the specified location. - --errors-out=PATH Write compilation errors to the specified location. - --PLUGIN-out=PATH Run the plugin named gnostic_PLUGIN and write results - to the specified location. - --x-EXTENSION Use the extension named gnostic-x-EXTENSION - to process OpenAPI specification extensions. - --resolve-refs Explicitly resolve $ref references. - This could have problems with recursive definitions. -` - // Initialize internal structures. - g.pluginCalls = make([]*pluginCall, 0) - g.extensionHandlers = make([]compiler.ExtensionHandler, 0) - return g -} - -// Parse command-line options. 
-func (g *Gnostic) readOptions() { - // plugin processing matches patterns of the form "--PLUGIN-out=PATH" and "--PLUGIN_out=PATH" - pluginRegex := regexp.MustCompile("--(.+)[-_]out=(.+)") - - // extension processing matches patterns of the form "--x-EXTENSION" - extensionRegex := regexp.MustCompile("--x-(.+)") - - for i, arg := range os.Args { - if i == 0 { - continue // skip the tool name - } - var m [][]byte - if m = pluginRegex.FindSubmatch([]byte(arg)); m != nil { - pluginName := string(m[1]) - invocation := string(m[2]) - switch pluginName { - case "pb": - g.binaryOutputPath = invocation - case "text": - g.textOutputPath = invocation - case "json": - g.jsonOutputPath = invocation - case "yaml": - g.yamlOutputPath = invocation - case "errors": - g.errorOutputPath = invocation - default: - p := &pluginCall{Name: pluginName, Invocation: invocation} - g.pluginCalls = append(g.pluginCalls, p) - } - } else if m = extensionRegex.FindSubmatch([]byte(arg)); m != nil { - extensionName := string(m[1]) - extensionHandler := compiler.ExtensionHandler{Name: extensionPrefix + extensionName} - g.extensionHandlers = append(g.extensionHandlers, extensionHandler) - } else if arg == "--resolve-refs" { - g.resolveReferences = true - } else if arg[0] == '-' { - fmt.Fprintf(os.Stderr, "Unknown option: %s.\n%s\n", arg, g.usage) - os.Exit(-1) - } else { - g.sourceName = arg - } - } -} - -// Validate command-line options. -func (g *Gnostic) validateOptions() { - if g.binaryOutputPath == "" && - g.textOutputPath == "" && - g.yamlOutputPath == "" && - g.jsonOutputPath == "" && - g.errorOutputPath == "" && - len(g.pluginCalls) == 0 { - fmt.Fprintf(os.Stderr, "Missing output directives.\n%s\n", g.usage) - os.Exit(-1) - } - if g.sourceName == "" { - fmt.Fprintf(os.Stderr, "No input specified.\n%s\n", g.usage) - os.Exit(-1) - } - // If we get here and the error output is unspecified, write errors to stderr. - if g.errorOutputPath == "" { - g.errorOutputPath = "=" - } -} - -// Generate an error message to be written to stderr or a file. -func (g *Gnostic) errorBytes(err error) []byte { - return []byte("Errors reading " + g.sourceName + "\n" + err.Error()) -} - -// Read an OpenAPI description from YAML or JSON. -func (g *Gnostic) readOpenAPIText(bytes []byte) (message proto.Message, err error) { - info, err := compiler.ReadInfoFromBytes(g.sourceName, bytes) - if err != nil { - return nil, err - } - // Determine the OpenAPI version. - g.openAPIVersion = getOpenAPIVersionFromInfo(info) - if g.openAPIVersion == openAPIvUnknown { - return nil, errors.New("unable to identify OpenAPI version") - } - // Compile to the proto model. - if g.openAPIVersion == openAPIv2 { - document, err := openapi_v2.NewDocument(info, compiler.NewContextWithExtensions("$root", nil, &g.extensionHandlers)) - if err != nil { - return nil, err - } - message = document - } else if g.openAPIVersion == openAPIv3 { - document, err := openapi_v3.NewDocument(info, compiler.NewContextWithExtensions("$root", nil, &g.extensionHandlers)) - if err != nil { - return nil, err - } - message = document - } - return message, err -} - -// Read an OpenAPI binary file. 
-func (g *Gnostic) readOpenAPIBinary(data []byte) (message proto.Message, err error) { - // try to read an OpenAPI v3 document - documentV3 := &openapi_v3.Document{} - err = proto.Unmarshal(data, documentV3) - if err == nil && strings.HasPrefix(documentV3.Openapi, "3.0") { - g.openAPIVersion = openAPIv3 - return documentV3, nil - } - // if that failed, try to read an OpenAPI v2 document - documentV2 := &openapi_v2.Document{} - err = proto.Unmarshal(data, documentV2) - if err == nil && strings.HasPrefix(documentV2.Swagger, "2.0") { - g.openAPIVersion = openAPIv2 - return documentV2, nil - } - return nil, err -} - -// Write a binary pb representation. -func (g *Gnostic) writeBinaryOutput(message proto.Message) { - protoBytes, err := proto.Marshal(message) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - defer os.Exit(-1) - } else { - writeFile(g.binaryOutputPath, protoBytes, g.sourceName, "pb") - } -} - -// Write a text pb representation. -func (g *Gnostic) writeTextOutput(message proto.Message) { - bytes := []byte(proto.MarshalTextString(message)) - writeFile(g.textOutputPath, bytes, g.sourceName, "text") -} - -// Write JSON/YAML OpenAPI representations. -func (g *Gnostic) writeJSONYAMLOutput(message proto.Message) { - // Convert the OpenAPI document into an exportable MapSlice. - var rawInfo yaml.MapSlice - var ok bool - var err error - if g.openAPIVersion == openAPIv2 { - document := message.(*openapi_v2.Document) - rawInfo, ok = document.ToRawInfo().(yaml.MapSlice) - if !ok { - rawInfo = nil - } - } else if g.openAPIVersion == openAPIv3 { - document := message.(*openapi_v3.Document) - rawInfo, ok = document.ToRawInfo().(yaml.MapSlice) - if !ok { - rawInfo = nil - } - } - // Optionally write description in yaml format. - if g.yamlOutputPath != "" { - var bytes []byte - if rawInfo != nil { - bytes, err = yaml.Marshal(rawInfo) - if err != nil { - fmt.Fprintf(os.Stderr, "Error generating yaml output %s\n", err.Error()) - } - writeFile(g.yamlOutputPath, bytes, g.sourceName, "yaml") - } else { - fmt.Fprintf(os.Stderr, "No yaml output available.\n") - } - } - // Optionally write description in json format. - if g.jsonOutputPath != "" { - var bytes []byte - if rawInfo != nil { - bytes, _ = jsonwriter.Marshal(rawInfo) - if err != nil { - fmt.Fprintf(os.Stderr, "Error generating json output %s\n", err.Error()) - } - writeFile(g.jsonOutputPath, bytes, g.sourceName, "json") - } else { - fmt.Fprintf(os.Stderr, "No json output available.\n") - } - } -} - -// Perform all actions specified in the command-line options. -func (g *Gnostic) performActions(message proto.Message) (err error) { - // Optionally resolve internal references. - if g.resolveReferences { - if g.openAPIVersion == openAPIv2 { - document := message.(*openapi_v2.Document) - _, err = document.ResolveReferences(g.sourceName) - } else if g.openAPIVersion == openAPIv3 { - document := message.(*openapi_v3.Document) - _, err = document.ResolveReferences(g.sourceName) - } - if err != nil { - return err - } - } - // Optionally write proto in binary format. - if g.binaryOutputPath != "" { - g.writeBinaryOutput(message) - } - // Optionally write proto in text format. - if g.textOutputPath != "" { - g.writeTextOutput(message) - } - // Optionaly write document in yaml and/or json formats. - if g.yamlOutputPath != "" || g.jsonOutputPath != "" { - g.writeJSONYAMLOutput(message) - } - // Call all specified plugins. 
- for _, p := range g.pluginCalls { - err := p.perform(message, g.openAPIVersion, g.sourceName) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - defer os.Exit(-1) // run all plugins, even when some have errors - } - } - return nil -} - -func (g *Gnostic) main() { - var err error - g.readOptions() - g.validateOptions() - // Read the OpenAPI source. - bytes, err := compiler.ReadBytesForFile(g.sourceName) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - os.Exit(-1) - } - extension := strings.ToLower(filepath.Ext(g.sourceName)) - var message proto.Message - if extension == ".json" || extension == ".yaml" { - // Try to read the source as JSON/YAML. - message, err = g.readOpenAPIText(bytes) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - os.Exit(-1) - } - } else if extension == ".pb" { - // Try to read the source as a binary protocol buffer. - message, err = g.readOpenAPIBinary(bytes) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - os.Exit(-1) - } - } else { - err = errors.New("unknown file extension. 'json', 'yaml', and 'pb' are accepted") - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - os.Exit(-1) - } - // Perform actions specified by command options. - err = g.performActions(message) - if err != nil { - writeFile(g.errorOutputPath, g.errorBytes(err), g.sourceName, "errors") - os.Exit(-1) - } -} - -func main() { - g := newGnostic() - g.main() -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE deleted file mode 100644 index b2b065037fc..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go deleted file mode 100644 index d9e87b2f735..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -// gRPC Prometheus monitoring interceptors for client-side gRPC. - -package grpc_prometheus - -import ( - "io" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs. 
-func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - monitor := newClientReporter(Unary, method) - monitor.SentMessage() - err := invoker(ctx, method, req, reply, cc, opts...) - if err != nil { - monitor.ReceivedMessage() - } - monitor.Handled(grpc.Code(err)) - return err -} - -// StreamServerInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs. -func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - monitor := newClientReporter(clientStreamType(desc), method) - clientStream, err := streamer(ctx, desc, cc, method, opts...) - if err != nil { - monitor.Handled(grpc.Code(err)) - return nil, err - } - return &monitoredClientStream{clientStream, monitor}, nil -} - -func clientStreamType(desc *grpc.StreamDesc) grpcType { - if desc.ClientStreams && !desc.ServerStreams { - return ClientStream - } else if !desc.ClientStreams && desc.ServerStreams { - return ServerStream - } - return BidiStream -} - -// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters. -type monitoredClientStream struct { - grpc.ClientStream - monitor *clientReporter -} - -func (s *monitoredClientStream) SendMsg(m interface{}) error { - err := s.ClientStream.SendMsg(m) - if err == nil { - s.monitor.SentMessage() - } - return err -} - -func (s *monitoredClientStream) RecvMsg(m interface{}) error { - err := s.ClientStream.RecvMsg(m) - if err == nil { - s.monitor.ReceivedMessage() - } else if err == io.EOF { - s.monitor.Handled(codes.OK) - } else { - s.monitor.Handled(grpc.Code(err)) - } - return err -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go deleted file mode 100644 index 16b76155302..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. 
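The client-side interceptors removed in client.go above are normally installed once, when the gRPC connection is dialed. A minimal wiring sketch, assuming an illustrative target address and an insecure transport:

package main

import (
	"log"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)

func main() {
	// Instrument every unary and streaming RPC made through this connection.
	conn, err := grpc.Dial(
		"127.0.0.1:8080", // illustrative address
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	// RPCs on conn now increment grpc_client_started_total and the other
	// client counters in the default Prometheus registry.
}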
- -package grpc_prometheus - -import ( - "time" - - "google.golang.org/grpc/codes" - - prom "github.com/prometheus/client_golang/prometheus" -) - -var ( - clientStartedCounter = prom.NewCounterVec( - prom.CounterOpts{ - Namespace: "grpc", - Subsystem: "client", - Name: "started_total", - Help: "Total number of RPCs started on the client.", - }, []string{"grpc_type", "grpc_service", "grpc_method"}) - - clientHandledCounter = prom.NewCounterVec( - prom.CounterOpts{ - Namespace: "grpc", - Subsystem: "client", - Name: "handled_total", - Help: "Total number of RPCs completed by the client, regardless of success or failure.", - }, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}) - - clientStreamMsgReceived = prom.NewCounterVec( - prom.CounterOpts{ - Namespace: "grpc", - Subsystem: "client", - Name: "msg_received_total", - Help: "Total number of RPC stream messages received by the client.", - }, []string{"grpc_type", "grpc_service", "grpc_method"}) - - clientStreamMsgSent = prom.NewCounterVec( - prom.CounterOpts{ - Namespace: "grpc", - Subsystem: "client", - Name: "msg_sent_total", - Help: "Total number of gRPC stream messages sent by the client.", - }, []string{"grpc_type", "grpc_service", "grpc_method"}) - - clientHandledHistogramEnabled = false - clientHandledHistogramOpts = prom.HistogramOpts{ - Namespace: "grpc", - Subsystem: "client", - Name: "handling_seconds", - Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.", - Buckets: prom.DefBuckets, - } - clientHandledHistogram *prom.HistogramVec -) - -func init() { - prom.MustRegister(clientStartedCounter) - prom.MustRegister(clientHandledCounter) - prom.MustRegister(clientStreamMsgReceived) - prom.MustRegister(clientStreamMsgSent) -} - -// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs. -// Histogram metrics can be very expensive for Prometheus to retain and query. 
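Latency histograms are opt-in because of their storage and query cost. A caller that wants them with custom buckets might enable them like this; the bucket boundaries are illustrative, not a recommendation:

package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
)

func main() {
	// Record client-side handling time in a handful of coarse buckets.
	grpc_prometheus.EnableClientHandlingTimeHistogram(
		grpc_prometheus.WithHistogramBuckets([]float64{0.01, 0.05, 0.25, 1, 5}),
	)
}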
-func EnableClientHandlingTimeHistogram(opts ...HistogramOption) { - for _, o := range opts { - o(&clientHandledHistogramOpts) - } - if !clientHandledHistogramEnabled { - clientHandledHistogram = prom.NewHistogramVec( - clientHandledHistogramOpts, - []string{"grpc_type", "grpc_service", "grpc_method"}, - ) - prom.Register(clientHandledHistogram) - } - clientHandledHistogramEnabled = true -} - -type clientReporter struct { - rpcType grpcType - serviceName string - methodName string - startTime time.Time -} - -func newClientReporter(rpcType grpcType, fullMethod string) *clientReporter { - r := &clientReporter{rpcType: rpcType} - if clientHandledHistogramEnabled { - r.startTime = time.Now() - } - r.serviceName, r.methodName = splitMethodName(fullMethod) - clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() - return r -} - -func (r *clientReporter) ReceivedMessage() { - clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() -} - -func (r *clientReporter) SentMessage() { - clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() -} - -func (r *clientReporter) Handled(code codes.Code) { - clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() - if clientHandledHistogramEnabled { - clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go deleted file mode 100644 index f85c8c2374c..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -// gRPC Prometheus monitoring interceptors for server-side gRPC. - -package grpc_prometheus - -import ( - "golang.org/x/net/context" - "google.golang.org/grpc" -) - -// PreregisterServices takes a gRPC server and pre-initializes all counters to 0. -// This allows for easier monitoring in Prometheus (no missing metrics), and should be called *after* all services have -// been registered with the server. -func Register(server *grpc.Server) { - serviceInfo := server.GetServiceInfo() - for serviceName, info := range serviceInfo { - for _, mInfo := range info.Methods { - preRegisterMethod(serviceName, &mInfo) - } - } -} - -// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs. -func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - monitor := newServerReporter(Unary, info.FullMethod) - monitor.ReceivedMessage() - resp, err := handler(ctx, req) - monitor.Handled(grpc.Code(err)) - if err == nil { - monitor.SentMessage() - } - return resp, err -} - -// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs. 
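On the server side, the interceptors and the Register helper from server.go are typically wired at startup, with the default Prometheus registry exposed over HTTP. A sketch, assuming illustrative listen addresses:

package main

import (
	"log"
	"net"
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

func main() {
	srv := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	)

	// Application services would be registered on srv here; Register then
	// pre-populates the per-method metric labels so they report as 0.
	grpc_prometheus.Register(srv)

	// Serve the metrics endpoint on a side port (illustrative).
	go func() {
		http.Handle("/metrics", promhttp.Handler())
		log.Fatal(http.ListenAndServe(":9090", nil))
	}()

	lis, err := net.Listen("tcp", ":8080") // illustrative port
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	log.Fatal(srv.Serve(lis))
}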
-func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - monitor := newServerReporter(streamRpcType(info), info.FullMethod) - err := handler(srv, &monitoredServerStream{ss, monitor}) - monitor.Handled(grpc.Code(err)) - return err -} - -func streamRpcType(info *grpc.StreamServerInfo) grpcType { - if info.IsClientStream && !info.IsServerStream { - return ClientStream - } else if !info.IsClientStream && info.IsServerStream { - return ServerStream - } - return BidiStream -} - -// monitoredStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters. -type monitoredServerStream struct { - grpc.ServerStream - monitor *serverReporter -} - -func (s *monitoredServerStream) SendMsg(m interface{}) error { - err := s.ServerStream.SendMsg(m) - if err == nil { - s.monitor.SentMessage() - } - return err -} - -func (s *monitoredServerStream) RecvMsg(m interface{}) error { - err := s.ServerStream.RecvMsg(m) - if err == nil { - s.monitor.ReceivedMessage() - } - return err -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go deleted file mode 100644 index 628a890560f..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_prometheus - -import ( - "time" - - "google.golang.org/grpc/codes" - - prom "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc" -) - -type grpcType string - -const ( - Unary grpcType = "unary" - ClientStream grpcType = "client_stream" - ServerStream grpcType = "server_stream" - BidiStream grpcType = "bidi_stream" -) - -var ( - serverStartedCounter = prom.NewCounterVec( - prom.CounterOpts{ - Namespace: "grpc", - Subsystem: "server", - Name: "started_total", - Help: "Total number of RPCs started on the server.", - }, []string{"grpc_type", "grpc_service", "grpc_method"}) - - serverHandledCounter = prom.NewCounterVec( - prom.CounterOpts{ - Namespace: "grpc", - Subsystem: "server", - Name: "handled_total", - Help: "Total number of RPCs completed on the server, regardless of success or failure.", - }, []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}) - - serverStreamMsgReceived = prom.NewCounterVec( - prom.CounterOpts{ - Namespace: "grpc", - Subsystem: "server", - Name: "msg_received_total", - Help: "Total number of RPC stream messages received on the server.", - }, []string{"grpc_type", "grpc_service", "grpc_method"}) - - serverStreamMsgSent = prom.NewCounterVec( - prom.CounterOpts{ - Namespace: "grpc", - Subsystem: "server", - Name: "msg_sent_total", - Help: "Total number of gRPC stream messages sent by the server.", - }, []string{"grpc_type", "grpc_service", "grpc_method"}) - - serverHandledHistogramEnabled = false - serverHandledHistogramOpts = prom.HistogramOpts{ - Namespace: "grpc", - Subsystem: "server", - Name: "handling_seconds", - Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.", - Buckets: prom.DefBuckets, - } - serverHandledHistogram *prom.HistogramVec -) - -func init() { - prom.MustRegister(serverStartedCounter) - prom.MustRegister(serverHandledCounter) - prom.MustRegister(serverStreamMsgReceived) - prom.MustRegister(serverStreamMsgSent) -} - -type HistogramOption func(*prom.HistogramOpts) - -// 
WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on. -func WithHistogramBuckets(buckets []float64) HistogramOption { - return func(o *prom.HistogramOpts) { o.Buckets = buckets } -} - -// EnableHandlingTimeHistogram turns on recording of handling time of RPCs for server-side interceptors. -// Histogram metrics can be very expensive for Prometheus to retain and query. -func EnableHandlingTimeHistogram(opts ...HistogramOption) { - for _, o := range opts { - o(&serverHandledHistogramOpts) - } - if !serverHandledHistogramEnabled { - serverHandledHistogram = prom.NewHistogramVec( - serverHandledHistogramOpts, - []string{"grpc_type", "grpc_service", "grpc_method"}, - ) - prom.Register(serverHandledHistogram) - } - serverHandledHistogramEnabled = true -} - -type serverReporter struct { - rpcType grpcType - serviceName string - methodName string - startTime time.Time -} - -func newServerReporter(rpcType grpcType, fullMethod string) *serverReporter { - r := &serverReporter{rpcType: rpcType} - if serverHandledHistogramEnabled { - r.startTime = time.Now() - } - r.serviceName, r.methodName = splitMethodName(fullMethod) - serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() - return r -} - -func (r *serverReporter) ReceivedMessage() { - serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() -} - -func (r *serverReporter) SentMessage() { - serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() -} - -func (r *serverReporter) Handled(code codes.Code) { - serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() - if serverHandledHistogramEnabled { - serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) - } -} - -// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated. -func preRegisterMethod(serviceName string, mInfo *grpc.MethodInfo) { - methodName := mInfo.Name - methodType := string(typeFromMethodInfo(mInfo)) - // These are just references (no increments), as just referencing will create the labels but not set values. - serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName) - serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName) - serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName) - if serverHandledHistogramEnabled { - serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName) - } - for _, code := range allCodes { - serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String()) - } -} - -func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType { - if mInfo.IsClientStream == false && mInfo.IsServerStream == false { - return Unary - } - if mInfo.IsClientStream == true && mInfo.IsServerStream == false { - return ClientStream - } - if mInfo.IsClientStream == false && mInfo.IsServerStream == true { - return ServerStream - } - return BidiStream -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go deleted file mode 100644 index 372460ac497..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. 
-// See LICENSE for licensing terms. - -package grpc_prometheus - -import ( - "strings" - - "google.golang.org/grpc/codes" -) - -var ( - allCodes = []codes.Code{ - codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound, - codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted, - codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal, - codes.Unavailable, codes.DataLoss, - } -) - -func splitMethodName(fullMethodName string) (string, string) { - fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash - if i := strings.Index(fullMethodName, "/"); i >= 0 { - return fullMethodName[:i], fullMethodName[i+1:] - } - return "unknown", "unknown" -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt deleted file mode 100644 index 364516251b9..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015, Gengo, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of Gengo, Inc. nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
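The splitMethodName helper removed in util.go above is what turns a full gRPC method name into the grpc_service and grpc_method label values used by all of the counters. A self-contained sketch of the same split, with an illustrative method name:

package main

import (
	"fmt"
	"strings"
)

// splitMethodName mirrors the removed helper: "/package.Service/Method"
// yields ("package.Service", "Method").
func splitMethodName(fullMethodName string) (string, string) {
	fullMethodName = strings.TrimPrefix(fullMethodName, "/")
	if i := strings.Index(fullMethodName, "/"); i >= 0 {
		return fullMethodName[:i], fullMethodName[i+1:]
	}
	return "unknown", "unknown"
}

func main() {
	service, method := splitMethodName("/grpc.health.v1.Health/Check")
	fmt.Println(service, method) // grpc.health.v1.Health Check
}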
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go deleted file mode 100644 index f248c738b23..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ /dev/null @@ -1,143 +0,0 @@ -package runtime - -import ( - "fmt" - "net" - "net/http" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" -) - -// MetadataHeaderPrefix is prepended to HTTP headers in order to convert them to -// gRPC metadata for incoming requests processed by grpc-gateway -const MetadataHeaderPrefix = "Grpc-Metadata-" -// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to -// HTTP headers in a response handled by grpc-gateway -const MetadataTrailerPrefix = "Grpc-Trailer-" -const metadataGrpcTimeout = "Grpc-Timeout" - -const xForwardedFor = "X-Forwarded-For" -const xForwardedHost = "X-Forwarded-Host" - -var ( - // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound - // header isn't present. If the value is 0 the sent `context` will not have a timeout. - DefaultContextTimeout = 0 * time.Second -) - -/* -AnnotateContext adds context information such as metadata from the request. - -At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", -except that the forwarded destination is not another HTTP service but rather -a gRPC service. -*/ -func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, error) { - var pairs []string - timeout := DefaultContextTimeout - if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { - var err error - timeout, err = timeoutDecode(tm) - if err != nil { - return nil, grpc.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) - } - } - - for key, vals := range req.Header { - for _, val := range vals { - if key == "Authorization" { - pairs = append(pairs, "authorization", val) - continue - } - if strings.HasPrefix(key, MetadataHeaderPrefix) { - pairs = append(pairs, key[len(MetadataHeaderPrefix):], val) - } - } - } - if host := req.Header.Get(xForwardedHost); host != "" { - pairs = append(pairs, strings.ToLower(xForwardedHost), host) - } else if req.Host != "" { - pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) - } - - if addr := req.RemoteAddr; addr != "" { - if remoteIP, _, err := net.SplitHostPort(addr); err == nil { - if fwd := req.Header.Get(xForwardedFor); fwd == "" { - pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) - } else { - pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) - } - } else { - grpclog.Printf("invalid remote addr: %s", addr) - } - } - - if timeout != 0 { - ctx, _ = context.WithTimeout(ctx, timeout) - } - if len(pairs) == 0 { - return ctx, nil - } - return metadata.NewContext(ctx, metadata.Pairs(pairs...)), nil -} - -// ServerMetadata consists of metadata sent from gRPC server. 
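AnnotateContext above derives the call deadline from the Grpc-Timeout header, where the trailing letter selects the unit (H, M, S, m, u, n). A self-contained sketch of that decoding, with an illustrative header value:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// decodeGrpcTimeout mirrors timeoutDecode above for the supported units.
func decodeGrpcTimeout(s string) (time.Duration, error) {
	if len(s) < 2 {
		return 0, fmt.Errorf("timeout string is too short: %q", s)
	}
	units := map[byte]time.Duration{
		'H': time.Hour, 'M': time.Minute, 'S': time.Second,
		'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
	}
	unit, ok := units[s[len(s)-1]]
	if !ok {
		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
	}
	n, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, err
	}
	return time.Duration(n) * unit, nil
}

func main() {
	d, _ := decodeGrpcTimeout("100m") // "m" is milliseconds, not minutes
	fmt.Println(d)                    // 100ms
}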
-type ServerMetadata struct { - HeaderMD metadata.MD - TrailerMD metadata.MD -} - -type serverMetadataKey struct{} - -// NewServerMetadataContext creates a new context with ServerMetadata -func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { - return context.WithValue(ctx, serverMetadataKey{}, md) -} - -// ServerMetadataFromContext returns the ServerMetadata in ctx -func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { - md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) - return -} - -func timeoutDecode(s string) (time.Duration, error) { - size := len(s) - if size < 2 { - return 0, fmt.Errorf("timeout string is too short: %q", s) - } - d, ok := timeoutUnitToDuration(s[size-1]) - if !ok { - return 0, fmt.Errorf("timeout unit is not recognized: %q", s) - } - t, err := strconv.ParseInt(s[:size-1], 10, 64) - if err != nil { - return 0, err - } - return d * time.Duration(t), nil -} - -func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { - switch u { - case 'H': - return time.Hour, true - case 'M': - return time.Minute, true - case 'S': - return time.Second, true - case 'm': - return time.Millisecond, true - case 'u': - return time.Microsecond, true - case 'n': - return time.Nanosecond, true - default: - } - return -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go deleted file mode 100644 index 1af5cc4ebdd..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go +++ /dev/null @@ -1,58 +0,0 @@ -package runtime - -import ( - "strconv" -) - -// String just returns the given string. -// It is just for compatibility to other types. -func String(val string) (string, error) { - return val, nil -} - -// Bool converts the given string representation of a boolean value into bool. -func Bool(val string) (bool, error) { - return strconv.ParseBool(val) -} - -// Float64 converts the given string representation into representation of a floating point number into float64. -func Float64(val string) (float64, error) { - return strconv.ParseFloat(val, 64) -} - -// Float32 converts the given string representation of a floating point number into float32. -func Float32(val string) (float32, error) { - f, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -// Int64 converts the given string representation of an integer into int64. -func Int64(val string) (int64, error) { - return strconv.ParseInt(val, 0, 64) -} - -// Int32 converts the given string representation of an integer into int32. -func Int32(val string) (int32, error) { - i, err := strconv.ParseInt(val, 0, 32) - if err != nil { - return 0, err - } - return int32(i), nil -} - -// Uint64 converts the given string representation of an integer into uint64. -func Uint64(val string) (uint64, error) { - return strconv.ParseUint(val, 0, 64) -} - -// Uint32 converts the given string representation of an integer into uint32. 
-func Uint32(val string) (uint32, error) { - i, err := strconv.ParseUint(val, 0, 32) - if err != nil { - return 0, err - } - return uint32(i), nil -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go deleted file mode 100644 index b6e5ddf7a9f..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -/* -Package runtime contains runtime helper functions used by -servers which protoc-gen-grpc-gateway generates. -*/ -package runtime diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go deleted file mode 100644 index 0d3cb3bf3ca..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go +++ /dev/null @@ -1,121 +0,0 @@ -package runtime - -import ( - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" -) - -// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. -func HTTPStatusFromCode(code codes.Code) int { - switch code { - case codes.OK: - return http.StatusOK - case codes.Canceled: - return http.StatusRequestTimeout - case codes.Unknown: - return http.StatusInternalServerError - case codes.InvalidArgument: - return http.StatusBadRequest - case codes.DeadlineExceeded: - return http.StatusRequestTimeout - case codes.NotFound: - return http.StatusNotFound - case codes.AlreadyExists: - return http.StatusConflict - case codes.PermissionDenied: - return http.StatusForbidden - case codes.Unauthenticated: - return http.StatusUnauthorized - case codes.ResourceExhausted: - return http.StatusForbidden - case codes.FailedPrecondition: - return http.StatusPreconditionFailed - case codes.Aborted: - return http.StatusConflict - case codes.OutOfRange: - return http.StatusBadRequest - case codes.Unimplemented: - return http.StatusNotImplemented - case codes.Internal: - return http.StatusInternalServerError - case codes.Unavailable: - return http.StatusServiceUnavailable - case codes.DataLoss: - return http.StatusInternalServerError - } - - grpclog.Printf("Unknown gRPC error code: %v", code) - return http.StatusInternalServerError -} - -var ( - // HTTPError replies to the request with the error. - // You can set a custom function to this variable to customize error format. - HTTPError = DefaultHTTPError - // OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest - OtherErrorHandler = DefaultOtherErrorHandler -) - -type errorBody struct { - Error string `protobuf:"bytes,1,name=error" json:"error"` - Code int `protobuf:"bytes,2,name=code" json:"code"` -} - -//Make this also conform to proto.Message for builtin JSONPb Marshaler -func (e *errorBody) Reset() { *e = errorBody{} } -func (e *errorBody) String() string { return proto.CompactTextString(e) } -func (*errorBody) ProtoMessage() {} - -// DefaultHTTPError is the default implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a JSON object, -// which contains a member whose key is "error" and whose value is err.Error(). 
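Because HTTPError is a package-level variable, a gateway binary can swap in its own error renderer while keeping the HTTPStatusFromCode mapping. A minimal sketch; the plain-text body is an illustrative choice, not the library default:

package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Replace the default JSON error body with a plain-text response.
	runtime.HTTPError = func(ctx context.Context, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
		http.Error(w, grpc.ErrorDesc(err), runtime.HTTPStatusFromCode(grpc.Code(err)))
	}
}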
-func DefaultHTTPError(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - const fallback = `{"error": "failed to marshal error message"}` - - w.Header().Del("Trailer") - w.Header().Set("Content-Type", marshaler.ContentType()) - body := &errorBody{ - Error: grpc.ErrorDesc(err), - Code: int(grpc.Code(err)), - } - - buf, merr := marshaler.Marshal(body) - if merr != nil { - grpclog.Printf("Failed to marshal error message %q: %v", body, merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Printf("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Printf("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(grpc.Code(err)) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Printf("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. -// It simply writes a string representation of the given error into "w". -func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { - http.Error(w, msg, code) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go deleted file mode 100644 index d7040851ae9..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ /dev/null @@ -1,164 +0,0 @@ -package runtime - -import ( - "fmt" - "io" - "net/http" - "net/textproto" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime/internal" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" -) - -// ForwardResponseStream forwards the stream from gRPC server to REST client. 
-func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - f, ok := w.(http.Flusher) - if !ok { - grpclog.Printf("Flush not supported in %T", w) - http.Error(w, "unexpected type of web server", http.StatusInternalServerError) - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Printf("Failed to extract ServerMetadata from context") - http.Error(w, "unexpected error", http.StatusInternalServerError) - return - } - handleForwardResponseServerMetadata(w, md) - - w.Header().Set("Transfer-Encoding", "chunked") - w.Header().Set("Content-Type", marshaler.ContentType()) - if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) - f.Flush() - for { - resp, err := recv() - if err == io.EOF { - return - } - if err != nil { - handleForwardResponseStreamError(marshaler, w, err) - return - } - if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - handleForwardResponseStreamError(marshaler, w, err) - return - } - - buf, err := marshaler.Marshal(streamChunk(resp, nil)) - if err != nil { - grpclog.Printf("Failed to marshal response chunk: %v", err) - return - } - if _, err = fmt.Fprintf(w, "%s\n", buf); err != nil { - grpclog.Printf("Failed to send response chunk: %v", err) - return - } - f.Flush() - } -} - -func handleForwardResponseServerMetadata(w http.ResponseWriter, md ServerMetadata) { - for k, vs := range md.HeaderMD { - hKey := fmt.Sprintf("%s%s", MetadataHeaderPrefix, k) - for i := range vs { - w.Header().Add(hKey, vs[i]) - } - } -} - -func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { - for k := range md.TrailerMD { - tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) - w.Header().Add("Trailer", tKey) - } -} - -func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { - for k, vs := range md.TrailerMD { - tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) - for i := range vs { - w.Header().Add(tKey, vs[i]) - } - } -} - -// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. 
-func ForwardResponseMessage(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Printf("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, md) - handleForwardResponseTrailerHeader(w, md) - w.Header().Set("Content-Type", marshaler.ContentType()) - if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - HTTPError(ctx, marshaler, w, req, err) - return - } - - buf, err := marshaler.Marshal(resp) - if err != nil { - grpclog.Printf("Marshal error: %v", err) - HTTPError(ctx, marshaler, w, req, err) - return - } - - if _, err = w.Write(buf); err != nil { - grpclog.Printf("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { - if len(opts) == 0 { - return nil - } - for _, opt := range opts { - if err := opt(ctx, w, resp); err != nil { - grpclog.Printf("Error handling ForwardResponseOptions: %v", err) - return err - } - } - return nil -} - -func handleForwardResponseStreamError(marshaler Marshaler, w http.ResponseWriter, err error) { - buf, merr := marshaler.Marshal(streamChunk(nil, err)) - if merr != nil { - grpclog.Printf("Failed to marshal an error: %v", merr) - return - } - if _, werr := fmt.Fprintf(w, "%s\n", buf); werr != nil { - grpclog.Printf("Failed to notify error to client: %v", werr) - return - } -} - -func streamChunk(result proto.Message, err error) map[string]proto.Message { - if err != nil { - grpcCode := grpc.Code(err) - httpCode := HTTPStatusFromCode(grpcCode) - return map[string]proto.Message{ - "error": &internal.StreamError{ - GrpcCode: int32(grpcCode), - HttpCode: int32(httpCode), - Message: err.Error(), - HttpStatus: http.StatusText(httpCode), - }, - } - } - if result == nil { - return streamChunk(nil, fmt.Errorf("empty response")) - } - return map[string]proto.Message{"result": result} -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go deleted file mode 100644 index 6f837cfd5d9..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go +++ /dev/null @@ -1,65 +0,0 @@ -// Code generated by protoc-gen-go. -// source: runtime/internal/stream_chunk.proto -// DO NOT EDIT! - -/* -Package internal is a generated protocol buffer package. - -It is generated from these files: - runtime/internal/stream_chunk.proto - -It has these top-level messages: - StreamError -*/ -package internal - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// StreamError is a response type which is returned when -// streaming rpc returns an error. 
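For reference, the error chunk built by streamChunk above serializes into a newline-delimited JSON object shaped like the one printed below; the code and message values are illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// streamError mirrors the JSON field names of internal.StreamError above.
type streamError struct {
	GrpcCode   int32  `json:"grpc_code"`
	HttpCode   int32  `json:"http_code"`
	Message    string `json:"message"`
	HttpStatus string `json:"http_status"`
}

func main() {
	// One chunk of a response stream that carries an error instead of a result.
	chunk := map[string]interface{}{
		"error": streamError{
			GrpcCode:   14, // codes.Unavailable, as an example
			HttpCode:   http.StatusServiceUnavailable,
			Message:    "upstream unavailable", // illustrative message
			HttpStatus: http.StatusText(http.StatusServiceUnavailable),
		},
	}
	buf, _ := json.Marshal(chunk)
	fmt.Println(string(buf))
}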
-type StreamError struct { - GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode" json:"grpc_code,omitempty"` - HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode" json:"http_code,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` - HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus" json:"http_status,omitempty"` -} - -func (m *StreamError) Reset() { *m = StreamError{} } -func (m *StreamError) String() string { return proto.CompactTextString(m) } -func (*StreamError) ProtoMessage() {} -func (*StreamError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func init() { - proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") -} - -func init() { proto.RegisterFile("runtime/internal/stream_chunk.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 181 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30, - 0x14, 0x85, 0xd3, 0xdf, 0x1f, 0x85, 0xcb, 0x46, 0x1c, 0x9a, 0x38, 0x48, 0x74, 0x61, 0x82, 0xc1, - 0x37, 0xd0, 0xf8, 0x02, 0xb0, 0xb9, 0x90, 0x0a, 0x37, 0x40, 0x94, 0x96, 0xdc, 0x5e, 0x62, 0x5c, - 0x7d, 0x72, 0xd3, 0x22, 0xe3, 0xf9, 0xbe, 0x73, 0x92, 0x03, 0x07, 0x9a, 0x34, 0xf7, 0x03, 0xe6, - 0xbd, 0x66, 0x24, 0xad, 0x1e, 0xb9, 0x65, 0x42, 0x35, 0x54, 0x75, 0x37, 0xe9, 0x7b, 0x36, 0x92, - 0x61, 0x13, 0x6f, 0x5a, 0x1a, 0xeb, 0xac, 0x55, 0x8c, 0x4f, 0xf5, 0xca, 0xbe, 0x8b, 0xfd, 0x5b, - 0x40, 0x54, 0xfa, 0xf2, 0x85, 0xc8, 0x50, 0xbc, 0x85, 0xd0, 0xf5, 0xaa, 0xda, 0x34, 0x28, 0x45, - 0x22, 0xd2, 0xff, 0x22, 0x70, 0xe0, 0x6c, 0x1a, 0x74, 0xb2, 0x63, 0x1e, 0x67, 0xf9, 0x33, 0x4b, - 0x07, 0xbc, 0x94, 0xb0, 0x1e, 0xd0, 0x5a, 0xd5, 0xa2, 0xfc, 0x4d, 0x44, 0x1a, 0x16, 0x4b, 0x8c, - 0x77, 0x10, 0xf9, 0x99, 0x65, 0xc5, 0x93, 0x95, 0x7f, 0xde, 0x82, 0x43, 0xa5, 0x27, 0x27, 0xb8, - 0x06, 0xcb, 0xf3, 0xdb, 0xca, 0xbf, 0x3d, 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x07, 0x92, - 0xb6, 0xd4, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go deleted file mode 100644 index 0acd2ca29ef..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go +++ /dev/null @@ -1,37 +0,0 @@ -package runtime - -import ( - "encoding/json" - "io" -) - -// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON -// with the standard "encoding/json" package of Golang. -// Although it is generally faster for simple proto messages than JSONPb, -// it does not support advanced features of protobuf, e.g. map, oneof, .... -type JSONBuiltin struct{} - -// ContentType always Returns "application/json". -func (*JSONBuiltin) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { - return json.NewDecoder(r) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". 
-func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { - return json.NewEncoder(w) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go deleted file mode 100644 index 49f13f7fc74..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go +++ /dev/null @@ -1,184 +0,0 @@ -package runtime - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" -) - -// JSONPb is a Marshaler which marshals/unmarshals into/from JSON -// with the "github.com/golang/protobuf/jsonpb". -// It supports fully functionality of protobuf unlike JSONBuiltin. -type JSONPb jsonpb.Marshaler - -// ContentType always returns "application/json". -func (*JSONPb) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -// Currently it can marshal only proto.Message. -// TODO(yugui) Support fields of primitive types in a message. -func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { - if _, ok := v.(proto.Message); !ok { - return j.marshalNonProtoField(v) - } - - var buf bytes.Buffer - if err := j.marshalTo(&buf, v); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { - p, ok := v.(proto.Message) - if !ok { - buf, err := j.marshalNonProtoField(v) - if err != nil { - return err - } - _, err = w.Write(buf) - return err - } - return (*jsonpb.Marshaler)(j).Marshal(w, p) -} - -// marshalNonProto marshals a non-message field of a protobuf message. -// This function does not correctly marshals arbitary data structure into JSON, -// but it is only capable of marshaling non-message field values of protobuf, -// i.e. primitive types, enums; pointers to primitives or enums; maps from -// integer/string types to primitives/enums/pointers to messages. -func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { - rv := reflect.ValueOf(v) - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return []byte("null"), nil - } - rv = rv.Elem() - } - - if rv.Kind() == reflect.Map { - m := make(map[string]*json.RawMessage) - for _, k := range rv.MapKeys() { - buf, err := j.Marshal(rv.MapIndex(k).Interface()) - if err != nil { - return nil, err - } - m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) - } - if j.Indent != "" { - return json.MarshalIndent(m, "", j.Indent) - } - return json.Marshal(m) - } - if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { - return json.Marshal(enum.String()) - } - return json.Marshal(rv.Interface()) -} - -// Unmarshal unmarshals JSON "data" into "v" -// Currently it can marshal only proto.Message. -// TODO(yugui) Support fields of primitive types in a message. -func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { - return unmarshalJSONPb(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JSONPb) NewDecoder(r io.Reader) Decoder { - d := json.NewDecoder(r) - return DecoderFunc(func(v interface{}) error { return decodeJSONPb(d, v) }) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". 
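Since JSONPb is a defined type over jsonpb.Marshaler, its output options are set directly on the struct literal; the settings below are illustrative:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Emit original proto field names, render enums as integers, and indent.
	m := &runtime.JSONPb{
		OrigName:    true,
		EnumsAsInts: true,
		Indent:      "  ",
	}
	fmt.Println(m.ContentType()) // application/json
}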
-func (j *JSONPb) NewEncoder(w io.Writer) Encoder { - return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) }) -} - -func unmarshalJSONPb(data []byte, v interface{}) error { - d := json.NewDecoder(bytes.NewReader(data)) - return decodeJSONPb(d, v) -} - -func decodeJSONPb(d *json.Decoder, v interface{}) error { - p, ok := v.(proto.Message) - if !ok { - return decodeNonProtoField(d, v) - } - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} - return unmarshaler.UnmarshalNext(d, p) -} - -func decodeNonProtoField(d *json.Decoder, v interface{}) error { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - return fmt.Errorf("%T is not a pointer", v) - } - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - if rv.Type().ConvertibleTo(typeProtoMessage) { - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} - return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) - } - rv = rv.Elem() - } - if rv.Kind() == reflect.Map { - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - conv, ok := convFromType[rv.Type().Key().Kind()] - if !ok { - return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) - } - - m := make(map[string]*json.RawMessage) - if err := d.Decode(&m); err != nil { - return err - } - for k, v := range m { - result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - bk := result[0] - bv := reflect.New(rv.Type().Elem()) - if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { - return err - } - rv.SetMapIndex(bk, bv.Elem()) - } - return nil - } - if _, ok := rv.Interface().(protoEnum); ok { - var repr interface{} - if err := d.Decode(&repr); err != nil { - return err - } - switch repr.(type) { - case string: - // TODO(yugui) Should use proto.StructProperties? - return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) - case float64: - rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) - return nil - default: - return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) - } - } - return d.Decode(v) -} - -type protoEnum interface { - fmt.Stringer - EnumDescriptor() ([]byte, []int) -} - -var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go deleted file mode 100644 index 6d434f13cb4..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go +++ /dev/null @@ -1,42 +0,0 @@ -package runtime - -import ( - "io" -) - -// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. -type Marshaler interface { - // Marshal marshals "v" into byte sequence. - Marshal(v interface{}) ([]byte, error) - // Unmarshal unmarshals "data" into "v". - // "v" must be a pointer value. - Unmarshal(data []byte, v interface{}) error - // NewDecoder returns a Decoder which reads byte sequence from "r". - NewDecoder(r io.Reader) Decoder - // NewEncoder returns an Encoder which writes bytes sequence into "w". - NewEncoder(w io.Writer) Encoder - // ContentType returns the Content-Type which this marshaler is responsible for. - ContentType() string -} - -// Decoder decodes a byte sequence -type Decoder interface { - Decode(v interface{}) error -} - -// Encoder encodes gRPC payloads / fields into byte sequence. 
-type Encoder interface { - Encode(v interface{}) error -} - -// DecoderFunc adapts an decoder function into Decoder. -type DecoderFunc func(v interface{}) error - -// Decode delegates invocations to the underlying function itself. -func (f DecoderFunc) Decode(v interface{}) error { return f(v) } - -// EncoderFunc adapts an encoder function into Encoder -type EncoderFunc func(v interface{}) error - -// Encode delegates invocations to the underlying function itself. -func (f EncoderFunc) Encode(v interface{}) error { return f(v) } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go deleted file mode 100644 index 928f0733214..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go +++ /dev/null @@ -1,91 +0,0 @@ -package runtime - -import ( - "errors" - "net/http" -) - -// MIMEWildcard is the fallback MIME type used for requests which do not match -// a registered MIME type. -const MIMEWildcard = "*" - -var ( - acceptHeader = http.CanonicalHeaderKey("Accept") - contentTypeHeader = http.CanonicalHeaderKey("Content-Type") - - defaultMarshaler = &JSONPb{OrigName: true} -) - -// MarshalerForRequest returns the inbound/outbound marshalers for this request. -// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. -// If it isn't set (or the request Content-Type is empty), checks for "*". -// If there are multiple Content-Type headers set, choose the first one that it can -// exactly match in the registry. -// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. -func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { - for _, acceptVal := range r.Header[acceptHeader] { - if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { - outbound = m - break - } - } - - for _, contentTypeVal := range r.Header[contentTypeHeader] { - if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok { - inbound = m - break - } - } - - if inbound == nil { - inbound = mux.marshalers.mimeMap[MIMEWildcard] - } - if outbound == nil { - outbound = inbound - } - - return inbound, outbound -} - -// marshalerRegistry is a mapping from MIME types to Marshalers. -type marshalerRegistry struct { - mimeMap map[string]Marshaler -} - -// add adds a marshaler for a case-sensitive MIME type string ("*" to match any -// MIME type). -func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { - if len(mime) == 0 { - return errors.New("empty MIME type") - } - - m.mimeMap[mime] = marshaler - - return nil -} - -// makeMarshalerMIMERegistry returns a new registry of marshalers. -// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. -// -// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "applicaton/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. -// "*" can be used to match any Content-Type. -// This can be attached to a ServerMux with the marshaler option. -func makeMarshalerMIMERegistry() marshalerRegistry { - return marshalerRegistry{ - mimeMap: map[string]Marshaler{ - MIMEWildcard: defaultMarshaler, - }, - } -} - -// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound -// Marshalers to a MIME type in mux. 
-func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { - return func(mux *ServeMux) { - if err := mux.marshalers.add(mime, marshaler); err != nil { - panic(err) - } - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go deleted file mode 100644 index 2e6c5621302..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ /dev/null @@ -1,132 +0,0 @@ -package runtime - -import ( - "net/http" - "strings" - - "golang.org/x/net/context" - - "github.com/golang/protobuf/proto" -) - -// A HandlerFunc handles a specific pair of path pattern and HTTP method. -type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) - -// ServeMux is a request multiplexer for grpc-gateway. -// It matches http requests to patterns and invokes the corresponding handler. -type ServeMux struct { - // handlers maps HTTP method to a list of handlers. - handlers map[string][]handler - forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error - marshalers marshalerRegistry -} - -// ServeMuxOption is an option that can be given to a ServeMux on construction. -type ServeMuxOption func(*ServeMux) - -// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. -// -// forwardResponseOption is an option that will be called on the relevant context.Context, -// http.ResponseWriter, and proto.Message before every forwarded response. -// -// The message may be nil in the case where just a header is being sent. -func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) - } -} - -// NewServeMux returns a new ServeMux whose internal mapping is empty. -func NewServeMux(opts ...ServeMuxOption) *ServeMux { - serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - } - - for _, opt := range opts { - opt(serveMux) - } - return serveMux -} - -// Handle associates "h" to the pair of HTTP method and path pattern. -func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { - s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) -} - -// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
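A gateway mux is normally assembled from these options at startup. A minimal sketch; the extra MIME type, header name, and port are illustrative assumptions:

package main

import (
	"log"
	"net/http"

	"github.com/golang/protobuf/proto"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
)

func main() {
	mux := runtime.NewServeMux(
		// Use the proto-aware JSON marshaler for an explicit Content-Type in
		// addition to the built-in "*" wildcard default.
		runtime.WithMarshalerOption("application/jsonpb", &runtime.JSONPb{OrigName: true}),
		// Stamp a header on every forwarded response.
		runtime.WithForwardResponseOption(func(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
			w.Header().Set("X-Gateway", "grpc-gateway") // illustrative header
			return nil
		}),
	)
	// Generated Register*HandlerFromEndpoint functions would attach routes to
	// mux here before it is handed to the HTTP server.
	log.Fatal(http.ListenAndServe(":8081", mux)) // illustrative port
}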
-func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - path := r.URL.Path - if !strings.HasPrefix(path, "/") { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) - return - } - - components := strings.Split(path[1:], "/") - l := len(components) - var verb string - if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - return - } else if idx > 0 { - c := components[l-1] - components[l-1], verb = c[:idx], c[idx+1:] - } - - if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && isPathLengthFallback(r) { - r.Method = strings.ToUpper(override) - if err := r.ParseForm(); err != nil { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - return - } - } - for _, h := range s.handlers[r.Method] { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - h.h(w, r, pathParams) - return - } - - // lookup other methods to handle fallback from GET to POST and - // to determine if it is MethodNotAllowed or NotFound. - for m, handlers := range s.handlers { - if m == r.Method { - continue - } - for _, h := range handlers { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - // X-HTTP-Method-Override is optional. Always allow fallback to POST. - if isPathLengthFallback(r) { - if err := r.ParseForm(); err != nil { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - return - } - h.h(w, r, pathParams) - return - } - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) - return - } - } - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) -} - -// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. -func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { - return s.forwardResponseOptions -} - -func isPathLengthFallback(r *http.Request) bool { - return r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" -} - -type handler struct { - pat Pattern - h HandlerFunc -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go deleted file mode 100644 index 3947dbea023..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ /dev/null @@ -1,227 +0,0 @@ -package runtime - -import ( - "errors" - "fmt" - "strings" - - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -var ( - // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. - ErrNotMatch = errors.New("not match to the path pattern") - // ErrInvalidPattern indicates that the given definition of Pattern is not valid. - ErrInvalidPattern = errors.New("invalid pattern") -) - -type op struct { - code utilities.OpCode - operand int -} - -// Pattern is a template pattern of http request paths defined in third_party/googleapis/google/api/http.proto. -type Pattern struct { - // ops is a list of operations - ops []op - // pool is a constant pool indexed by the operands or vars. 
- pool []string - // vars is a list of variables names to be bound by this pattern - vars []string - // stacksize is the max depth of the stack - stacksize int - // tailLen is the length of the fixed-size segments after a deep wildcard - tailLen int - // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. - verb string -} - -// NewPattern returns a new Pattern from the given definition values. -// "ops" is a sequence of op codes. "pool" is a constant pool. -// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. -// "version" must be 1 for now. -// It returns an error if the given definition is invalid. -func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { - if version != 1 { - grpclog.Printf("unsupported version: %d", version) - return Pattern{}, ErrInvalidPattern - } - - l := len(ops) - if l%2 != 0 { - grpclog.Printf("odd number of ops codes: %d", l) - return Pattern{}, ErrInvalidPattern - } - - var ( - typedOps []op - stack, maxstack int - tailLen int - pushMSeen bool - vars []string - ) - for i := 0; i < l; i += 2 { - op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush: - if pushMSeen { - tailLen++ - } - stack++ - case utilities.OpPushM: - if pushMSeen { - grpclog.Printf("pushM appears twice") - return Pattern{}, ErrInvalidPattern - } - pushMSeen = true - stack++ - case utilities.OpLitPush: - if op.operand < 0 || len(pool) <= op.operand { - grpclog.Printf("negative literal index: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - if pushMSeen { - tailLen++ - } - stack++ - case utilities.OpConcatN: - if op.operand <= 0 { - grpclog.Printf("negative concat size: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - stack -= op.operand - if stack < 0 { - grpclog.Print("stack underflow") - return Pattern{}, ErrInvalidPattern - } - stack++ - case utilities.OpCapture: - if op.operand < 0 || len(pool) <= op.operand { - grpclog.Printf("variable name index out of bound: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - v := pool[op.operand] - op.operand = len(vars) - vars = append(vars, v) - stack-- - if stack < 0 { - grpclog.Printf("stack underflow") - return Pattern{}, ErrInvalidPattern - } - default: - grpclog.Printf("invalid opcode: %d", op.code) - return Pattern{}, ErrInvalidPattern - } - - if maxstack < stack { - maxstack = stack - } - typedOps = append(typedOps, op) - } - return Pattern{ - ops: typedOps, - pool: pool, - vars: vars, - stacksize: maxstack, - tailLen: tailLen, - verb: verb, - }, nil -} - -// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. -func MustPattern(p Pattern, err error) Pattern { - if err != nil { - grpclog.Fatalf("Pattern initialization failed: %v", err) - } - return p -} - -// Match examines components if it matches to the Pattern. -// If it matches, the function returns a mapping from field paths to their captured values. -// If otherwise, the function returns an error. 
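// For reference, a minimal sketch of how the runtime ServeMux and Pattern
// types removed in this hunk were typically wired together. The "/healthz"
// path, the handler body, and the listen address are illustrative
// assumptions, not taken from this repository.
package main

import (
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	// OpLitPush with operand 0 matches the literal pool[0], so this pattern
	// matches exactly "/healthz" and binds no path variables.
	pat := runtime.MustPattern(runtime.NewPattern(
		1, []int{int(utilities.OpLitPush), 0}, []string{"healthz"}, ""))

	mux := runtime.NewServeMux()
	mux.Handle("GET", pat, func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
		w.Write([]byte("ok"))
	})
	log.Fatal(http.ListenAndServe(":8080", mux))
}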
-func (p Pattern) Match(components []string, verb string) (map[string]string, error) { - if p.verb != verb { - return nil, ErrNotMatch - } - - var pos int - stack := make([]string, 0, p.stacksize) - captured := make([]string, len(p.vars)) - l := len(components) - for _, op := range p.ops { - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush, utilities.OpLitPush: - if pos >= l { - return nil, ErrNotMatch - } - c := components[pos] - if op.code == utilities.OpLitPush { - if lit := p.pool[op.operand]; c != lit { - return nil, ErrNotMatch - } - } - stack = append(stack, c) - pos++ - case utilities.OpPushM: - end := len(components) - if end < pos+p.tailLen { - return nil, ErrNotMatch - } - end -= p.tailLen - stack = append(stack, strings.Join(components[pos:end], "/")) - pos = end - case utilities.OpConcatN: - n := op.operand - l := len(stack) - n - stack = append(stack[:l], strings.Join(stack[l:], "/")) - case utilities.OpCapture: - n := len(stack) - 1 - captured[op.operand] = stack[n] - stack = stack[:n] - } - } - if pos < l { - return nil, ErrNotMatch - } - bindings := make(map[string]string) - for i, val := range captured { - bindings[p.vars[i]] = val - } - return bindings, nil -} - -// Verb returns the verb part of the Pattern. -func (p Pattern) Verb() string { return p.verb } - -func (p Pattern) String() string { - var stack []string - for _, op := range p.ops { - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush: - stack = append(stack, "*") - case utilities.OpLitPush: - stack = append(stack, p.pool[op.operand]) - case utilities.OpPushM: - stack = append(stack, "**") - case utilities.OpConcatN: - n := op.operand - l := len(stack) - n - stack = append(stack[:l], strings.Join(stack[l:], "/")) - case utilities.OpCapture: - n := len(stack) - 1 - stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) - } - } - segs := strings.Join(stack, "/") - if p.verb != "" { - return fmt.Sprintf("/%s:%s", segs, p.verb) - } - return "/" + segs -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go deleted file mode 100644 index a3151e2a552..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go +++ /dev/null @@ -1,80 +0,0 @@ -package runtime - -import ( - "github.com/golang/protobuf/proto" -) - -// StringP returns a pointer to a string whose pointee is same as the given string value. -func StringP(val string) (*string, error) { - return proto.String(val), nil -} - -// BoolP parses the given string representation of a boolean value, -// and returns a pointer to a bool whose value is same as the parsed value. -func BoolP(val string) (*bool, error) { - b, err := Bool(val) - if err != nil { - return nil, err - } - return proto.Bool(b), nil -} - -// Float64P parses the given string representation of a floating point number, -// and returns a pointer to a float64 whose value is same as the parsed number. -func Float64P(val string) (*float64, error) { - f, err := Float64(val) - if err != nil { - return nil, err - } - return proto.Float64(f), nil -} - -// Float32P parses the given string representation of a floating point number, -// and returns a pointer to a float32 whose value is same as the parsed number. 
-func Float32P(val string) (*float32, error) { - f, err := Float32(val) - if err != nil { - return nil, err - } - return proto.Float32(f), nil -} - -// Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. -func Int64P(val string) (*int64, error) { - i, err := Int64(val) - if err != nil { - return nil, err - } - return proto.Int64(i), nil -} - -// Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. -func Int32P(val string) (*int32, error) { - i, err := Int32(val) - if err != nil { - return nil, err - } - return proto.Int32(i), err -} - -// Uint64P parses the given string representation of an integer -// and returns a pointer to a uint64 whose value is same as the parsed integer. -func Uint64P(val string) (*uint64, error) { - i, err := Uint64(val) - if err != nil { - return nil, err - } - return proto.Uint64(i), err -} - -// Uint32P parses the given string representation of an integer -// and returns a pointer to a uint32 whose value is same as the parsed integer. -func Uint32P(val string) (*uint32, error) { - i, err := Uint32(val) - if err != nil { - return nil, err - } - return proto.Uint32(i), err -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go deleted file mode 100644 index 56a919a52f1..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ /dev/null @@ -1,140 +0,0 @@ -package runtime - -import ( - "fmt" - "net/url" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -// PopulateQueryParameters populates "values" into "msg". -// A value is ignored if its key starts with one of the elements in "filter". -func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { - for key, values := range values { - fieldPath := strings.Split(key, ".") - if filter.HasCommonPrefix(fieldPath) { - continue - } - if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { - return err - } - } - return nil -} - -// PopulateFieldFromPath sets a value in a nested Protobuf structure. -// It instantiates missing protobuf fields as it goes. 
-func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { - fieldPath := strings.Split(fieldPathString, ".") - return populateFieldValueFromPath(msg, fieldPath, []string{value}) -} - -func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { - m := reflect.ValueOf(msg) - if m.Kind() != reflect.Ptr { - return fmt.Errorf("unexpected type %T: %v", msg, msg) - } - m = m.Elem() - for i, fieldName := range fieldPath { - isLast := i == len(fieldPath)-1 - if !isLast && m.Kind() != reflect.Struct { - return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) - } - f := fieldByProtoName(m, fieldName) - if !f.IsValid() { - grpclog.Printf("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) - return nil - } - - switch f.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: - m = f - case reflect.Slice: - // TODO(yugui) Support []byte - if !isLast { - return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) - } - return populateRepeatedField(f, values) - case reflect.Ptr: - if f.IsNil() { - m = reflect.New(f.Type().Elem()) - f.Set(m) - } - m = f.Elem() - continue - case reflect.Struct: - m = f - continue - default: - return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) - } - } - switch len(values) { - case 0: - return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) - case 1: - default: - grpclog.Printf("too many field values: %s", strings.Join(fieldPath, ".")) - } - return populateField(m, values[0]) -} - -// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". -// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) reflect.Value { - props := proto.GetProperties(m.Type()) - for _, p := range props.Prop { - if p.OrigName == name { - return m.FieldByName(p.Name) - } - } - return reflect.Value{} -} - -func populateRepeatedField(f reflect.Value, values []string) error { - elemType := f.Type().Elem() - conv, ok := convFromType[elemType.Kind()] - if !ok { - return fmt.Errorf("unsupported field type %s", elemType) - } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values))) - for i, v := range values { - result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Index(i).Set(result[0]) - } - return nil -} - -func populateField(f reflect.Value, value string) error { - conv, ok := convFromType[f.Kind()] - if !ok { - return fmt.Errorf("unsupported field type %T", f) - } - result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Set(result[0]) - return nil -} - -var ( - convFromType = map[reflect.Kind]reflect.Value{ - reflect.String: reflect.ValueOf(String), - reflect.Bool: reflect.ValueOf(Bool), - reflect.Float64: reflect.ValueOf(Float64), - reflect.Float32: reflect.ValueOf(Float32), - reflect.Int64: reflect.ValueOf(Int64), - reflect.Int32: reflect.ValueOf(Int32), - reflect.Uint64: reflect.ValueOf(Uint64), - reflect.Uint32: reflect.ValueOf(Uint32), - // TODO(yugui) Support []byte - } -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go deleted file mode 100644 index cf79a4d5886..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package utilities provides members for internal use in grpc-gateway. -package utilities diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go deleted file mode 100644 index 28ad9461f86..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go +++ /dev/null @@ -1,22 +0,0 @@ -package utilities - -// An OpCode is a opcode of compiled path patterns. -type OpCode int - -// These constants are the valid values of OpCode. -const ( - // OpNop does nothing - OpNop = OpCode(iota) - // OpPush pushes a component to stack - OpPush - // OpLitPush pushes a component to stack if it matches to the literal - OpLitPush - // OpPushM concatenates the remaining components and pushes it to stack - OpPushM - // OpConcatN pops N items from stack, concatenates them and pushes it back to stack - OpConcatN - // OpCapture pops an item and binds it to the variable - OpCapture - // OpEnd is the least postive invalid opcode. - OpEnd -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go deleted file mode 100644 index c2b7b30dd91..00000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go +++ /dev/null @@ -1,177 +0,0 @@ -package utilities - -import ( - "sort" -) - -// DoubleArray is a Double Array implementation of trie on sequences of strings. 
-type DoubleArray struct { - // Encoding keeps an encoding from string to int - Encoding map[string]int - // Base is the base array of Double Array - Base []int - // Check is the check array of Double Array - Check []int -} - -// NewDoubleArray builds a DoubleArray from a set of sequences of strings. -func NewDoubleArray(seqs [][]string) *DoubleArray { - da := &DoubleArray{Encoding: make(map[string]int)} - if len(seqs) == 0 { - return da - } - - encoded := registerTokens(da, seqs) - sort.Sort(byLex(encoded)) - - root := node{row: -1, col: -1, left: 0, right: len(encoded)} - addSeqs(da, encoded, 0, root) - - for i := len(da.Base); i > 0; i-- { - if da.Check[i-1] != 0 { - da.Base = da.Base[:i] - da.Check = da.Check[:i] - break - } - } - return da -} - -func registerTokens(da *DoubleArray, seqs [][]string) [][]int { - var result [][]int - for _, seq := range seqs { - var encoded []int - for _, token := range seq { - if _, ok := da.Encoding[token]; !ok { - da.Encoding[token] = len(da.Encoding) - } - encoded = append(encoded, da.Encoding[token]) - } - result = append(result, encoded) - } - for i := range result { - result[i] = append(result[i], len(da.Encoding)) - } - return result -} - -type node struct { - row, col int - left, right int -} - -func (n node) value(seqs [][]int) int { - return seqs[n.row][n.col] -} - -func (n node) children(seqs [][]int) []*node { - var result []*node - lastVal := int(-1) - last := new(node) - for i := n.left; i < n.right; i++ { - if lastVal == seqs[i][n.col+1] { - continue - } - last.right = i - last = &node{ - row: i, - col: n.col + 1, - left: i, - } - result = append(result, last) - } - last.right = n.right - return result -} - -func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { - ensureSize(da, pos) - - children := n.children(seqs) - var i int - for i = 1; ; i++ { - ok := func() bool { - for _, child := range children { - code := child.value(seqs) - j := i + code - ensureSize(da, j) - if da.Check[j] != 0 { - return false - } - } - return true - }() - if ok { - break - } - } - da.Base[pos] = i - for _, child := range children { - code := child.value(seqs) - j := i + code - da.Check[j] = pos + 1 - } - terminator := len(da.Encoding) - for _, child := range children { - code := child.value(seqs) - if code == terminator { - continue - } - j := i + code - addSeqs(da, seqs, j, *child) - } -} - -func ensureSize(da *DoubleArray, i int) { - for i >= len(da.Base) { - da.Base = append(da.Base, make([]int, len(da.Base)+1)...) - da.Check = append(da.Check, make([]int, len(da.Check)+1)...) - } -} - -type byLex [][]int - -func (l byLex) Len() int { return len(l) } -func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l byLex) Less(i, j int) bool { - si := l[i] - sj := l[j] - var k int - for k = 0; k < len(si) && k < len(sj); k++ { - if si[k] < sj[k] { - return true - } - if si[k] > sj[k] { - return false - } - } - if k < len(sj) { - return true - } - return false -} - -// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. 
-func (da *DoubleArray) HasCommonPrefix(seq []string) bool { - if len(da.Base) == 0 { - return false - } - - var i int - for _, t := range seq { - code, ok := da.Encoding[t] - if !ok { - break - } - j := da.Base[i] + code - if len(da.Check) <= j || da.Check[j] != i+1 { - break - } - i = j - } - j := da.Base[i] + len(da.Encoding) - if len(da.Check) <= j || da.Check[j] != i+1 { - return false - } - return true -} diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go deleted file mode 100644 index 337d963296c..00000000000 --- a/vendor/github.com/hashicorp/golang-lru/2q.go +++ /dev/null @@ -1,212 +0,0 @@ -package lru - -import ( - "fmt" - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -const ( - // Default2QRecentRatio is the ratio of the 2Q cache dedicated - // to recently added entries that have only been accessed once. - Default2QRecentRatio = 0.25 - - // Default2QGhostEntries is the default ratio of ghost - // entries kept to track entries recently evicted - Default2QGhostEntries = 0.50 -) - -// TwoQueueCache is a thread-safe fixed size 2Q cache. -// 2Q is an enhancement over the standard LRU cache -// in that it tracks both frequently and recently used -// entries separately. This avoids a burst in access to new -// entries from evicting frequently used entries. It adds some -// additional tracking overhead to the standard LRU cache, and is -// computationally about 2x the cost, and adds some metadata over -// head. The ARCCache is similar, but does not require setting any -// parameters. -type TwoQueueCache struct { - size int - recentSize int - - recent *simplelru.LRU - frequent *simplelru.LRU - recentEvict *simplelru.LRU - lock sync.RWMutex -} - -// New2Q creates a new TwoQueueCache using the default -// values for the parameters. -func New2Q(size int) (*TwoQueueCache, error) { - return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) -} - -// New2QParams creates a new TwoQueueCache using the provided -// parameter values. 
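// A minimal usage sketch for the 2Q cache defined in this hunk; the size,
// key, and value literals are illustrative assumptions.
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// New2Q uses the default recent/ghost ratios; New2QParams exposes them.
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}
	cache.Add("answer", 42) // a first Add lands the entry in the "recent" queue
	if v, ok := cache.Get("answer"); ok {
		// Get promotes recently added entries into the "frequent" queue.
		fmt.Println(v)
	}
}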
-func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { - if size <= 0 { - return nil, fmt.Errorf("invalid size") - } - if recentRatio < 0.0 || recentRatio > 1.0 { - return nil, fmt.Errorf("invalid recent ratio") - } - if ghostRatio < 0.0 || ghostRatio > 1.0 { - return nil, fmt.Errorf("invalid ghost ratio") - } - - // Determine the sub-sizes - recentSize := int(float64(size) * recentRatio) - evictSize := int(float64(size) * ghostRatio) - - // Allocate the LRUs - recent, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - frequent, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - recentEvict, err := simplelru.NewLRU(evictSize, nil) - if err != nil { - return nil, err - } - - // Initialize the cache - c := &TwoQueueCache{ - size: size, - recentSize: recentSize, - recent: recent, - frequent: frequent, - recentEvict: recentEvict, - } - return c, nil -} - -func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if this is a frequent value - if val, ok := c.frequent.Get(key); ok { - return val, ok - } - - // If the value is contained in recent, then we - // promote it to frequent - if val, ok := c.recent.Peek(key); ok { - c.recent.Remove(key) - c.frequent.Add(key, val) - return val, ok - } - - // No hit - return nil, false -} - -func (c *TwoQueueCache) Add(key, value interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if the value is frequently used already, - // and just update the value - if c.frequent.Contains(key) { - c.frequent.Add(key, value) - return - } - - // Check if the value is recently used, and promote - // the value into the frequent list - if c.recent.Contains(key) { - c.recent.Remove(key) - c.frequent.Add(key, value) - return - } - - // If the value was recently evicted, add it to the - // frequently used list - if c.recentEvict.Contains(key) { - c.ensureSpace(true) - c.recentEvict.Remove(key) - c.frequent.Add(key, value) - return - } - - // Add to the recently seen list - c.ensureSpace(false) - c.recent.Add(key, value) - return -} - -// ensureSpace is used to ensure we have space in the cache -func (c *TwoQueueCache) ensureSpace(recentEvict bool) { - // If we have space, nothing to do - recentLen := c.recent.Len() - freqLen := c.frequent.Len() - if recentLen+freqLen < c.size { - return - } - - // If the recent buffer is larger than - // the target, evict from there - if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { - k, _, _ := c.recent.RemoveOldest() - c.recentEvict.Add(k, nil) - return - } - - // Remove from the frequent list otherwise - c.frequent.RemoveOldest() -} - -func (c *TwoQueueCache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.recent.Len() + c.frequent.Len() -} - -func (c *TwoQueueCache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - k1 := c.frequent.Keys() - k2 := c.recent.Keys() - return append(k1, k2...) 
-} - -func (c *TwoQueueCache) Remove(key interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - if c.frequent.Remove(key) { - return - } - if c.recent.Remove(key) { - return - } - if c.recentEvict.Remove(key) { - return - } -} - -func (c *TwoQueueCache) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - c.recent.Purge() - c.frequent.Purge() - c.recentEvict.Purge() -} - -func (c *TwoQueueCache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.frequent.Contains(key) || c.recent.Contains(key) -} - -func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) { - c.lock.RLock() - defer c.lock.RUnlock() - if val, ok := c.frequent.Peek(key); ok { - return val, ok - } - return c.recent.Peek(key) -} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE deleted file mode 100644 index be2cc4dfb60..00000000000 --- a/vendor/github.com/hashicorp/golang-lru/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. 
"You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. 
You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. 
Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go deleted file mode 100644 index a2a25281733..00000000000 --- a/vendor/github.com/hashicorp/golang-lru/arc.go +++ /dev/null @@ -1,257 +0,0 @@ -package lru - -import ( - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). -// ARC is an enhancement over the standard LRU cache in that tracks both -// frequency and recency of use. This avoids a burst in access to new -// entries from evicting the frequently used older entries. It adds some -// additional tracking overhead to a standard LRU cache, computationally -// it is roughly 2x the cost, and the extra memory overhead is linear -// with the size of the cache. ARC has been patented by IBM, but is -// similar to the TwoQueueCache (2Q) which requires setting parameters. 
-type ARCCache struct { - size int // Size is the total capacity of the cache - p int // P is the dynamic preference towards T1 or T2 - - t1 *simplelru.LRU // T1 is the LRU for recently accessed items - b1 *simplelru.LRU // B1 is the LRU for evictions from t1 - - t2 *simplelru.LRU // T2 is the LRU for frequently accessed items - b2 *simplelru.LRU // B2 is the LRU for evictions from t2 - - lock sync.RWMutex -} - -// NewARC creates an ARC of the given size -func NewARC(size int) (*ARCCache, error) { - // Create the sub LRUs - b1, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - b2, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - t1, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - t2, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - - // Initialize the ARC - c := &ARCCache{ - size: size, - p: 0, - t1: t1, - b1: b1, - t2: t2, - b2: b2, - } - return c, nil -} - -// Get looks up a key's value from the cache. -func (c *ARCCache) Get(key interface{}) (interface{}, bool) { - c.lock.Lock() - defer c.lock.Unlock() - - // Ff the value is contained in T1 (recent), then - // promote it to T2 (frequent) - if val, ok := c.t1.Peek(key); ok { - c.t1.Remove(key) - c.t2.Add(key, val) - return val, ok - } - - // Check if the value is contained in T2 (frequent) - if val, ok := c.t2.Get(key); ok { - return val, ok - } - - // No hit - return nil, false -} - -// Add adds a value to the cache. -func (c *ARCCache) Add(key, value interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if the value is contained in T1 (recent), and potentially - // promote it to frequent T2 - if c.t1.Contains(key) { - c.t1.Remove(key) - c.t2.Add(key, value) - return - } - - // Check if the value is already in T2 (frequent) and update it - if c.t2.Contains(key) { - c.t2.Add(key, value) - return - } - - // Check if this value was recently evicted as part of the - // recently used list - if c.b1.Contains(key) { - // T1 set is too small, increase P appropriately - delta := 1 - b1Len := c.b1.Len() - b2Len := c.b2.Len() - if b2Len > b1Len { - delta = b2Len / b1Len - } - if c.p+delta >= c.size { - c.p = c.size - } else { - c.p += delta - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(false) - } - - // Remove from B1 - c.b1.Remove(key) - - // Add the key to the frequently used list - c.t2.Add(key, value) - return - } - - // Check if this value was recently evicted as part of the - // frequently used list - if c.b2.Contains(key) { - // T2 set is too small, decrease P appropriately - delta := 1 - b1Len := c.b1.Len() - b2Len := c.b2.Len() - if b1Len > b2Len { - delta = b1Len / b2Len - } - if delta >= c.p { - c.p = 0 - } else { - c.p -= delta - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(true) - } - - // Remove from B2 - c.b2.Remove(key) - - // Add the key to the frequntly used list - c.t2.Add(key, value) - return - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(false) - } - - // Keep the size of the ghost buffers trim - if c.b1.Len() > c.size-c.p { - c.b1.RemoveOldest() - } - if c.b2.Len() > c.p { - c.b2.RemoveOldest() - } - - // Add to the recently seen list - c.t1.Add(key, value) - return -} - -// replace is used to adaptively evict from either T1 or T2 -// based on the current learned value of P -func (c *ARCCache) replace(b2ContainsKey bool) { - t1Len 
:= c.t1.Len() - if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { - k, _, ok := c.t1.RemoveOldest() - if ok { - c.b1.Add(k, nil) - } - } else { - k, _, ok := c.t2.RemoveOldest() - if ok { - c.b2.Add(k, nil) - } - } -} - -// Len returns the number of cached entries -func (c *ARCCache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.t1.Len() + c.t2.Len() -} - -// Keys returns all the cached keys -func (c *ARCCache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - k1 := c.t1.Keys() - k2 := c.t2.Keys() - return append(k1, k2...) -} - -// Remove is used to purge a key from the cache -func (c *ARCCache) Remove(key interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - if c.t1.Remove(key) { - return - } - if c.t2.Remove(key) { - return - } - if c.b1.Remove(key) { - return - } - if c.b2.Remove(key) { - return - } -} - -// Purge is used to clear the cache -func (c *ARCCache) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - c.t1.Purge() - c.t2.Purge() - c.b1.Purge() - c.b2.Purge() -} - -// Contains is used to check if the cache contains a key -// without updating recency or frequency. -func (c *ARCCache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.t1.Contains(key) || c.t2.Contains(key) -} - -// Peek is used to inspect the cache value of a key -// without updating recency or frequency. -func (c *ARCCache) Peek(key interface{}) (interface{}, bool) { - c.lock.RLock() - defer c.lock.RUnlock() - if val, ok := c.t1.Peek(key); ok { - return val, ok - } - return c.t2.Peek(key) -} diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go deleted file mode 100644 index a6285f989e0..00000000000 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ /dev/null @@ -1,114 +0,0 @@ -// This package provides a simple LRU cache. It is based on the -// LRU implementation in groupcache: -// https://github.com/golang/groupcache/tree/master/lru -package lru - -import ( - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -// Cache is a thread-safe fixed size LRU cache. -type Cache struct { - lru *simplelru.LRU - lock sync.RWMutex -} - -// New creates an LRU of the given size -func New(size int) (*Cache, error) { - return NewWithEvict(size, nil) -} - -// NewWithEvict constructs a fixed size cache with the given eviction -// callback. -func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { - lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) - if err != nil { - return nil, err - } - c := &Cache{ - lru: lru, - } - return c, nil -} - -// Purge is used to completely clear the cache -func (c *Cache) Purge() { - c.lock.Lock() - c.lru.Purge() - c.lock.Unlock() -} - -// Add adds a value to the cache. Returns true if an eviction occurred. -func (c *Cache) Add(key, value interface{}) bool { - c.lock.Lock() - defer c.lock.Unlock() - return c.lru.Add(key, value) -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key interface{}) (interface{}, bool) { - c.lock.Lock() - defer c.lock.Unlock() - return c.lru.Get(key) -} - -// Check if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. -func (c *Cache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Contains(key) -} - -// Returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. 
-func (c *Cache) Peek(key interface{}) (interface{}, bool) { - c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Peek(key) -} - -// ContainsOrAdd checks if a key is in the cache without updating the -// recent-ness or deleting it for being stale, and if not, adds the value. -// Returns whether found and whether an eviction occurred. -func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) { - c.lock.Lock() - defer c.lock.Unlock() - - if c.lru.Contains(key) { - return true, false - } else { - evict := c.lru.Add(key, value) - return false, evict - } -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key interface{}) { - c.lock.Lock() - c.lru.Remove(key) - c.lock.Unlock() -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - c.lock.Lock() - c.lru.RemoveOldest() - c.lock.Unlock() -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *Cache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Keys() -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.lru.Len() -} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go deleted file mode 100644 index 68d097a1c06..00000000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ /dev/null @@ -1,160 +0,0 @@ -package simplelru - -import ( - "container/list" - "errors" -) - -// EvictCallback is used to get a callback when a cache entry is evicted -type EvictCallback func(key interface{}, value interface{}) - -// LRU implements a non-thread safe fixed size LRU cache -type LRU struct { - size int - evictList *list.List - items map[interface{}]*list.Element - onEvict EvictCallback -} - -// entry is used to hold a value in the evictList -type entry struct { - key interface{} - value interface{} -} - -// NewLRU constructs an LRU of the given size -func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { - if size <= 0 { - return nil, errors.New("Must provide a positive size") - } - c := &LRU{ - size: size, - evictList: list.New(), - items: make(map[interface{}]*list.Element), - onEvict: onEvict, - } - return c, nil -} - -// Purge is used to completely clear the cache -func (c *LRU) Purge() { - for k, v := range c.items { - if c.onEvict != nil { - c.onEvict(k, v.Value.(*entry).value) - } - delete(c.items, k) - } - c.evictList.Init() -} - -// Add adds a value to the cache. Returns true if an eviction occured. -func (c *LRU) Add(key, value interface{}) bool { - // Check for existing item - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - ent.Value.(*entry).value = value - return false - } - - // Add new item - ent := &entry{key, value} - entry := c.evictList.PushFront(ent) - c.items[key] = entry - - evict := c.evictList.Len() > c.size - // Verify size not exceeded - if evict { - c.removeOldest() - } - return evict -} - -// Get looks up a key's value from the cache. -func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - return ent.Value.(*entry).value, true - } - return -} - -// Check if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. 
-func (c *LRU) Contains(key interface{}) (ok bool) { - _, ok = c.items[key] - return ok -} - -// Returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. -func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { - return ent.Value.(*entry).value, true - } - return nil, ok -} - -// Remove removes the provided key from the cache, returning if the -// key was contained. -func (c *LRU) Remove(key interface{}) bool { - if ent, ok := c.items[key]; ok { - c.removeElement(ent) - return true - } - return false -} - -// RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// GetOldest returns the oldest entry -func (c *LRU) GetOldest() (interface{}, interface{}, bool) { - ent := c.evictList.Back() - if ent != nil { - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *LRU) Keys() []interface{} { - keys := make([]interface{}, len(c.items)) - i := 0 - for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { - keys[i] = ent.Value.(*entry).key - i++ - } - return keys -} - -// Len returns the number of items in the cache. -func (c *LRU) Len() int { - return c.evictList.Len() -} - -// removeOldest removes the oldest item from the cache. -func (c *LRU) removeOldest() { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - } -} - -// removeElement is used to remove a given list element from the cache -func (c *LRU) removeElement(e *list.Element) { - c.evictList.Remove(e) - kv := e.Value.(*entry) - delete(c.items, kv.key) - if c.onEvict != nil { - c.onEvict(kv.key, kv.value) - } -} diff --git a/vendor/github.com/howeyc/gopass/LICENSE.txt b/vendor/github.com/howeyc/gopass/LICENSE.txt deleted file mode 100644 index 14f74708a4a..00000000000 --- a/vendor/github.com/howeyc/gopass/LICENSE.txt +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012 Chris Howey - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
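// A minimal sketch of the thread-safe Cache wrapper and EvictCallback removed
// above; the cache size, keys, and the printed message are illustrative
// assumptions.
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// NewWithEvict wires an EvictCallback into the underlying simplelru.LRU;
	// it fires whenever the LRU drops an entry (here, when size 2 is exceeded).
	cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3)        // exceeds the size bound, so "a" is evicted and reported
	fmt.Println(cache.Keys()) // keys from oldest to newest: [b c]
}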
diff --git a/vendor/github.com/howeyc/gopass/pass.go b/vendor/github.com/howeyc/gopass/pass.go deleted file mode 100644 index f5bd5a51a8c..00000000000 --- a/vendor/github.com/howeyc/gopass/pass.go +++ /dev/null @@ -1,110 +0,0 @@ -package gopass - -import ( - "errors" - "fmt" - "io" - "os" -) - -type FdReader interface { - io.Reader - Fd() uintptr -} - -var defaultGetCh = func(r io.Reader) (byte, error) { - buf := make([]byte, 1) - if n, err := r.Read(buf); n == 0 || err != nil { - if err != nil { - return 0, err - } - return 0, io.EOF - } - return buf[0], nil -} - -var ( - maxLength = 512 - ErrInterrupted = errors.New("interrupted") - ErrMaxLengthExceeded = fmt.Errorf("maximum byte limit (%v) exceeded", maxLength) - - // Provide variable so that tests can provide a mock implementation. - getch = defaultGetCh -) - -// getPasswd returns the input read from terminal. -// If prompt is not empty, it will be output as a prompt to the user -// If masked is true, typing will be matched by asterisks on the screen. -// Otherwise, typing will echo nothing. -func getPasswd(prompt string, masked bool, r FdReader, w io.Writer) ([]byte, error) { - var err error - var pass, bs, mask []byte - if masked { - bs = []byte("\b \b") - mask = []byte("*") - } - - if isTerminal(r.Fd()) { - if oldState, err := makeRaw(r.Fd()); err != nil { - return pass, err - } else { - defer func() { - restore(r.Fd(), oldState) - fmt.Fprintln(w) - }() - } - } - - if prompt != "" { - fmt.Fprint(w, prompt) - } - - // Track total bytes read, not just bytes in the password. This ensures any - // errors that might flood the console with nil or -1 bytes infinitely are - // capped. - var counter int - for counter = 0; counter <= maxLength; counter++ { - if v, e := getch(r); e != nil { - err = e - break - } else if v == 127 || v == 8 { - if l := len(pass); l > 0 { - pass = pass[:l-1] - fmt.Fprint(w, string(bs)) - } - } else if v == 13 || v == 10 { - break - } else if v == 3 { - err = ErrInterrupted - break - } else if v != 0 { - pass = append(pass, v) - fmt.Fprint(w, string(mask)) - } - } - - if counter > maxLength { - err = ErrMaxLengthExceeded - } - - return pass, err -} - -// GetPasswd returns the password read from the terminal without echoing input. -// The returned byte array does not include end-of-line characters. -func GetPasswd() ([]byte, error) { - return getPasswd("", false, os.Stdin, os.Stdout) -} - -// GetPasswdMasked returns the password read from the terminal, echoing asterisks. -// The returned byte array does not include end-of-line characters. -func GetPasswdMasked() ([]byte, error) { - return getPasswd("", true, os.Stdin, os.Stdout) -} - -// GetPasswdPrompt prompts the user and returns the password read from the terminal. -// If mask is true, then asterisks are echoed. -// The returned byte array does not include end-of-line characters. 
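// A minimal sketch of how the gopass prompt helpers in this hunk were
// typically called; the prompt text is an illustrative assumption.
package main

import (
	"fmt"
	"log"

	"github.com/howeyc/gopass"
)

func main() {
	fmt.Print("Password: ")
	// GetPasswdMasked echoes '*' per keystroke; GetPasswd echoes nothing.
	pass, err := gopass.GetPasswdMasked()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes (no trailing newline)\n", len(pass))
}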
-func GetPasswdPrompt(prompt string, mask bool, r FdReader, w io.Writer) ([]byte, error) { - return getPasswd(prompt, mask, r, w) -} diff --git a/vendor/github.com/howeyc/gopass/terminal.go b/vendor/github.com/howeyc/gopass/terminal.go deleted file mode 100644 index 08356414623..00000000000 --- a/vendor/github.com/howeyc/gopass/terminal.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build !solaris - -package gopass - -import "golang.org/x/crypto/ssh/terminal" - -type terminalState struct { - state *terminal.State -} - -func isTerminal(fd uintptr) bool { - return terminal.IsTerminal(int(fd)) -} - -func makeRaw(fd uintptr) (*terminalState, error) { - state, err := terminal.MakeRaw(int(fd)) - - return &terminalState{ - state: state, - }, err -} - -func restore(fd uintptr, oldState *terminalState) error { - return terminal.Restore(int(fd), oldState.state) -} diff --git a/vendor/github.com/howeyc/gopass/terminal_solaris.go b/vendor/github.com/howeyc/gopass/terminal_solaris.go deleted file mode 100644 index 257e1b4e81e..00000000000 --- a/vendor/github.com/howeyc/gopass/terminal_solaris.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * CDDL HEADER START - * - * The contents of this file are subject to the terms of the - * Common Development and Distribution License, Version 1.0 only - * (the "License"). You may not use this file except in compliance - * with the License. - * - * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE - * or http://www.opensolaris.org/os/licensing. - * See the License for the specific language governing permissions - * and limitations under the License. - * - * When distributing Covered Code, include this CDDL HEADER in each - * file and include the License file at usr/src/OPENSOLARIS.LICENSE. - * If applicable, add the following below this CDDL HEADER, with the - * fields enclosed by brackets "[]" replaced with your own identifying - * information: Portions Copyright [yyyy] [name of copyright owner] - * - * CDDL HEADER END - */ -// Below is derived from Solaris source, so CDDL license is included. - -package gopass - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -type terminalState struct { - state *unix.Termios -} - -// isTerminal returns true if there is a terminal attached to the given -// file descriptor. -// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c -func isTerminal(fd uintptr) bool { - var termio unix.Termio - err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) - return err == nil -} - -// makeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c -func makeRaw(fd uintptr) (*terminalState, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) - if err != nil { - return nil, err - } - oldTermios := *oldTermiosPtr - - newTermios := oldTermios - newTermios.Lflag &^= syscall.ECHO | syscall.ECHOE | syscall.ECHOK | syscall.ECHONL - if err := unix.IoctlSetTermios(int(fd), unix.TCSETS, &newTermios); err != nil { - return nil, err - } - - return &terminalState{ - state: oldTermiosPtr, - }, nil -} - -func restore(fd uintptr, oldState *terminalState) error { - return unix.IoctlSetTermios(int(fd), unix.TCSETS, oldState.state) -} diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298da..00000000000 --- a/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index 6e9aa7baf35..00000000000 --- a/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mergo merges same-type structs and maps by setting default values in zero-value fields. - -Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). 
- -Usage - -From my own work-in-progress project: - - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 - } - - type FssnConfig struct { - Network networkConfig - } - - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, - } - - // Inside a function [...] - - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) - } - - // More code [...] - -*/ -package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index 44361e88beb..00000000000 --- a/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || isEmptyValue(reflect.ValueOf(v)) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. 
- if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil { - return - } - } else { - if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}) error { - var ( - vDst, vSrc reflect.Value - err error - ) - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0) -} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 5d328b1fe77..00000000000 --- a/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "reflect" -) - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - switch dst.Kind() { - case reflect.Struct: - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1); err != nil { - return - } - } - case reflect.Map: - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Map: - if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil { - return - } - } - if !dstElement.IsValid() { - dst.SetMapIndex(key, srcElement) - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if src.IsNil() { - break - } else if dst.IsNil() { - if dst.CanSet() && isEmptyValue(dst) { - dst.Set(src) - } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1); err != nil { - return - } - default: - if dst.CanSet() && !isEmptyValue(src) { - dst.Set(src) - } - } - return -} - -// Merge sets fields' values in dst from src if they have a zero -// value of their type. -// dst and src must be valid same-type structs and dst must be -// a pointer to struct. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -func Merge(dst, src interface{}) error { - var ( - vDst, vSrc reflect.Value - err error - ) - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0) -} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index f8a0991ec63..00000000000 --- a/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. -var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - ptr uintptr - typ reflect.Type - next *visit -} - -// From src/pkg/encoding/json. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE deleted file mode 100644 index 2cf4f5ab28e..00000000000 --- a/vendor/github.com/json-iterator/go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 json-iterator - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go b/vendor/github.com/json-iterator/go/feature_adapter.go deleted file mode 100644 index edb477c4fd1..00000000000 --- a/vendor/github.com/json-iterator/go/feature_adapter.go +++ /dev/null @@ -1,127 +0,0 @@ -package jsoniter - -import ( - "bytes" - "io" -) - -// RawMessage to make replace json with jsoniter -type RawMessage []byte - -// Unmarshal adapts to json/encoding Unmarshal API -// -// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. 
-// Refer to https://godoc.org/encoding/json#Unmarshal for more information -func Unmarshal(data []byte, v interface{}) error { - return ConfigDefault.Unmarshal(data, v) -} - -func lastNotSpacePos(data []byte) int { - for i := len(data) - 1; i >= 0; i-- { - if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' { - return i + 1 - } - } - return 0 -} - -// UnmarshalFromString convenient method to read from string instead of []byte -func UnmarshalFromString(str string, v interface{}) error { - return ConfigDefault.UnmarshalFromString(str, v) -} - -// Get quick method to get value from deeply nested JSON structure -func Get(data []byte, path ...interface{}) Any { - return ConfigDefault.Get(data, path...) -} - -// Marshal adapts to json/encoding Marshal API -// -// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API -// Refer to https://godoc.org/encoding/json#Marshal for more information -func Marshal(v interface{}) ([]byte, error) { - return ConfigDefault.Marshal(v) -} - -// MarshalIndent same as json.MarshalIndent. Prefix is not supported. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - return ConfigDefault.MarshalIndent(v, prefix, indent) -} - -// MarshalToString convenient method to write as string instead of []byte -func MarshalToString(v interface{}) (string, error) { - return ConfigDefault.MarshalToString(v) -} - -// NewDecoder adapts to json/stream NewDecoder API. -// -// NewDecoder returns a new decoder that reads from r. -// -// Instead of a json/encoding Decoder, an Decoder is returned -// Refer to https://godoc.org/encoding/json#NewDecoder for more information -func NewDecoder(reader io.Reader) *Decoder { - return ConfigDefault.NewDecoder(reader) -} - -// Decoder reads and decodes JSON values from an input stream. -// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) -type Decoder struct { - iter *Iterator -} - -// Decode decode JSON into interface{} -func (adapter *Decoder) Decode(obj interface{}) error { - adapter.iter.ReadVal(obj) - err := adapter.iter.Error - if err == io.EOF { - return nil - } - return adapter.iter.Error -} - -// More is there more? -func (adapter *Decoder) More() bool { - return adapter.iter.head != adapter.iter.tail -} - -// Buffered remaining buffer -func (adapter *Decoder) Buffered() io.Reader { - remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] - return bytes.NewReader(remaining) -} - -// UseNumber for number JSON element, use float64 or json.NumberValue (alias of string) -func (adapter *Decoder) UseNumber() { - origCfg := adapter.iter.cfg.configBeforeFrozen - origCfg.UseNumber = true - adapter.iter.cfg = origCfg.Froze().(*frozenConfig) -} - -// NewEncoder same as json.NewEncoder -func NewEncoder(writer io.Writer) *Encoder { - return ConfigDefault.NewEncoder(writer) -} - -// Encoder same as json.Encoder -type Encoder struct { - stream *Stream -} - -// Encode encode interface{} as JSON to io.Writer -func (adapter *Encoder) Encode(val interface{}) error { - adapter.stream.WriteVal(val) - adapter.stream.Flush() - return adapter.stream.Error -} - -// SetIndent set the indention. 
Prefix is not supported -func (adapter *Encoder) SetIndent(prefix, indent string) { - adapter.stream.cfg.indentionStep = len(indent) -} - -// SetEscapeHTML escape html by default, set to false to disable -func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { - config := adapter.stream.cfg.configBeforeFrozen - config.EscapeHTML = escapeHTML - adapter.stream.cfg = config.Froze().(*frozenConfig) -} diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/feature_any.go deleted file mode 100644 index 6733dce4cc5..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any.go +++ /dev/null @@ -1,242 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" -) - -// Any generic object representation. -// The lazy json implementation holds []byte and parse lazily. -type Any interface { - LastError() error - ValueType() ValueType - MustBeValid() Any - ToBool() bool - ToInt() int - ToInt32() int32 - ToInt64() int64 - ToUint() uint - ToUint32() uint32 - ToUint64() uint64 - ToFloat32() float32 - ToFloat64() float64 - ToString() string - ToVal(val interface{}) - Get(path ...interface{}) Any - // TODO: add Set - Size() int - Keys() []string - GetInterface() interface{} - WriteTo(stream *Stream) -} - -type baseAny struct{} - -func (any *baseAny) Get(path ...interface{}) Any { - return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} -} - -func (any *baseAny) Size() int { - return 0 -} - -func (any *baseAny) Keys() []string { - return []string{} -} - -func (any *baseAny) ToVal(obj interface{}) { - panic("not implemented") -} - -// WrapInt32 turn int32 into Any interface -func WrapInt32(val int32) Any { - return &int32Any{baseAny{}, val} -} - -// WrapInt64 turn int64 into Any interface -func WrapInt64(val int64) Any { - return &int64Any{baseAny{}, val} -} - -// WrapUint32 turn uint32 into Any interface -func WrapUint32(val uint32) Any { - return &uint32Any{baseAny{}, val} -} - -// WrapUint64 turn uint64 into Any interface -func WrapUint64(val uint64) Any { - return &uint64Any{baseAny{}, val} -} - -// WrapFloat64 turn float64 into Any interface -func WrapFloat64(val float64) Any { - return &floatAny{baseAny{}, val} -} - -// WrapString turn string into Any interface -func WrapString(val string) Any { - return &stringAny{baseAny{}, val} -} - -// Wrap turn a go object into Any interface -func Wrap(val interface{}) Any { - if val == nil { - return &nilAny{} - } - asAny, isAny := val.(Any) - if isAny { - return asAny - } - typ := reflect.TypeOf(val) - switch typ.Kind() { - case reflect.Slice: - return wrapArray(val) - case reflect.Struct: - return wrapStruct(val) - case reflect.Map: - return wrapMap(val) - case reflect.String: - return WrapString(val.(string)) - case reflect.Int: - return WrapInt64(int64(val.(int))) - case reflect.Int8: - return WrapInt32(int32(val.(int8))) - case reflect.Int16: - return WrapInt32(int32(val.(int16))) - case reflect.Int32: - return WrapInt32(val.(int32)) - case reflect.Int64: - return WrapInt64(val.(int64)) - case reflect.Uint: - return WrapUint64(uint64(val.(uint))) - case reflect.Uint8: - return WrapUint32(uint32(val.(uint8))) - case reflect.Uint16: - return WrapUint32(uint32(val.(uint16))) - case reflect.Uint32: - return WrapUint32(uint32(val.(uint32))) - case reflect.Uint64: - return WrapUint64(val.(uint64)) - case reflect.Float32: - return WrapFloat64(float64(val.(float32))) - case reflect.Float64: - return WrapFloat64(val.(float64)) - case reflect.Bool: - if val.(bool) == true { - return 
&trueAny{} - } - return &falseAny{} - } - return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} -} - -// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. -func (iter *Iterator) ReadAny() Any { - return iter.readAny() -} - -func (iter *Iterator) readAny() Any { - c := iter.nextToken() - switch c { - case '"': - iter.unreadByte() - return &stringAny{baseAny{}, iter.ReadString()} - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - return &nilAny{} - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - return &trueAny{} - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - return &falseAny{} - case '{': - return iter.readObjectAny() - case '[': - return iter.readArrayAny() - case '-': - return iter.readNumberAny(false) - default: - return iter.readNumberAny(true) - } -} - -func (iter *Iterator) readNumberAny(positive bool) Any { - iter.startCapture(iter.head - 1) - iter.skipNumber() - lazyBuf := iter.stopCapture() - return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readObjectAny() Any { - iter.startCapture(iter.head - 1) - iter.skipObject() - lazyBuf := iter.stopCapture() - return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readArrayAny() Any { - iter.startCapture(iter.head - 1) - iter.skipArray() - lazyBuf := iter.stopCapture() - return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func locateObjectField(iter *Iterator, target string) []byte { - var found []byte - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - if field == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - return true - }) - return found -} - -func locateArrayElement(iter *Iterator, target int) []byte { - var found []byte - n := 0 - iter.ReadArrayCB(func(iter *Iterator) bool { - if n == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - n++ - return true - }) - return found -} - -func locatePath(iter *Iterator, path []interface{}) Any { - for i, pathKeyObj := range path { - switch pathKey := pathKeyObj.(type) { - case string: - valueBytes := locateObjectField(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int: - valueBytes := locateArrayElement(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int32: - if '*' == pathKey { - return iter.readAny().Get(path[i:]...) 
- } - return newInvalidAny(path[i:]) - default: - return newInvalidAny(path[i:]) - } - } - if iter.Error != nil && iter.Error != io.EOF { - return &invalidAny{baseAny{}, iter.Error} - } - return iter.readAny() -} diff --git a/vendor/github.com/json-iterator/go/feature_any_array.go b/vendor/github.com/json-iterator/go/feature_any_array.go deleted file mode 100644 index 0449e9aa428..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_array.go +++ /dev/null @@ -1,278 +0,0 @@ -package jsoniter - -import ( - "reflect" - "unsafe" -) - -type arrayLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *arrayLazyAny) ValueType() ValueType { - return ArrayValue -} - -func (any *arrayLazyAny) MustBeValid() Any { - return any -} - -func (any *arrayLazyAny) LastError() error { - return any.err -} - -func (any *arrayLazyAny) ToBool() bool { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.ReadArray() -} - -func (any *arrayLazyAny) ToInt() int { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToInt32() int32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToInt64() int64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint() uint { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint32() uint32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint64() uint64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToFloat32() float32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToFloat64() float64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *arrayLazyAny) ToVal(val interface{}) { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadVal(val) -} - -func (any *arrayLazyAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int: - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - valueBytes := locateArrayElement(iter, firstPath) - if valueBytes == nil { - return newInvalidAny(path) - } - iter.ResetBytes(valueBytes) - return locatePath(iter, path[1:]) - case int32: - if '*' == firstPath { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - arr := make([]Any, 0) - iter.ReadArrayCB(func(iter *Iterator) bool { - found := iter.readAny().Get(path[1:]...) 
- if found.ValueType() != InvalidValue { - arr = append(arr, found) - } - return true - }) - return wrapArray(arr) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *arrayLazyAny) Size() int { - size := 0 - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadArrayCB(func(iter *Iterator) bool { - size++ - iter.Skip() - return true - }) - return size -} - -func (any *arrayLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *arrayLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} - -type arrayAny struct { - baseAny - val reflect.Value -} - -func wrapArray(val interface{}) *arrayAny { - return &arrayAny{baseAny{}, reflect.ValueOf(val)} -} - -func (any *arrayAny) ValueType() ValueType { - return ArrayValue -} - -func (any *arrayAny) MustBeValid() Any { - return any -} - -func (any *arrayAny) LastError() error { - return nil -} - -func (any *arrayAny) ToBool() bool { - return any.val.Len() != 0 -} - -func (any *arrayAny) ToInt() int { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToInt32() int32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToInt64() int64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint() uint { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint32() uint32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint64() uint64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToFloat32() float32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToFloat64() float64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToString() string { - str, _ := MarshalToString(any.val.Interface()) - return str -} - -func (any *arrayAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int: - if firstPath < 0 || firstPath >= any.val.Len() { - return newInvalidAny(path) - } - return Wrap(any.val.Index(firstPath).Interface()) - case int32: - if '*' == firstPath { - mappedAll := make([]Any, 0) - for i := 0; i < any.val.Len(); i++ { - mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll = append(mappedAll, mapped) - } - } - return wrapArray(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *arrayAny) Size() int { - return any.val.Len() -} - -func (any *arrayAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *arrayAny) GetInterface() interface{} { - return any.val.Interface() -} diff --git a/vendor/github.com/json-iterator/go/feature_any_bool.go b/vendor/github.com/json-iterator/go/feature_any_bool.go deleted file mode 100644 index 9452324af5b..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_bool.go +++ /dev/null @@ -1,137 +0,0 @@ -package jsoniter - -type trueAny struct { - baseAny -} - -func (any *trueAny) LastError() error { - return nil -} - -func (any *trueAny) ToBool() bool { - return true -} - -func (any *trueAny) ToInt() int { - return 1 -} - -func (any *trueAny) ToInt32() int32 { - return 1 -} - -func (any *trueAny) ToInt64() int64 { - return 1 -} - -func (any *trueAny) ToUint() uint { - return 1 -} - -func (any *trueAny) ToUint32() uint32 { - return 1 -} - -func (any *trueAny) ToUint64() uint64 { - return 1 -} - -func (any *trueAny) ToFloat32() float32 { - return 1 -} - -func (any *trueAny) ToFloat64() float64 { - return 1 -} - -func (any *trueAny) ToString() string { - return "true" -} - -func (any *trueAny) WriteTo(stream *Stream) { - stream.WriteTrue() -} - -func (any *trueAny) Parse() *Iterator { - return nil -} - -func (any *trueAny) GetInterface() interface{} { - return true -} - -func (any *trueAny) ValueType() ValueType { - return BoolValue -} - -func (any *trueAny) MustBeValid() Any { - return any -} - -type falseAny struct { - baseAny -} - -func (any *falseAny) LastError() error { - return nil -} - -func (any *falseAny) ToBool() bool { - return false -} - -func (any *falseAny) ToInt() int { - return 0 -} - -func (any *falseAny) ToInt32() int32 { - return 0 -} - -func (any *falseAny) ToInt64() int64 { - return 0 -} - -func (any *falseAny) ToUint() uint { - return 0 -} - -func (any *falseAny) ToUint32() uint32 { - return 0 -} - -func (any *falseAny) ToUint64() uint64 { - return 0 -} - -func (any *falseAny) ToFloat32() float32 { - return 0 -} - -func (any *falseAny) ToFloat64() float64 { - return 0 -} - -func (any *falseAny) ToString() string { - return "false" -} - -func (any *falseAny) WriteTo(stream *Stream) { - stream.WriteFalse() -} - -func (any *falseAny) Parse() *Iterator { - return nil -} - -func (any *falseAny) GetInterface() interface{} { - return false -} - -func (any *falseAny) ValueType() ValueType { - return BoolValue -} - -func (any *falseAny) MustBeValid() Any { - return any -} diff --git a/vendor/github.com/json-iterator/go/feature_any_float.go b/vendor/github.com/json-iterator/go/feature_any_float.go deleted file mode 100644 index 35fdb09497f..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_float.go +++ /dev/null @@ -1,83 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type floatAny struct { - baseAny - val float64 -} - -func (any *floatAny) Parse() *Iterator { - return nil -} - -func (any *floatAny) ValueType() ValueType { - return NumberValue -} - -func (any *floatAny) MustBeValid() Any { - return any -} - -func (any *floatAny) LastError() error { - return nil -} - -func (any *floatAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *floatAny) ToInt() int { - return int(any.val) -} - -func (any *floatAny) ToInt32() int32 { - return int32(any.val) -} - 
-func (any *floatAny) ToInt64() int64 { - return int64(any.val) -} - -func (any *floatAny) ToUint() uint { - if any.val > 0 { - return uint(any.val) - } - return 0 -} - -func (any *floatAny) ToUint32() uint32 { - if any.val > 0 { - return uint32(any.val) - } - return 0 -} - -func (any *floatAny) ToUint64() uint64 { - if any.val > 0 { - return uint64(any.val) - } - return 0 -} - -func (any *floatAny) ToFloat32() float32 { - return float32(any.val) -} - -func (any *floatAny) ToFloat64() float64 { - return any.val -} - -func (any *floatAny) ToString() string { - return strconv.FormatFloat(any.val, 'E', -1, 64) -} - -func (any *floatAny) WriteTo(stream *Stream) { - stream.WriteFloat64(any.val) -} - -func (any *floatAny) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/feature_any_int32.go b/vendor/github.com/json-iterator/go/feature_any_int32.go deleted file mode 100644 index 1b56f399150..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_int32.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type int32Any struct { - baseAny - val int32 -} - -func (any *int32Any) LastError() error { - return nil -} - -func (any *int32Any) ValueType() ValueType { - return NumberValue -} - -func (any *int32Any) MustBeValid() Any { - return any -} - -func (any *int32Any) ToBool() bool { - return any.val != 0 -} - -func (any *int32Any) ToInt() int { - return int(any.val) -} - -func (any *int32Any) ToInt32() int32 { - return any.val -} - -func (any *int32Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *int32Any) ToUint() uint { - return uint(any.val) -} - -func (any *int32Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *int32Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *int32Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *int32Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *int32Any) ToString() string { - return strconv.FormatInt(int64(any.val), 10) -} - -func (any *int32Any) WriteTo(stream *Stream) { - stream.WriteInt32(any.val) -} - -func (any *int32Any) Parse() *Iterator { - return nil -} - -func (any *int32Any) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/feature_any_int64.go b/vendor/github.com/json-iterator/go/feature_any_int64.go deleted file mode 100644 index c440d72b6d3..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_int64.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type int64Any struct { - baseAny - val int64 -} - -func (any *int64Any) LastError() error { - return nil -} - -func (any *int64Any) ValueType() ValueType { - return NumberValue -} - -func (any *int64Any) MustBeValid() Any { - return any -} - -func (any *int64Any) ToBool() bool { - return any.val != 0 -} - -func (any *int64Any) ToInt() int { - return int(any.val) -} - -func (any *int64Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *int64Any) ToInt64() int64 { - return any.val -} - -func (any *int64Any) ToUint() uint { - return uint(any.val) -} - -func (any *int64Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *int64Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *int64Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *int64Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *int64Any) ToString() string { - return strconv.FormatInt(any.val, 10) -} - -func (any 
*int64Any) WriteTo(stream *Stream) { - stream.WriteInt64(any.val) -} - -func (any *int64Any) Parse() *Iterator { - return nil -} - -func (any *int64Any) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/feature_any_invalid.go b/vendor/github.com/json-iterator/go/feature_any_invalid.go deleted file mode 100644 index 1d859eac327..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_invalid.go +++ /dev/null @@ -1,82 +0,0 @@ -package jsoniter - -import "fmt" - -type invalidAny struct { - baseAny - err error -} - -func newInvalidAny(path []interface{}) *invalidAny { - return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} -} - -func (any *invalidAny) LastError() error { - return any.err -} - -func (any *invalidAny) ValueType() ValueType { - return InvalidValue -} - -func (any *invalidAny) MustBeValid() Any { - panic(any.err) -} - -func (any *invalidAny) ToBool() bool { - return false -} - -func (any *invalidAny) ToInt() int { - return 0 -} - -func (any *invalidAny) ToInt32() int32 { - return 0 -} - -func (any *invalidAny) ToInt64() int64 { - return 0 -} - -func (any *invalidAny) ToUint() uint { - return 0 -} - -func (any *invalidAny) ToUint32() uint32 { - return 0 -} - -func (any *invalidAny) ToUint64() uint64 { - return 0 -} - -func (any *invalidAny) ToFloat32() float32 { - return 0 -} - -func (any *invalidAny) ToFloat64() float64 { - return 0 -} - -func (any *invalidAny) ToString() string { - return "" -} - -func (any *invalidAny) WriteTo(stream *Stream) { -} - -func (any *invalidAny) Get(path ...interface{}) Any { - if any.err == nil { - return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} - } - return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} -} - -func (any *invalidAny) Parse() *Iterator { - return nil -} - -func (any *invalidAny) GetInterface() interface{} { - return nil -} diff --git a/vendor/github.com/json-iterator/go/feature_any_nil.go b/vendor/github.com/json-iterator/go/feature_any_nil.go deleted file mode 100644 index d04cb54c11c..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_nil.go +++ /dev/null @@ -1,69 +0,0 @@ -package jsoniter - -type nilAny struct { - baseAny -} - -func (any *nilAny) LastError() error { - return nil -} - -func (any *nilAny) ValueType() ValueType { - return NilValue -} - -func (any *nilAny) MustBeValid() Any { - return any -} - -func (any *nilAny) ToBool() bool { - return false -} - -func (any *nilAny) ToInt() int { - return 0 -} - -func (any *nilAny) ToInt32() int32 { - return 0 -} - -func (any *nilAny) ToInt64() int64 { - return 0 -} - -func (any *nilAny) ToUint() uint { - return 0 -} - -func (any *nilAny) ToUint32() uint32 { - return 0 -} - -func (any *nilAny) ToUint64() uint64 { - return 0 -} - -func (any *nilAny) ToFloat32() float32 { - return 0 -} - -func (any *nilAny) ToFloat64() float64 { - return 0 -} - -func (any *nilAny) ToString() string { - return "" -} - -func (any *nilAny) WriteTo(stream *Stream) { - stream.WriteNil() -} - -func (any *nilAny) Parse() *Iterator { - return nil -} - -func (any *nilAny) GetInterface() interface{} { - return nil -} diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/feature_any_number.go deleted file mode 100644 index 4e1c27641d2..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_number.go +++ /dev/null @@ -1,104 +0,0 @@ -package jsoniter - -import "unsafe" - -type numberLazyAny struct { - baseAny - cfg 
*frozenConfig - buf []byte - err error -} - -func (any *numberLazyAny) ValueType() ValueType { - return NumberValue -} - -func (any *numberLazyAny) MustBeValid() Any { - return any -} - -func (any *numberLazyAny) LastError() error { - return any.err -} - -func (any *numberLazyAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *numberLazyAny) ToInt() int { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToInt32() int32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt32() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToInt64() int64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt64() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToUint() uint { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToUint32() uint32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint32() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToUint64() uint64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint64() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToFloat32() float32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat32() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToFloat64() float64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat64() - any.err = iter.Error - return val -} - -func (any *numberLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *numberLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *numberLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} diff --git a/vendor/github.com/json-iterator/go/feature_any_object.go b/vendor/github.com/json-iterator/go/feature_any_object.go deleted file mode 100644 index c44ef5c989a..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_object.go +++ /dev/null @@ -1,374 +0,0 @@ -package jsoniter - -import ( - "reflect" - "unsafe" -) - -type objectLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *objectLazyAny) ValueType() ValueType { - return ObjectValue -} - -func (any *objectLazyAny) MustBeValid() Any { - return any -} - -func (any *objectLazyAny) LastError() error { - return any.err -} - -func (any *objectLazyAny) ToBool() bool { - return true -} - -func (any *objectLazyAny) ToInt() int { - return 0 -} - -func (any *objectLazyAny) ToInt32() int32 { - return 0 -} - -func (any *objectLazyAny) ToInt64() int64 { - return 0 -} - -func (any *objectLazyAny) ToUint() uint { - return 0 -} - -func (any *objectLazyAny) ToUint32() uint32 { - return 0 -} - -func (any *objectLazyAny) ToUint64() uint64 { - return 0 -} - -func (any *objectLazyAny) ToFloat32() float32 { - return 0 -} - -func (any *objectLazyAny) ToFloat64() float64 { - return 0 -} - -func (any *objectLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *objectLazyAny) ToVal(obj interface{}) { - 
iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadVal(obj) -} - -func (any *objectLazyAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case string: - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - valueBytes := locateObjectField(iter, firstPath) - if valueBytes == nil { - return newInvalidAny(path) - } - iter.ResetBytes(valueBytes) - return locatePath(iter, path[1:]) - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadMapCB(func(iter *Iterator, field string) bool { - mapped := locatePath(iter, path[1:]) - if mapped.ValueType() != InvalidValue { - mappedAll[field] = mapped - } - return true - }) - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *objectLazyAny) Keys() []string { - keys := []string{} - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadMapCB(func(iter *Iterator, field string) bool { - iter.Skip() - keys = append(keys, field) - return true - }) - return keys -} - -func (any *objectLazyAny) Size() int { - size := 0 - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - size++ - return true - }) - return size -} - -func (any *objectLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *objectLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} - -type objectAny struct { - baseAny - err error - val reflect.Value -} - -func wrapStruct(val interface{}) *objectAny { - return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} -} - -func (any *objectAny) ValueType() ValueType { - return ObjectValue -} - -func (any *objectAny) MustBeValid() Any { - return any -} - -func (any *objectAny) Parse() *Iterator { - return nil -} - -func (any *objectAny) LastError() error { - return any.err -} - -func (any *objectAny) ToBool() bool { - return any.val.NumField() != 0 -} - -func (any *objectAny) ToInt() int { - return 0 -} - -func (any *objectAny) ToInt32() int32 { - return 0 -} - -func (any *objectAny) ToInt64() int64 { - return 0 -} - -func (any *objectAny) ToUint() uint { - return 0 -} - -func (any *objectAny) ToUint32() uint32 { - return 0 -} - -func (any *objectAny) ToUint64() uint64 { - return 0 -} - -func (any *objectAny) ToFloat32() float32 { - return 0 -} - -func (any *objectAny) ToFloat64() float64 { - return 0 -} - -func (any *objectAny) ToString() string { - str, err := MarshalToString(any.val.Interface()) - any.err = err - return str -} - -func (any *objectAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case string: - field := any.val.FieldByName(firstPath) - if !field.IsValid() { - return newInvalidAny(path) - } - return Wrap(field.Interface()) - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - for i := 0; i < any.val.NumField(); i++ { - field := any.val.Field(i) - if field.CanInterface() { - mapped := Wrap(field.Interface()).Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll[any.val.Type().Field(i).Name] = mapped - } - } - } - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *objectAny) Keys() []string { - keys := make([]string, 0, any.val.NumField()) - for i := 0; i < any.val.NumField(); i++ { - keys = append(keys, any.val.Type().Field(i).Name) - } - return keys -} - -func (any *objectAny) Size() int { - return any.val.NumField() -} - -func (any *objectAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *objectAny) GetInterface() interface{} { - return any.val.Interface() -} - -type mapAny struct { - baseAny - err error - val reflect.Value -} - -func wrapMap(val interface{}) *mapAny { - return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} -} - -func (any *mapAny) ValueType() ValueType { - return ObjectValue -} - -func (any *mapAny) MustBeValid() Any { - return any -} - -func (any *mapAny) Parse() *Iterator { - return nil -} - -func (any *mapAny) LastError() error { - return any.err -} - -func (any *mapAny) ToBool() bool { - return true -} - -func (any *mapAny) ToInt() int { - return 0 -} - -func (any *mapAny) ToInt32() int32 { - return 0 -} - -func (any *mapAny) ToInt64() int64 { - return 0 -} - -func (any *mapAny) ToUint() uint { - return 0 -} - -func (any *mapAny) ToUint32() uint32 { - return 0 -} - -func (any *mapAny) ToUint64() uint64 { - return 0 -} - -func (any *mapAny) ToFloat32() float32 { - return 0 -} - -func (any *mapAny) ToFloat64() float64 { - return 0 -} - -func (any *mapAny) ToString() string { - str, err := MarshalToString(any.val.Interface()) - any.err = err - return str -} - -func (any *mapAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - for _, key := range any.val.MapKeys() { - keyAsStr := key.String() - element := Wrap(any.val.MapIndex(key).Interface()) - mapped := element.Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll[keyAsStr] = mapped - } - } - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - value := any.val.MapIndex(reflect.ValueOf(firstPath)) - if !value.IsValid() { - return newInvalidAny(path) - } - return Wrap(value.Interface()) - } -} - -func (any *mapAny) Keys() []string { - keys := make([]string, 0, any.val.Len()) - for _, key := range any.val.MapKeys() { - keys = append(keys, key.String()) - } - return keys -} - -func (any *mapAny) Size() int { - return any.val.Len() -} - -func (any *mapAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *mapAny) GetInterface() interface{} { - return any.val.Interface() -} diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/feature_any_string.go deleted file mode 100644 index abf060bd59a..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_string.go +++ /dev/null @@ -1,166 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strconv" -) - -type stringAny struct { - baseAny - val string -} - -func (any *stringAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} -} - -func (any *stringAny) Parse() *Iterator { - return nil -} - -func (any *stringAny) ValueType() ValueType { - return StringValue -} - -func (any *stringAny) MustBeValid() Any { - return any -} - -func (any *stringAny) LastError() error { - return nil -} - -func (any *stringAny) ToBool() bool { - str := any.ToString() - if str == "0" { - return false - } - for _, c := range str { - switch c { - case ' ', '\n', '\r', '\t': - default: - return true - } - } - return false -} - -func (any *stringAny) ToInt() int { - return int(any.ToInt64()) - -} - -func (any *stringAny) ToInt32() int32 { - return int32(any.ToInt64()) -} - -func (any *stringAny) ToInt64() int64 { - if any.val == "" { - return 0 - } - - flag := 1 - startPos := 0 - endPos := 0 - if any.val[0] == '+' || any.val[0] == '-' { - startPos = 1 - } - - if any.val[0] == '-' { - flag = -1 - } - - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) - return int64(flag) * parsed -} - -func (any *stringAny) ToUint() uint { - return uint(any.ToUint64()) -} - -func (any *stringAny) ToUint32() uint32 { - return uint32(any.ToUint64()) -} - -func (any *stringAny) ToUint64() uint64 { - if any.val == "" { - return 0 - } - - startPos := 0 - endPos := 0 - - if any.val[0] == '-' { - return 0 - } - if any.val[0] == '+' { - startPos = 1 - } - - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) - return parsed -} - -func (any *stringAny) ToFloat32() float32 { - return float32(any.ToFloat64()) -} - -func (any *stringAny) ToFloat64() float64 { - if len(any.val) == 0 { - return 0 - } - - // first char invalid - if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { - return 0 - } - - // extract valid num expression from string - // eg 123true => 123, -12.12xxa => -12.12 - endPos := 1 - for i := 1; i < len(any.val); i++ { - if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { - endPos = i + 1 - continue - } - - // end position is the first char which is not digit - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - endPos = i - break - } - } - parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) - return parsed -} - -func (any *stringAny) ToString() string { - return any.val -} - -func (any *stringAny) WriteTo(stream *Stream) { - stream.WriteString(any.val) -} - -func (any *stringAny) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint32.go b/vendor/github.com/json-iterator/go/feature_any_uint32.go deleted file mode 100644 index 656bbd33d7e..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_uint32.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type uint32Any struct { - baseAny - val uint32 -} - -func (any *uint32Any) LastError() error { - return nil -} - -func (any *uint32Any) ValueType() ValueType { - return NumberValue -} - -func (any *uint32Any) MustBeValid() Any { - return any -} - -func (any *uint32Any) ToBool() bool { - return any.val != 0 -} - -func (any *uint32Any) ToInt() int { - return int(any.val) -} - -func (any *uint32Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *uint32Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *uint32Any) ToUint() uint { - return uint(any.val) -} - -func (any *uint32Any) ToUint32() uint32 { - return any.val -} - -func (any *uint32Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *uint32Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *uint32Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *uint32Any) ToString() string { - return strconv.FormatInt(int64(any.val), 10) -} - -func (any *uint32Any) WriteTo(stream *Stream) { - stream.WriteUint32(any.val) -} - -func (any *uint32Any) Parse() *Iterator { - return nil -} - -func (any *uint32Any) GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint64.go b/vendor/github.com/json-iterator/go/feature_any_uint64.go deleted file mode 100644 index 7df2fce33ba..00000000000 --- a/vendor/github.com/json-iterator/go/feature_any_uint64.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type uint64Any struct { - baseAny - val uint64 -} - -func (any *uint64Any) LastError() error { - return nil -} - -func (any *uint64Any) ValueType() ValueType { - return NumberValue -} - -func (any *uint64Any) MustBeValid() Any { - return any -} - -func (any *uint64Any) ToBool() bool { - return any.val != 0 -} - -func (any *uint64Any) ToInt() int { - return int(any.val) -} - -func (any *uint64Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *uint64Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *uint64Any) ToUint() uint { - return uint(any.val) -} - -func (any *uint64Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *uint64Any) ToUint64() uint64 { - return any.val -} - -func (any *uint64Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *uint64Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *uint64Any) ToString() string { - return strconv.FormatUint(any.val, 10) -} - -func (any *uint64Any) WriteTo(stream *Stream) { - stream.WriteUint64(any.val) -} - -func (any *uint64Any) Parse() *Iterator { - return nil -} - -func (any *uint64Any) 
GetInterface() interface{} { - return any.val -} diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/feature_config.go deleted file mode 100644 index fc055d504eb..00000000000 --- a/vendor/github.com/json-iterator/go/feature_config.go +++ /dev/null @@ -1,312 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "errors" - "io" - "reflect" - "sync/atomic" - "unsafe" -) - -// Config customize how the API should behave. -// The API is created from Config by Froze. -type Config struct { - IndentionStep int - MarshalFloatWith6Digits bool - EscapeHTML bool - SortMapKeys bool - UseNumber bool - TagKey string -} - -type frozenConfig struct { - configBeforeFrozen Config - sortMapKeys bool - indentionStep int - decoderCache unsafe.Pointer - encoderCache unsafe.Pointer - extensions []Extension - streamPool chan *Stream - iteratorPool chan *Iterator -} - -// API the public interface of this package. -// Primary Marshal and Unmarshal. -type API interface { - IteratorPool - StreamPool - MarshalToString(v interface{}) (string, error) - Marshal(v interface{}) ([]byte, error) - MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) - UnmarshalFromString(str string, v interface{}) error - Unmarshal(data []byte, v interface{}) error - Get(data []byte, path ...interface{}) Any - NewEncoder(writer io.Writer) *Encoder - NewDecoder(reader io.Reader) *Decoder -} - -// ConfigDefault the default API -var ConfigDefault = Config{ - EscapeHTML: true, -}.Froze() - -// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior -var ConfigCompatibleWithStandardLibrary = Config{ - EscapeHTML: true, - SortMapKeys: true, -}.Froze() - -// ConfigFastest marshals float with only 6 digits precision -var ConfigFastest = Config{ - EscapeHTML: false, - MarshalFloatWith6Digits: true, -}.Froze() - -// Froze forge API from config -func (cfg Config) Froze() API { - // TODO: cache frozen config - frozenConfig := &frozenConfig{ - sortMapKeys: cfg.SortMapKeys, - indentionStep: cfg.IndentionStep, - streamPool: make(chan *Stream, 16), - iteratorPool: make(chan *Iterator, 16), - } - atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{})) - atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{})) - if cfg.MarshalFloatWith6Digits { - frozenConfig.marshalFloatWith6Digits() - } - if cfg.EscapeHTML { - frozenConfig.escapeHTML() - } - if cfg.UseNumber { - frozenConfig.useNumber() - } - frozenConfig.configBeforeFrozen = cfg - return frozenConfig -} - -func (cfg *frozenConfig) useNumber() { - cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { - if iter.WhatIsNext() == NumberValue { - *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) - } else { - *((*interface{})(ptr)) = iter.Read() - } - }}) -} -func (cfg *frozenConfig) getTagKey() string { - tagKey := cfg.configBeforeFrozen.TagKey - if tagKey == "" { - return "json" - } - return tagKey -} - -func (cfg *frozenConfig) registerExtension(extension Extension) { - cfg.extensions = append(cfg.extensions, extension) -} - -type lossyFloat32Encoder struct { -} - -func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32Lossy(*((*float32)(ptr))) -} - -func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder 
*lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type lossyFloat64Encoder struct { -} - -func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64Lossy(*((*float64)(ptr))) -} - -func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -// EnableLossyFloatMarshalling keeps 10**(-6) precision -// for float variables for better performance. -func (cfg *frozenConfig) marshalFloatWith6Digits() { - // for better performance - cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{}) - cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{}) -} - -type htmlEscapedStringEncoder struct { -} - -func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteStringWithHTMLEscaped(str) -} - -func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -func (cfg *frozenConfig) escapeHTML() { - cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{}) -} - -func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { - done := false - for !done { - ptr := atomic.LoadPointer(&cfg.decoderCache) - cache := *(*map[reflect.Type]ValDecoder)(ptr) - copied := map[reflect.Type]ValDecoder{} - for k, v := range cache { - copied[k] = v - } - copied[cacheKey] = decoder - done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied)) - } -} - -func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { - done := false - for !done { - ptr := atomic.LoadPointer(&cfg.encoderCache) - cache := *(*map[reflect.Type]ValEncoder)(ptr) - copied := map[reflect.Type]ValEncoder{} - for k, v := range cache { - copied[k] = v - } - copied[cacheKey] = encoder - done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied)) - } -} - -func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { - ptr := atomic.LoadPointer(&cfg.decoderCache) - cache := *(*map[reflect.Type]ValDecoder)(ptr) - return cache[cacheKey] -} - -func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { - ptr := atomic.LoadPointer(&cfg.encoderCache) - cache := *(*map[reflect.Type]ValEncoder)(ptr) - return cache[cacheKey] -} - -func (cfg *frozenConfig) cleanDecoders() { - typeDecoders = map[string]ValDecoder{} - fieldDecoders = map[string]ValDecoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) cleanEncoders() { - typeEncoders = map[string]ValEncoder{} - fieldEncoders = map[string]ValEncoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return "", stream.Error - } - return string(stream.Buffer()), nil -} - -func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if 
stream.Error != nil { - return nil, stream.Error - } - result := stream.Buffer() - copied := make([]byte, len(result)) - copy(copied, result) - return copied, nil -} - -func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - if prefix != "" { - panic("prefix is not supported") - } - for _, r := range indent { - if r != ' ' { - panic("indent can only be space") - } - } - newCfg := cfg.configBeforeFrozen - newCfg.IndentionStep = len(indent) - return newCfg.Froze().Marshal(v) -} - -func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { - data := []byte(str) - data = data[:lastNotSpacePos(data)] - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.ReadVal(v) - if iter.head == iter.tail { - iter.loadMore() - } - if iter.Error == io.EOF { - return nil - } - if iter.Error == nil { - iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal") - } - return iter.Error -} - -func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - return locatePath(iter, path) -} - -func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { - data = data[:lastNotSpacePos(data)] - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - typ := reflect.TypeOf(v) - if typ.Kind() != reflect.Ptr { - // return non-pointer error - return errors.New("the second param must be ptr type") - } - iter.ReadVal(v) - if iter.head == iter.tail { - iter.loadMore() - } - if iter.Error == io.EOF { - return nil - } - if iter.Error == nil { - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") - } - return iter.Error -} - -func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { - stream := NewStream(cfg, writer, 512) - return &Encoder{stream} -} - -func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { - iter := Parse(cfg, reader, 512) - return &Decoder{iter} -} diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/feature_iter.go deleted file mode 100644 index 4357d69bac7..00000000000 --- a/vendor/github.com/json-iterator/go/feature_iter.go +++ /dev/null @@ -1,307 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "fmt" - "io" -) - -// ValueType the type for JSON element -type ValueType int - -const ( - // InvalidValue invalid JSON element - InvalidValue ValueType = iota - // StringValue JSON element "string" - StringValue - // NumberValue JSON element 100 or 0.10 - NumberValue - // NilValue JSON element null - NilValue - // BoolValue JSON element true or false - BoolValue - // ArrayValue JSON element [] - ArrayValue - // ObjectValue JSON element {} - ObjectValue -) - -var hexDigits []byte -var valueTypes []ValueType - -func init() { - hexDigits = make([]byte, 256) - for i := 0; i < len(hexDigits); i++ { - hexDigits[i] = 255 - } - for i := '0'; i <= '9'; i++ { - hexDigits[i] = byte(i - '0') - } - for i := 'a'; i <= 'f'; i++ { - hexDigits[i] = byte((i - 'a') + 10) - } - for i := 'A'; i <= 'F'; i++ { - hexDigits[i] = byte((i - 'A') + 10) - } - valueTypes = make([]ValueType, 256) - for i := 0; i < len(valueTypes); i++ { - valueTypes[i] = InvalidValue - } - valueTypes['"'] = StringValue - valueTypes['-'] = NumberValue - valueTypes['0'] = NumberValue - valueTypes['1'] = NumberValue - valueTypes['2'] = NumberValue - valueTypes['3'] = NumberValue - valueTypes['4'] = NumberValue - valueTypes['5'] = NumberValue - valueTypes['6'] = NumberValue - 
valueTypes['7'] = NumberValue - valueTypes['8'] = NumberValue - valueTypes['9'] = NumberValue - valueTypes['t'] = BoolValue - valueTypes['f'] = BoolValue - valueTypes['n'] = NilValue - valueTypes['['] = ArrayValue - valueTypes['{'] = ObjectValue -} - -// Iterator is a io.Reader like object, with JSON specific read functions. -// Error is not returned as return value, but stored as Error member on this iterator instance. -type Iterator struct { - cfg *frozenConfig - reader io.Reader - buf []byte - head int - tail int - captureStartedAt int - captured []byte - Error error -} - -// NewIterator creates an empty Iterator instance -func NewIterator(cfg API) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: nil, - head: 0, - tail: 0, - } -} - -// Parse creates an Iterator instance from io.Reader -func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: reader, - buf: make([]byte, bufSize), - head: 0, - tail: 0, - } -} - -// ParseBytes creates an Iterator instance from byte array -func ParseBytes(cfg API, input []byte) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: input, - head: 0, - tail: len(input), - } -} - -// ParseString creates an Iterator instance from string -func ParseString(cfg API, input string) *Iterator { - return ParseBytes(cfg, []byte(input)) -} - -// Pool returns a pool can provide more iterator with same configuration -func (iter *Iterator) Pool() IteratorPool { - return iter.cfg -} - -// Reset reuse iterator instance by specifying another reader -func (iter *Iterator) Reset(reader io.Reader) *Iterator { - iter.reader = reader - iter.head = 0 - iter.tail = 0 - return iter -} - -// ResetBytes reuse iterator instance by specifying another byte array as input -func (iter *Iterator) ResetBytes(input []byte) *Iterator { - iter.reader = nil - iter.buf = input - iter.head = 0 - iter.tail = len(input) - return iter -} - -// WhatIsNext gets ValueType of relatively next json element -func (iter *Iterator) WhatIsNext() ValueType { - valueType := valueTypes[iter.nextToken()] - iter.unreadByte() - return valueType -} - -func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i - return false - } - return true -} - -func (iter *Iterator) isObjectEnd() bool { - c := iter.nextToken() - if c == ',' { - return false - } - if c == '}' { - return true - } - iter.ReportError("isObjectEnd", "object ended prematurely") - return true -} - -func (iter *Iterator) nextToken() byte { - // a variation of skip whitespaces, returning the next non-whitespace token - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i + 1 - return c - } - if !iter.loadMore() { - return 0 - } - } -} - -// ReportError record a error in iterator instance with current position. -func (iter *Iterator) ReportError(operation string, msg string) { - if iter.Error != nil { - if iter.Error != io.EOF { - return - } - } - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - iter.Error = fmt.Errorf("%s: %s, parsing %v ...%s... 
at %s", operation, msg, iter.head, - string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) -} - -// CurrentBuffer gets current buffer as string for debugging purpose -func (iter *Iterator) CurrentBuffer() string { - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - return fmt.Sprintf("parsing %v ...|%s|... at %s", iter.head, - string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) -} - -func (iter *Iterator) readByte() (ret byte) { - if iter.head == iter.tail { - if iter.loadMore() { - ret = iter.buf[iter.head] - iter.head++ - return ret - } - return 0 - } - ret = iter.buf[iter.head] - iter.head++ - return ret -} - -func (iter *Iterator) loadMore() bool { - if iter.reader == nil { - if iter.Error == nil { - iter.head = iter.tail - iter.Error = io.EOF - } - return false - } - if iter.captured != nil { - iter.captured = append(iter.captured, - iter.buf[iter.captureStartedAt:iter.tail]...) - iter.captureStartedAt = 0 - } - for { - n, err := iter.reader.Read(iter.buf) - if n == 0 { - if err != nil { - if iter.Error == nil { - iter.Error = err - } - return false - } - } else { - iter.head = 0 - iter.tail = n - return true - } - } -} - -func (iter *Iterator) unreadByte() { - if iter.Error != nil { - return - } - iter.head-- - return -} - -// Read read the next JSON element as generic interface{}. -func (iter *Iterator) Read() interface{} { - valueType := iter.WhatIsNext() - switch valueType { - case StringValue: - return iter.ReadString() - case NumberValue: - if iter.cfg.configBeforeFrozen.UseNumber { - return json.Number(iter.readNumberAsString()) - } - return iter.ReadFloat64() - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - return nil - case BoolValue: - return iter.ReadBool() - case ArrayValue: - arr := []interface{}{} - iter.ReadArrayCB(func(iter *Iterator) bool { - var elem interface{} - iter.ReadVal(&elem) - arr = append(arr, elem) - return true - }) - return arr - case ObjectValue: - obj := map[string]interface{}{} - iter.ReadMapCB(func(Iter *Iterator, field string) bool { - var elem interface{} - iter.ReadVal(&elem) - obj[field] = elem - return true - }) - return obj - default: - iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) - return nil - } -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_array.go b/vendor/github.com/json-iterator/go/feature_iter_array.go deleted file mode 100644 index cbc3ec8d16a..00000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_array.go +++ /dev/null @@ -1,58 +0,0 @@ -package jsoniter - -// ReadArray read array element, tells if the array has more element to read. 
-func (iter *Iterator) ReadArray() (ret bool) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return false // null - case '[': - c = iter.nextToken() - if c != ']' { - iter.unreadByte() - return true - } - return false - case ']': - return false - case ',': - return true - default: - iter.ReportError("ReadArray", "expect [ or , or ] or n, but found: "+string([]byte{c})) - return - } -} - -// ReadArrayCB read array with callback -func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { - c := iter.nextToken() - if c == '[' { - c = iter.nextToken() - if c != ']' { - iter.unreadByte() - if !callback(iter) { - return false - } - c = iter.nextToken() - for c == ',' { - if !callback(iter) { - return false - } - c = iter.nextToken() - } - if c != ']' { - iter.ReportError("ReadArrayCB", "expect ] in the end") - return false - } - return true - } - return true - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadArrayCB", "expect [ or n, but found: "+string([]byte{c})) - return false -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_float.go b/vendor/github.com/json-iterator/go/feature_iter_float.go deleted file mode 100644 index 86f45991224..00000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_float.go +++ /dev/null @@ -1,341 +0,0 @@ -package jsoniter - -import ( - "io" - "math/big" - "strconv" - "strings" - "unsafe" -) - -var floatDigits []int8 - -const invalidCharForNumber = int8(-1) -const endOfNumber = int8(-2) -const dotInNumber = int8(-3) - -func init() { - floatDigits = make([]int8, 256) - for i := 0; i < len(floatDigits); i++ { - floatDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - floatDigits[i] = i - int8('0') - } - floatDigits[','] = endOfNumber - floatDigits[']'] = endOfNumber - floatDigits['}'] = endOfNumber - floatDigits[' '] = endOfNumber - floatDigits['\t'] = endOfNumber - floatDigits['\n'] = endOfNumber - floatDigits['.'] = dotInNumber -} - -// ReadBigFloat read big.Float -func (iter *Iterator) ReadBigFloat() (ret *big.Float) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - prec := 64 - if len(str) > prec { - prec = len(str) - } - val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) - if err != nil { - iter.Error = err - return nil - } - return val -} - -// ReadBigInt read big.Int -func (iter *Iterator) ReadBigInt() (ret *big.Int) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - ret = big.NewInt(0) - var success bool - ret, success = ret.SetString(str, 10) - if !success { - iter.ReportError("ReadBigInt", "invalid big int") - return nil - } - return ret -} - -//ReadFloat32 read float32 -func (iter *Iterator) ReadFloat32() (ret float32) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat32() - } - iter.unreadByte() - return iter.readPositiveFloat32() -} - -func (iter *Iterator) readPositiveFloat32() (ret float32) { - value := uint64(0) - c := byte(' ') - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c = iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.ReportError("readFloat32", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat32", "leading dot is invalid") - return - case 0: - if i == iter.tail 
{ - return iter.readFloat32SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat32", "leading zero is invalid") - return - } - } - value = uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.head = i - return float32(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat32SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float32(float64(value) / float64(pow10[decimalPlaces])) - } - // too many decimal places - return iter.readFloat32SlowPath() - case invalidCharForNumber: - fallthrough - case dotInNumber: - return iter.readFloat32SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat32SlowPath() -} - -func (iter *Iterator) readNumberAsString() (ret string) { - strBuf := [16]byte{} - str := strBuf[0:0] -load_loop: - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - str = append(str, c) - continue - default: - iter.head = i - break load_loop - } - } - if !iter.loadMore() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - return - } - if len(str) == 0 { - iter.ReportError("readNumberAsString", "invalid number") - } - return *(*string)(unsafe.Pointer(&str)) -} - -func (iter *Iterator) readFloat32SlowPath() (ret float32) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat32SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 32) - if err != nil { - iter.Error = err - return - } - return float32(val) -} - -// ReadFloat64 read float64 -func (iter *Iterator) ReadFloat64() (ret float64) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat64() - } - iter.unreadByte() - return iter.readPositiveFloat64() -} - -func (iter *Iterator) readPositiveFloat64() (ret float64) { - value := uint64(0) - c := byte(' ') - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c = iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat64SlowPath() - case endOfNumber: - iter.ReportError("readFloat64", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat64", "leading dot is invalid") - return - case 0: - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat64", "leading zero is invalid") - return - } - } - value = uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - 
return iter.readFloat64SlowPath() - case endOfNumber: - iter.head = i - return float64(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat64SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float64(value) / float64(pow10[decimalPlaces]) - } - // too many decimal places - return iter.readFloat64SlowPath() - case invalidCharForNumber: - fallthrough - case dotInNumber: - return iter.readFloat64SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat64SlowPath() -} - -func (iter *Iterator) readFloat64SlowPath() (ret float64) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat64SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 64) - if err != nil { - iter.Error = err - return - } - return val -} - -func validateFloat(str string) string { - // strconv.ParseFloat is not validating `1.` or `1.e1` - if len(str) == 0 { - return "empty number" - } - if str[0] == '-' { - return "-- is not valid" - } - dotPos := strings.IndexByte(str, '.') - if dotPos != -1 { - if dotPos == len(str)-1 { - return "dot can not be last character" - } - switch str[dotPos+1] { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - return "missing digit after dot" - } - } - return "" -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/feature_iter_int.go deleted file mode 100644 index 886879efdbb..00000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_int.go +++ /dev/null @@ -1,258 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var intDigits []int8 - -const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 -const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 - -func init() { - intDigits = make([]int8, 256) - for i := 0; i < len(intDigits); i++ { - intDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - intDigits[i] = i - int8('0') - } -} - -// ReadUint read uint -func (iter *Iterator) ReadUint() uint { - return uint(iter.ReadUint64()) -} - -// ReadInt read int -func (iter *Iterator) ReadInt() int { - return int(iter.ReadInt64()) -} - -// ReadInt8 read int8 -func (iter *Iterator) ReadInt8() (ret int8) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt8+1 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int8(val) - } - val := iter.readUint32(c) - if val > math.MaxInt8 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int8(val) -} - -// ReadUint8 read uint8 -func (iter *Iterator) ReadUint8() (ret uint8) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint8 { - iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint8(val) -} - -// ReadInt16 read int16 -func (iter 
*Iterator) ReadInt16() (ret int16) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt16+1 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int16(val) - } - val := iter.readUint32(c) - if val > math.MaxInt16 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int16(val) -} - -// ReadUint16 read uint16 -func (iter *Iterator) ReadUint16() (ret uint16) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint16 { - iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint16(val) -} - -// ReadInt32 read int32 -func (iter *Iterator) ReadInt32() (ret int32) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt32+1 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int32(val) - } - val := iter.readUint32(c) - if val > math.MaxInt32 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int32(val) -} - -// ReadUint32 read uint32 -func (iter *Iterator) ReadUint32() (ret uint32) { - return iter.readUint32(iter.nextToken()) -} - -func (iter *Iterator) readUint32(c byte) (ret uint32) { - ind := intDigits[c] - if ind == 0 { - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint32(ind) - if iter.tail-iter.head > 10 { - i := iter.head - ind2 := intDigits[iter.buf[i]] - if ind2 == invalidCharForNumber { - iter.head = i - return value - } - i++ - ind3 := intDigits[iter.buf[i]] - if ind3 == invalidCharForNumber { - iter.head = i - return value*10 + uint32(ind2) - } - //iter.head = i + 1 - //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) - i++ - ind4 := intDigits[iter.buf[i]] - if ind4 == invalidCharForNumber { - iter.head = i - return value*100 + uint32(ind2)*10 + uint32(ind3) - } - i++ - ind5 := intDigits[iter.buf[i]] - if ind5 == invalidCharForNumber { - iter.head = i - return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) - } - i++ - ind6 := intDigits[iter.buf[i]] - if ind6 == invalidCharForNumber { - iter.head = i - return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) - } - i++ - ind7 := intDigits[iter.buf[i]] - if ind7 == invalidCharForNumber { - iter.head = i - return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) - } - i++ - ind8 := intDigits[iter.buf[i]] - if ind8 == invalidCharForNumber { - iter.head = i - return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) - } - i++ - ind9 := intDigits[iter.buf[i]] - value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) - iter.head = i - if ind9 == invalidCharForNumber { - return value - } - } - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - return value - } - if value > uint32SafeToMultiply10 { - value2 := (value << 3) + (value << 1) + uint32(ind) - if value2 < value { - iter.ReportError("readUint32", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) 
+ (value << 1) + uint32(ind) - } - if !iter.loadMore() { - return value - } - } -} - -// ReadInt64 read int64 -func (iter *Iterator) ReadInt64() (ret int64) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint64(iter.readByte()) - if val > math.MaxInt64+1 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return -int64(val) - } - val := iter.readUint64(c) - if val > math.MaxInt64 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return int64(val) -} - -// ReadUint64 read uint64 -func (iter *Iterator) ReadUint64() uint64 { - return iter.readUint64(iter.nextToken()) -} - -func (iter *Iterator) readUint64(c byte) (ret uint64) { - ind := intDigits[c] - if ind == 0 { - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint64(ind) - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - return value - } - if value > uint64SafeToMultiple10 { - value2 := (value << 3) + (value << 1) + uint64(ind) - if value2 < value { - iter.ReportError("readUint64", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint64(ind) - } - if !iter.loadMore() { - return value - } - } -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/feature_iter_object.go deleted file mode 100644 index 3bdb5576eed..00000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_object.go +++ /dev/null @@ -1,212 +0,0 @@ -package jsoniter - -import ( - "fmt" - "unicode" - "unsafe" -) - -// ReadObject read one field from object. -// If object ended, returns empty string. -// Otherwise, returns the field name. 
-func (iter *Iterator) ReadObject() (ret string) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return "" // null - case '{': - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - return string(iter.readObjectFieldAsBytes()) - } - if c == '}' { - return "" // end of object - } - iter.ReportError("ReadObject", `expect " after {`) - return - case ',': - return string(iter.readObjectFieldAsBytes()) - case '}': - return "" // end of object - default: - iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) - return - } -} - -func (iter *Iterator) readFieldHash() int32 { - hash := int64(0x811c9dc5) - c := iter.nextToken() - if c == '"' { - for { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - b := iter.buf[i] - if 'A' <= b && b <= 'Z' { - b += 'a' - 'A' - } - if b == '"' { - iter.head = i + 1 - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - } - return int32(hash) - } - hash ^= int64(b) - hash *= 0x1000193 - } - if !iter.loadMore() { - iter.ReportError("readFieldHash", `incomplete field name`) - return 0 - } - } - } - iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) - return 0 -} - -func calcHash(str string) int32 { - hash := int64(0x811c9dc5) - for _, b := range str { - hash ^= int64(unicode.ToLower(b)) - hash *= 0x1000193 - } - return int32(hash) -} - -// ReadObjectCB read object with callback, the key is ascii only and field name not copied -func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.readObjectFieldAsBytes() - if !callback(iter, *(*string)(unsafe.Pointer(&field))) { - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.readObjectFieldAsBytes() - if !callback(iter, *(*string)(unsafe.Pointer(&field))) { - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadObjectCB", `object not ended with }`) - return false - } - return true - } - if c == '}' { - return true - } - iter.ReportError("ReadObjectCB", `expect " after }`) - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadObjectCB", `expect { or n`) - return false -} - -// ReadMapCB read map with callback, the key can be any string -func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field") - return false - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field") - return false - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadMapCB", `object not ended with }`) - return false - } - return true - } - if c == '}' { - return true - } - iter.ReportError("ReadMapCB", `expect " after }`) - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadMapCB", `expect { or n`) - return false -} - -func (iter *Iterator) readObjectStart() bool { 
- c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '}' { - return false - } - iter.unreadByte() - return true - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return false - } - iter.ReportError("readObjectStart", "expect { or n") - return false -} - -func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { - str := iter.ReadStringAsSlice() - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if iter.buf[iter.head] != ':' { - iter.ReportError("readObjectFieldAsBytes", "expect : after object field") - return - } - iter.head++ - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if ret == nil { - return str - } - return ret -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip.go b/vendor/github.com/json-iterator/go/feature_iter_skip.go deleted file mode 100644 index b008d98c99a..00000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_skip.go +++ /dev/null @@ -1,127 +0,0 @@ -package jsoniter - -import "fmt" - -// ReadNil reads a json object as nil and -// returns whether it's a nil or not -func (iter *Iterator) ReadNil() (ret bool) { - c := iter.nextToken() - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') // null - return true - } - iter.unreadByte() - return false -} - -// ReadBool reads a json object as BoolValue -func (iter *Iterator) ReadBool() (ret bool) { - c := iter.nextToken() - if c == 't' { - iter.skipThreeBytes('r', 'u', 'e') - return true - } - if c == 'f' { - iter.skipFourBytes('a', 'l', 's', 'e') - return false - } - iter.ReportError("ReadBool", "expect t or f") - return -} - -// SkipAndReturnBytes skip next JSON element, and return its content as []byte. -// The []byte can be kept, it is a copy of data. -func (iter *Iterator) SkipAndReturnBytes() []byte { - iter.startCapture(iter.head) - iter.Skip() - return iter.stopCapture() -} - -type captureBuffer struct { - startedAt int - captured []byte -} - -func (iter *Iterator) startCapture(captureStartedAt int) { - if iter.captured != nil { - panic("already in capture mode") - } - iter.captureStartedAt = captureStartedAt - iter.captured = make([]byte, 0, 32) -} - -func (iter *Iterator) stopCapture() []byte { - if iter.captured == nil { - panic("not in capture mode") - } - captured := iter.captured - remaining := iter.buf[iter.captureStartedAt:iter.head] - iter.captureStartedAt = -1 - iter.captured = nil - if len(captured) == 0 { - return remaining - } - captured = append(captured, remaining...) 
- return captured -} - -// Skip skips a json object and positions to relatively the next json object -func (iter *Iterator) Skip() { - c := iter.nextToken() - switch c { - case '"': - iter.skipString() - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - case '0': - iter.unreadByte() - iter.ReadFloat32() - case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.skipNumber() - case '[': - iter.skipArray() - case '{': - iter.skipObject() - default: - iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) - return - } -} - -func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { - if iter.readByte() != b1 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b2 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b3 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b4 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } -} - -func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { - if iter.readByte() != b1 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } - if iter.readByte() != b2 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } - if iter.readByte() != b3 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go deleted file mode 100644 index 047d58a4bc9..00000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go +++ /dev/null @@ -1,144 +0,0 @@ -//+build jsoniter-sloppy - -package jsoniter - -// sloppy but faster implementation, do not validate the input json - -func (iter *Iterator) skipNumber() { - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\r', '\t', ',', '}', ']': - iter.head = i - return - } - } - if !iter.loadMore() { - return - } - } -} - -func (iter *Iterator) skipArray() { - level := 1 - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '[': // If open symbol, increase level - level++ - case ']': // If close symbol, increase level - level-- - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete array") - return - } - } -} - -func (iter *Iterator) skipObject() { - level := 1 - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '{': // If open symbol, increase level - level++ - case '}': // If close symbol, increase level - level-- - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete object") - return - 
} - } -} - -func (iter *Iterator) skipString() { - for { - end, escaped := iter.findStringEnd() - if end == -1 { - if !iter.loadMore() { - iter.ReportError("skipString", "incomplete string") - return - } - if escaped { - iter.head = 1 // skip the first char as last char read is \ - } - } else { - iter.head = end - return - } - } -} - -// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go -// Tries to find the end of string -// Support if string contains escaped quote symbols. -func (iter *Iterator) findStringEnd() (int, bool) { - escaped := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - if !escaped { - return i + 1, false - } - j := i - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return i + 1, true - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - } - } else if c == '\\' { - escaped = true - } - } - j := iter.tail - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return -1, false // do not end with \ - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - - } - return -1, true // end with \ -} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go deleted file mode 100644 index d2676382540..00000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go +++ /dev/null @@ -1,89 +0,0 @@ -//+build !jsoniter-sloppy - -package jsoniter - -import "fmt" - -func (iter *Iterator) skipNumber() { - if !iter.trySkipNumber() { - iter.unreadByte() - iter.ReadFloat32() - } -} - -func (iter *Iterator) trySkipNumber() bool { - dotFound := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - case '.': - if dotFound { - iter.ReportError("validateNumber", `more than one dot found in number`) - return true // already failed - } - if i+1 == iter.tail { - return false - } - c = iter.buf[i+1] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - iter.ReportError("validateNumber", `missing digit after dot`) - return true // already failed - } - dotFound = true - default: - switch c { - case ',', ']', '}', ' ', '\t', '\n', '\r': - if iter.head == i { - return false // if - without following digits - } - iter.head = i - return true // must be valid - } - return false // may be invalid - } - } - return false -} - -func (iter *Iterator) skipString() { - if !iter.trySkipString() { - iter.unreadByte() - iter.ReadString() - } -} - -func (iter *Iterator) trySkipString() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - iter.head = i + 1 - return true // valid - } else if c == '\\' { - return false - } else if c < ' ' { - iter.ReportError("ReadString", - fmt.Sprintf(`invalid control character found: %d`, c)) - return true // already failed - } - } - return false -} - -func (iter *Iterator) skipObject() { - iter.unreadByte() - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - return true - }) -} - -func (iter *Iterator) skipArray() { - iter.unreadByte() - iter.ReadArrayCB(func(iter *Iterator) bool { - iter.Skip() - return true - }) -} diff --git 
a/vendor/github.com/json-iterator/go/feature_iter_string.go b/vendor/github.com/json-iterator/go/feature_iter_string.go deleted file mode 100644 index b764600460e..00000000000 --- a/vendor/github.com/json-iterator/go/feature_iter_string.go +++ /dev/null @@ -1,215 +0,0 @@ -package jsoniter - -import ( - "fmt" - "unicode/utf16" -) - -// ReadString read string from iterator -func (iter *Iterator) ReadString() (ret string) { - c := iter.nextToken() - if c == '"' { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - ret = string(iter.buf[iter.head:i]) - iter.head = i + 1 - return ret - } else if c == '\\' { - break - } else if c < ' ' { - iter.ReportError("ReadString", - fmt.Sprintf(`invalid control character found: %d`, c)) - return - } - } - return iter.readStringSlowPath() - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return "" - } - iter.ReportError("ReadString", `expects " or n`) - return -} - -func (iter *Iterator) readStringSlowPath() (ret string) { - var str []byte - var c byte - for iter.Error == nil { - c = iter.readByte() - if c == '"' { - return string(str) - } - if c == '\\' { - c = iter.readByte() - str = iter.readEscapedChar(c, str) - } else { - str = append(str, c) - } - } - iter.ReportError("ReadString", "unexpected end of input") - return -} - -func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { - switch c { - case 'u': - r := iter.readU4() - if utf16.IsSurrogate(r) { - c = iter.readByte() - if iter.Error != nil { - return nil - } - if c != '\\' { - iter.unreadByte() - str = appendRune(str, r) - return str - } - c = iter.readByte() - if iter.Error != nil { - return nil - } - if c != 'u' { - str = appendRune(str, r) - return iter.readEscapedChar(c, str) - } - r2 := iter.readU4() - if iter.Error != nil { - return nil - } - combined := utf16.DecodeRune(r, r2) - if combined == '\uFFFD' { - str = appendRune(str, r) - str = appendRune(str, r2) - } else { - str = appendRune(str, combined) - } - } else { - str = appendRune(str, r) - } - case '"': - str = append(str, '"') - case '\\': - str = append(str, '\\') - case '/': - str = append(str, '/') - case 'b': - str = append(str, '\b') - case 'f': - str = append(str, '\f') - case 'n': - str = append(str, '\n') - case 'r': - str = append(str, '\r') - case 't': - str = append(str, '\t') - default: - iter.ReportError("ReadString", - `invalid escape char after \`) - return nil - } - return str -} - -// ReadStringAsSlice read string from iterator without copying into string form. -// The []byte can not be kept, as it will change after next iterator call. 
-func (iter *Iterator) ReadStringAsSlice() (ret []byte) { - c := iter.nextToken() - if c == '"' { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - // for: field name, base64, number - if iter.buf[i] == '"' { - // fast path: reuse the underlying buffer - ret = iter.buf[iter.head:i] - iter.head = i + 1 - return ret - } - } - readLen := iter.tail - iter.head - copied := make([]byte, readLen, readLen*2) - copy(copied, iter.buf[iter.head:iter.tail]) - iter.head = iter.tail - for iter.Error == nil { - c := iter.readByte() - if c == '"' { - return copied - } - copied = append(copied, c) - } - return copied - } - iter.ReportError("ReadString", `expects " or n`) - return -} - -func (iter *Iterator) readU4() (ret rune) { - for i := 0; i < 4; i++ { - c := iter.readByte() - if iter.Error != nil { - return - } - if c >= '0' && c <= '9' { - ret = ret*16 + rune(c-'0') - } else if c >= 'a' && c <= 'f' { - ret = ret*16 + rune(c-'a'+10) - } else if c >= 'A' && c <= 'F' { - ret = ret*16 + rune(c-'A'+10) - } else { - iter.ReportError("readU4", "expects 0~9 or a~f") - return - } - } - return ret -} - -const ( - t1 = 0x00 // 0000 0000 - tx = 0x80 // 1000 0000 - t2 = 0xC0 // 1100 0000 - t3 = 0xE0 // 1110 0000 - t4 = 0xF0 // 1111 0000 - t5 = 0xF8 // 1111 1000 - - maskx = 0x3F // 0011 1111 - mask2 = 0x1F // 0001 1111 - mask3 = 0x0F // 0000 1111 - mask4 = 0x07 // 0000 0111 - - rune1Max = 1<<7 - 1 - rune2Max = 1<<11 - 1 - rune3Max = 1<<16 - 1 - - surrogateMin = 0xD800 - surrogateMax = 0xDFFF - - maxRune = '\U0010FFFF' // Maximum valid Unicode code point. - runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" -) - -func appendRune(p []byte, r rune) []byte { - // Negative values are erroneous. Making it unsigned addresses the problem. 
- switch i := uint32(r); { - case i <= rune1Max: - p = append(p, byte(r)) - return p - case i <= rune2Max: - p = append(p, t2|byte(r>>6)) - p = append(p, tx|byte(r)&maskx) - return p - case i > maxRune, surrogateMin <= i && i <= surrogateMax: - r = runeError - fallthrough - case i <= rune3Max: - p = append(p, t3|byte(r>>12)) - p = append(p, tx|byte(r>>6)&maskx) - p = append(p, tx|byte(r)&maskx) - return p - default: - p = append(p, t4|byte(r>>18)) - p = append(p, tx|byte(r>>12)&maskx) - p = append(p, tx|byte(r>>6)&maskx) - p = append(p, tx|byte(r)&maskx) - return p - } -} diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go deleted file mode 100644 index 0439f672528..00000000000 --- a/vendor/github.com/json-iterator/go/feature_json_number.go +++ /dev/null @@ -1,15 +0,0 @@ -package jsoniter - -import "encoding/json" - -type Number string - -func CastJsonNumber(val interface{}) (string, bool) { - switch typedVal := val.(type) { - case json.Number: - return string(typedVal), true - case Number: - return string(typedVal), true - } - return "", false -} diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/feature_pool.go deleted file mode 100644 index 73962bc6f6c..00000000000 --- a/vendor/github.com/json-iterator/go/feature_pool.go +++ /dev/null @@ -1,57 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// IteratorPool a thread safe pool of iterators with same configuration -type IteratorPool interface { - BorrowIterator(data []byte) *Iterator - ReturnIterator(iter *Iterator) -} - -// StreamPool a thread safe pool of streams with same configuration -type StreamPool interface { - BorrowStream(writer io.Writer) *Stream - ReturnStream(stream *Stream) -} - -func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { - select { - case stream := <-cfg.streamPool: - stream.Reset(writer) - return stream - default: - return NewStream(cfg, writer, 512) - } -} - -func (cfg *frozenConfig) ReturnStream(stream *Stream) { - stream.Error = nil - select { - case cfg.streamPool <- stream: - return - default: - return - } -} - -func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { - select { - case iter := <-cfg.iteratorPool: - iter.ResetBytes(data) - return iter - default: - return ParseBytes(cfg, data) - } -} - -func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { - iter.Error = nil - select { - case cfg.iteratorPool <- iter: - return - default: - return - } -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go b/vendor/github.com/json-iterator/go/feature_reflect.go deleted file mode 100644 index 05d91b49c8b..00000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect.go +++ /dev/null @@ -1,691 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "fmt" - "reflect" - "time" - "unsafe" -) - -// ValDecoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValDecoder with json.Decoder. -// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). -// -// Reflection on type to create decoders, which is then cached -// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions -// 1. create instance of new value, for example *int will need a int to be allocated -// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New -// 3. 
assignment to map, both key and value will be reflect.Value -// For a simple struct binding, it will be reflect.Value free and allocation free -type ValDecoder interface { - Decode(ptr unsafe.Pointer, iter *Iterator) -} - -// ValEncoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValEncoder with json.Encoder. -// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). -type ValEncoder interface { - IsEmpty(ptr unsafe.Pointer) bool - Encode(ptr unsafe.Pointer, stream *Stream) - EncodeInterface(val interface{}, stream *Stream) -} - -type checkIsEmpty interface { - IsEmpty(ptr unsafe.Pointer) bool -} - -// WriteToStream the default implementation for TypeEncoder method EncodeInterface -func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) { - e := (*emptyInterface)(unsafe.Pointer(&val)) - if e.word == nil { - stream.WriteNil() - return - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -var jsonNumberType reflect.Type -var jsoniterNumberType reflect.Type -var jsonRawMessageType reflect.Type -var jsoniterRawMessageType reflect.Type -var anyType reflect.Type -var marshalerType reflect.Type -var unmarshalerType reflect.Type -var textMarshalerType reflect.Type -var textUnmarshalerType reflect.Type - -func init() { - jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem() - jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem() - jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() - jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem() - anyType = reflect.TypeOf((*Any)(nil)).Elem() - marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() - textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() -} - -type optionalDecoder struct { - valueType reflect.Type - valueDecoder ValDecoder -} - -func (decoder *optionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - *((*unsafe.Pointer)(ptr)) = nil - } else { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - value := reflect.New(decoder.valueType) - newPtr := extractInterface(value.Interface()).word - decoder.valueDecoder.Decode(newPtr, iter) - *((*uintptr)(ptr)) = uintptr(newPtr) - } else { - //reuse existing instance - decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } - } -} - -type deferenceDecoder struct { - // only to deference a pointer - valueType reflect.Type - valueDecoder ValDecoder -} - -func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - value := reflect.New(decoder.valueType) - newPtr := extractInterface(value.Interface()).word - decoder.valueDecoder.Decode(newPtr, iter) - *((*uintptr)(ptr)) = uintptr(newPtr) - } else { - //reuse existing instance - decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } -} - -type optionalEncoder struct { - valueEncoder ValEncoder -} - -func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *optionalEncoder) 
EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *optionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { - if *((*unsafe.Pointer)(ptr)) == nil { - return true - } - return false -} - -type placeholderEncoder struct { - cfg *frozenConfig - cacheKey reflect.Type -} - -func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.getRealEncoder().Encode(ptr, stream) -} - -func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.getRealEncoder().IsEmpty(ptr) -} - -func (encoder *placeholderEncoder) getRealEncoder() ValEncoder { - for i := 0; i < 30; i++ { - realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey) - _, isPlaceholder := realDecoder.(*placeholderEncoder) - if isPlaceholder { - time.Sleep(time.Second) - } else { - return realDecoder - } - } - panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey)) -} - -type placeholderDecoder struct { - cfg *frozenConfig - cacheKey reflect.Type -} - -func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - for i := 0; i < 30; i++ { - realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey) - _, isPlaceholder := realDecoder.(*placeholderDecoder) - if isPlaceholder { - time.Sleep(time.Second) - } else { - realDecoder.Decode(ptr, iter) - return - } - } - panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey)) -} - -// emptyInterface is the header for an interface{} value. -type emptyInterface struct { - typ unsafe.Pointer - word unsafe.Pointer -} - -// emptyInterface is the header for an interface with method (not interface{}) -type nonEmptyInterface struct { - // see ../runtime/iface.go:/Itab - itab *struct { - ityp unsafe.Pointer // static interface type - typ unsafe.Pointer // dynamic concrete type - link unsafe.Pointer - bad int32 - unused int32 - fun [100000]unsafe.Pointer // method table - } - word unsafe.Pointer -} - -// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal -func (iter *Iterator) ReadVal(obj interface{}) { - typ := reflect.TypeOf(obj) - cacheKey := typ.Elem() - decoder, err := decoderOfType(iter.cfg, cacheKey) - if err != nil { - iter.Error = err - return - } - e := (*emptyInterface)(unsafe.Pointer(&obj)) - decoder.Decode(e.word, iter) -} - -// WriteVal copy the go interface into underlying JSON, same as json.Marshal -func (stream *Stream) WriteVal(val interface{}) { - if nil == val { - stream.WriteNil() - return - } - typ := reflect.TypeOf(val) - cacheKey := typ - encoder, err := encoderOfType(stream.cfg, cacheKey) - if err != nil { - stream.Error = err - return - } - encoder.EncodeInterface(val, stream) -} - -type prefix string - -func (p prefix) addToDecoder(decoder ValDecoder, err error) (ValDecoder, error) { - if err != nil { - return nil, fmt.Errorf("%s: %s", p, err.Error()) - } - return decoder, err -} - -func (p prefix) addToEncoder(encoder ValEncoder, err error) (ValEncoder, error) { - if err != nil { - return nil, fmt.Errorf("%s: %s", p, err.Error()) - } - return encoder, err -} - -func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - cacheKey := typ - decoder := cfg.getDecoderFromCache(cacheKey) - if decoder != nil { - return decoder, nil - } - decoder = getTypeDecoderFromExtension(typ) - if decoder != nil { - 
cfg.addDecoderToCache(cacheKey, decoder) - return decoder, nil - } - decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey} - cfg.addDecoderToCache(cacheKey, decoder) - decoder, err := createDecoderOfType(cfg, typ) - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - cfg.addDecoderToCache(cacheKey, decoder) - return decoder, err -} - -func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - typeName := typ.String() - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{}, nil - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{}, nil - } - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{}, nil - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{}, nil - } - if typ.Implements(unmarshalerType) { - templateInterface := reflect.New(typ).Elem().Interface() - var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} - if typ.Kind() == reflect.Ptr { - decoder = &optionalDecoder{typ.Elem(), decoder} - } - return decoder, nil - } - if reflect.PtrTo(typ).Implements(unmarshalerType) { - templateInterface := reflect.New(typ).Interface() - var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} - return decoder, nil - } - if typ.Implements(textUnmarshalerType) { - templateInterface := reflect.New(typ).Elem().Interface() - var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} - if typ.Kind() == reflect.Ptr { - decoder = &optionalDecoder{typ.Elem(), decoder} - } - return decoder, nil - } - if reflect.PtrTo(typ).Implements(textUnmarshalerType) { - templateInterface := reflect.New(typ).Interface() - var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} - return decoder, nil - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { - sliceDecoder, err := prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) - if err != nil { - return nil, err - } - return &base64Codec{sliceDecoder: sliceDecoder}, nil - } - if typ.Implements(anyType) { - return &anyCodec{}, nil - } - switch typ.Kind() { - case reflect.String: - if typeName != "string" { - return decoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) - } - return &stringCodec{}, nil - case reflect.Int: - if typeName != "int" { - return decoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) - } - return &intCodec{}, nil - case reflect.Int8: - if typeName != "int8" { - return decoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) - } - return &int8Codec{}, nil - case reflect.Int16: - if typeName != "int16" { - return decoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) - } - return &int16Codec{}, nil - case reflect.Int32: - if typeName != "int32" { - return decoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) - } - return &int32Codec{}, nil - case reflect.Int64: - if typeName != "int64" { - return decoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) - } - return &int64Codec{}, nil - case reflect.Uint: - if typeName != "uint" { - return decoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) - } - return &uintCodec{}, nil - case reflect.Uint8: - if typeName != "uint8" { - return decoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) - } - return &uint8Codec{}, nil - case reflect.Uint16: - if typeName != "uint16" { - return decoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) - } - return &uint16Codec{}, nil - case reflect.Uint32: - if typeName != "uint32" 
{ - return decoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) - } - return &uint32Codec{}, nil - case reflect.Uintptr: - if typeName != "uintptr" { - return decoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) - } - return &uintptrCodec{}, nil - case reflect.Uint64: - if typeName != "uint64" { - return decoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) - } - return &uint64Codec{}, nil - case reflect.Float32: - if typeName != "float32" { - return decoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) - } - return &float32Codec{}, nil - case reflect.Float64: - if typeName != "float64" { - return decoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) - } - return &float64Codec{}, nil - case reflect.Bool: - if typeName != "bool" { - return decoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) - } - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return prefix(fmt.Sprintf("[%s]", typeName)).addToDecoder(decoderOfStruct(cfg, typ)) - case reflect.Array: - return prefix("[array]").addToDecoder(decoderOfArray(cfg, typ)) - case reflect.Slice: - return prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ)) - case reflect.Map: - return prefix("[map]").addToDecoder(decoderOfMap(cfg, typ)) - case reflect.Ptr: - return prefix("[optional]").addToDecoder(decoderOfOptional(cfg, typ)) - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - cacheKey := typ - encoder := cfg.getEncoderFromCache(cacheKey) - if encoder != nil { - return encoder, nil - } - encoder = getTypeEncoderFromExtension(typ) - if encoder != nil { - cfg.addEncoderToCache(cacheKey, encoder) - return encoder, nil - } - encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey} - cfg.addEncoderToCache(cacheKey, encoder) - encoder, err := createEncoderOfType(cfg, typ) - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - cfg.addEncoderToCache(cacheKey, encoder) - return encoder, err -} - -func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{}, nil - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{}, nil - } - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{}, nil - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{}, nil - } - if typ.Implements(marshalerType) { - checkIsEmpty, err := createCheckIsEmpty(typ) - if err != nil { - return nil, err - } - templateInterface := reflect.New(typ).Elem().Interface() - var encoder ValEncoder = &marshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - if typ.Kind() == reflect.Ptr { - encoder = &optionalEncoder{encoder} - } - return encoder, nil - } - if typ.Implements(textMarshalerType) { - checkIsEmpty, err := createCheckIsEmpty(typ) - if err != nil { - return nil, err - } - templateInterface := reflect.New(typ).Elem().Interface() - var encoder ValEncoder = &textMarshalerEncoder{ - templateInterface: extractInterface(templateInterface), - checkIsEmpty: checkIsEmpty, - } - if typ.Kind() == reflect.Ptr { - encoder = &optionalEncoder{encoder} - } - return encoder, nil - } - if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { - return &base64Codec{}, nil - } - if 
typ.Implements(anyType) { - return &anyCodec{}, nil - } - return createEncoderOfSimpleType(cfg, typ) -} - -func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) { - kind := typ.Kind() - switch kind { - case reflect.String: - return &stringCodec{}, nil - case reflect.Int: - return &intCodec{}, nil - case reflect.Int8: - return &int8Codec{}, nil - case reflect.Int16: - return &int16Codec{}, nil - case reflect.Int32: - return &int32Codec{}, nil - case reflect.Int64: - return &int64Codec{}, nil - case reflect.Uint: - return &uintCodec{}, nil - case reflect.Uint8: - return &uint8Codec{}, nil - case reflect.Uint16: - return &uint16Codec{}, nil - case reflect.Uint32: - return &uint32Codec{}, nil - case reflect.Uintptr: - return &uintptrCodec{}, nil - case reflect.Uint64: - return &uint64Codec{}, nil - case reflect.Float32: - return &float32Codec{}, nil - case reflect.Float64: - return &float64Codec{}, nil - case reflect.Bool: - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return &structEncoder{}, nil - case reflect.Array: - return &arrayEncoder{}, nil - case reflect.Slice: - return &sliceEncoder{}, nil - case reflect.Map: - return &mapEncoder{}, nil - case reflect.Ptr: - return &optionalEncoder{}, nil - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func createEncoderOfSimpleType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - typeName := typ.String() - kind := typ.Kind() - switch kind { - case reflect.String: - if typeName != "string" { - return encoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem()) - } - return &stringCodec{}, nil - case reflect.Int: - if typeName != "int" { - return encoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem()) - } - return &intCodec{}, nil - case reflect.Int8: - if typeName != "int8" { - return encoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem()) - } - return &int8Codec{}, nil - case reflect.Int16: - if typeName != "int16" { - return encoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem()) - } - return &int16Codec{}, nil - case reflect.Int32: - if typeName != "int32" { - return encoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem()) - } - return &int32Codec{}, nil - case reflect.Int64: - if typeName != "int64" { - return encoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem()) - } - return &int64Codec{}, nil - case reflect.Uint: - if typeName != "uint" { - return encoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem()) - } - return &uintCodec{}, nil - case reflect.Uint8: - if typeName != "uint8" { - return encoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem()) - } - return &uint8Codec{}, nil - case reflect.Uint16: - if typeName != "uint16" { - return encoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem()) - } - return &uint16Codec{}, nil - case reflect.Uint32: - if typeName != "uint32" { - return encoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem()) - } - return &uint32Codec{}, nil - case reflect.Uintptr: - if typeName != "uintptr" { - return encoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem()) - } - return &uintptrCodec{}, nil - case reflect.Uint64: - if typeName != "uint64" { - return encoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem()) - } - return &uint64Codec{}, nil - case reflect.Float32: - if typeName != "float32" { - return encoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem()) - } - return &float32Codec{}, nil - case reflect.Float64: - if 
typeName != "float64" { - return encoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem()) - } - return &float64Codec{}, nil - case reflect.Bool: - if typeName != "bool" { - return encoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem()) - } - return &boolCodec{}, nil - case reflect.Interface: - if typ.NumMethod() == 0 { - return &emptyInterfaceCodec{}, nil - } - return &nonEmptyInterfaceCodec{}, nil - case reflect.Struct: - return prefix(fmt.Sprintf("[%s]", typeName)).addToEncoder(encoderOfStruct(cfg, typ)) - case reflect.Array: - return prefix("[array]").addToEncoder(encoderOfArray(cfg, typ)) - case reflect.Slice: - return prefix("[slice]").addToEncoder(encoderOfSlice(cfg, typ)) - case reflect.Map: - return prefix("[map]").addToEncoder(encoderOfMap(cfg, typ)) - case reflect.Ptr: - return prefix("[optional]").addToEncoder(encoderOfOptional(cfg, typ)) - default: - return nil, fmt.Errorf("unsupported type: %v", typ) - } -} - -func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - elemType := typ.Elem() - decoder, err := decoderOfType(cfg, elemType) - if err != nil { - return nil, err - } - return &optionalDecoder{elemType, decoder}, nil -} - -func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - elemType := typ.Elem() - elemEncoder, err := encoderOfType(cfg, elemType) - if err != nil { - return nil, err - } - encoder := &optionalEncoder{elemEncoder} - if elemType.Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - return encoder, nil -} - -func decoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - mapInterface := reflect.New(typ).Interface() - return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)}, nil -} - -func extractInterface(val interface{}) emptyInterface { - return *((*emptyInterface)(unsafe.Pointer(&val))) -} - -func encoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - elemType := typ.Elem() - encoder, err := encoderOfType(cfg, elemType) - if err != nil { - return nil, err - } - mapInterface := reflect.New(typ).Elem().Interface() - if cfg.sortMapKeys { - return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil - } - return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go deleted file mode 100644 index e23f187b7c8..00000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_array.go +++ /dev/null @@ -1,99 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -func decoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - return &arrayDecoder{typ, typ.Elem(), decoder}, nil -} - -func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - encoder, err := encoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - if typ.Elem().Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - return &arrayEncoder{typ, typ.Elem(), encoder}, nil -} - -type arrayEncoder struct { - arrayType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder -} - -func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - 
stream.WriteArrayStart() - elemPtr := unsafe.Pointer(ptr) - encoder.elemEncoder.Encode(elemPtr, stream) - for i := 1; i < encoder.arrayType.Len(); i++ { - stream.WriteMore() - elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) - } -} - -func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) { - // special optimization for interface{} - e := (*emptyInterface)(unsafe.Pointer(&val)) - if e.word == nil { - stream.WriteArrayStart() - stream.WriteNil() - stream.WriteArrayEnd() - return - } - elemType := encoder.arrayType.Elem() - if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) { - ptr := uintptr(e.word) - e.word = unsafe.Pointer(&ptr) - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type arrayDecoder struct { - arrayType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder -} - -func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) - } -} - -func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - offset := uintptr(0) - iter.ReadArrayCB(func(iter *Iterator) bool { - if offset < decoder.arrayType.Size() { - decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter) - offset += decoder.elemType.Size() - } else { - iter.Skip() - } - return true - }) -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/feature_reflect_extension.go deleted file mode 100644 index 3dd38299d49..00000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_extension.go +++ /dev/null @@ -1,413 +0,0 @@ -package jsoniter - -import ( - "fmt" - "reflect" - "sort" - "strings" - "unicode" - "unsafe" -) - -var typeDecoders = map[string]ValDecoder{} -var fieldDecoders = map[string]ValDecoder{} -var typeEncoders = map[string]ValEncoder{} -var fieldEncoders = map[string]ValEncoder{} -var extensions = []Extension{} - -// StructDescriptor describe how should we encode/decode the struct -type StructDescriptor struct { - onePtrEmbedded bool - onePtrOptimization bool - Type reflect.Type - Fields []*Binding -} - -// GetField get one field from the descriptor by its name. -// Can not use map here to keep field orders. -func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { - for _, binding := range structDescriptor.Fields { - if binding.Field.Name == fieldName { - return binding - } - } - return nil -} - -// Binding describe how should we encode/decode the struct field -type Binding struct { - levels []int - Field *reflect.StructField - FromNames []string - ToNames []string - Encoder ValEncoder - Decoder ValDecoder -} - -// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. -// Can also rename fields by UpdateStructDescriptor. 
-type Extension interface { - UpdateStructDescriptor(structDescriptor *StructDescriptor) - CreateDecoder(typ reflect.Type) ValDecoder - CreateEncoder(typ reflect.Type) ValEncoder - DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder - DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder -} - -// DummyExtension embed this type get dummy implementation for all methods of Extension -type DummyExtension struct { -} - -// UpdateStructDescriptor No-op -func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateDecoder No-op -func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder { - return nil -} - -// CreateEncoder No-op -func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type funcDecoder struct { - fun DecoderFunc -} - -func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.fun(ptr, iter) -} - -type funcEncoder struct { - fun EncoderFunc - isEmptyFunc func(ptr unsafe.Pointer) bool -} - -func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.fun(ptr, stream) -} - -func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { - if encoder.isEmptyFunc == nil { - return false - } - return encoder.isEmptyFunc(ptr) -} - -// DecoderFunc the function form of TypeDecoder -type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) - -// EncoderFunc the function form of TypeEncoder -type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) - -// RegisterTypeDecoderFunc register TypeDecoder for a type with function -func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { - typeDecoders[typ] = &funcDecoder{fun} -} - -// RegisterTypeDecoder register TypeDecoder for a typ -func RegisterTypeDecoder(typ string, decoder ValDecoder) { - typeDecoders[typ] = decoder -} - -// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function -func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { - RegisterFieldDecoder(typ, field, &funcDecoder{fun}) -} - -// RegisterFieldDecoder register TypeDecoder for a struct field -func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { - fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder -} - -// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function -func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} -} - -// RegisterTypeEncoder register TypeEncoder for a type -func RegisterTypeEncoder(typ string, encoder ValEncoder) { - typeEncoders[typ] = encoder -} - -// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function -func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) -} - -// RegisterFieldEncoder register TypeEncoder for a struct field -func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { - 
fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder -} - -// RegisterExtension register extension -func RegisterExtension(extension Extension) { - extensions = append(extensions, extension) -} - -func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { - decoder := _getTypeDecoderFromExtension(typ) - if decoder != nil { - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - } - return decoder -} -func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder { - for _, extension := range extensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - typeName := typ.String() - decoder := typeDecoders[typeName] - if decoder != nil { - return decoder - } - if typ.Kind() == reflect.Ptr { - decoder := typeDecoders[typ.Elem().String()] - if decoder != nil { - return &optionalDecoder{typ.Elem(), decoder} - } - } - return nil -} - -func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { - encoder := _getTypeEncoderFromExtension(typ) - if encoder != nil { - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - } - return encoder -} - -func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder { - for _, extension := range extensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - typeName := typ.String() - encoder := typeEncoders[typeName] - if encoder != nil { - return encoder - } - if typ.Kind() == reflect.Ptr { - encoder := typeEncoders[typ.Elem().String()] - if encoder != nil { - return &optionalEncoder{encoder} - } - } - return nil -} - -func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) { - embeddedBindings := []*Binding{} - bindings := []*Binding{} - for i := 0; i < typ.NumField(); i++ { - field := typ.Field(i) - tag := field.Tag.Get(cfg.getTagKey()) - tagParts := strings.Split(tag, ",") - if tag == "-" { - continue - } - if field.Anonymous && (tag == "" || tagParts[0] == "") { - if field.Type.Kind() == reflect.Struct { - structDescriptor, err := describeStruct(cfg, field.Type) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) - omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} - binding.Decoder = &structFieldDecoder{&field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { - structDescriptor, err := describeStruct(cfg, field.Type.Elem()) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) 
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &optionalEncoder{binding.Encoder} - binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} - binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder} - binding.Decoder = &structFieldDecoder{&field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } - } - fieldNames := calcFieldNames(field.Name, tagParts[0], tag) - fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name) - decoder := fieldDecoders[fieldCacheKey] - if decoder == nil { - var err error - decoder, err = decoderOfType(cfg, field.Type) - if err != nil { - return nil, err - } - } - encoder := fieldEncoders[fieldCacheKey] - if encoder == nil { - var err error - encoder, err = encoderOfType(cfg, field.Type) - if err != nil { - return nil, err - } - // map is stored as pointer in the struct - if field.Type.Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - } - binding := &Binding{ - Field: &field, - FromNames: fieldNames, - ToNames: fieldNames, - Decoder: decoder, - Encoder: encoder, - } - binding.levels = []int{i} - bindings = append(bindings, binding) - } - return createStructDescriptor(cfg, typ, bindings, embeddedBindings), nil -} -func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { - onePtrEmbedded := false - onePtrOptimization := false - if typ.NumField() == 1 { - firstField := typ.Field(0) - switch firstField.Type.Kind() { - case reflect.Ptr: - if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct { - onePtrEmbedded = true - } - fallthrough - case reflect.Map: - onePtrOptimization = true - case reflect.Struct: - onePtrOptimization = isStructOnePtr(firstField.Type) - } - } - structDescriptor := &StructDescriptor{ - onePtrEmbedded: onePtrEmbedded, - onePtrOptimization: onePtrOptimization, - Type: typ, - Fields: bindings, - } - for _, extension := range extensions { - extension.UpdateStructDescriptor(structDescriptor) - } - processTags(structDescriptor, cfg) - // merge normal & embedded bindings & sort with original order - allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) - sort.Sort(allBindings) - structDescriptor.Fields = allBindings - return structDescriptor -} - -func isStructOnePtr(typ reflect.Type) bool { - if typ.NumField() == 1 { - firstField := typ.Field(0) - switch firstField.Type.Kind() { - case reflect.Ptr: - return true - case reflect.Map: - return true - case reflect.Struct: - return isStructOnePtr(firstField.Type) - } - } - return false -} - -type sortableBindings []*Binding - -func (bindings sortableBindings) Len() int { - return len(bindings) -} - -func (bindings sortableBindings) Less(i, j int) bool { - left := bindings[i].levels - right := bindings[j].levels - k := 0 - for { - if left[k] < right[k] { - return true - } else if left[k] > right[k] { - return false - } - k++ - } -} - -func (bindings sortableBindings) Swap(i, j int) { - bindings[i], bindings[j] = bindings[j], bindings[i] -} - -func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { - for _, binding := range structDescriptor.Fields { - shouldOmitEmpty := false - tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",") - for _, tagPart := range tagParts[1:] { - if tagPart == "omitempty" { - shouldOmitEmpty = true - } else if tagPart == "string" { - if binding.Field.Type.Kind() == reflect.String { - 
binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} - binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} - } else { - binding.Decoder = &stringModeNumberDecoder{binding.Decoder} - binding.Encoder = &stringModeNumberEncoder{binding.Encoder} - } - } - } - binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} - binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} - } -} - -func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { - // ignore? - if wholeTag == "-" { - return []string{} - } - // rename? - var fieldNames []string - if tagProvidedFieldName == "" { - fieldNames = []string{originalFieldName} - } else { - fieldNames = []string{tagProvidedFieldName} - } - // private? - isNotExported := unicode.IsLower(rune(originalFieldName[0])) - if isNotExported { - fieldNames = []string{} - } - return fieldNames -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_map.go b/vendor/github.com/json-iterator/go/feature_reflect_map.go deleted file mode 100644 index 005671e01be..00000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_map.go +++ /dev/null @@ -1,244 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "reflect" - "sort" - "strconv" - "unsafe" -) - -type mapDecoder struct { - mapType reflect.Type - keyType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder - mapInterface emptyInterface -} - -func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - // dark magic to cast unsafe.Pointer back to interface{} using reflect.Type - mapInterface := decoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface).Elem() - if iter.ReadNil() { - realVal.Set(reflect.Zero(decoder.mapType)) - return - } - if realVal.IsNil() { - realVal.Set(reflect.MakeMap(realVal.Type())) - } - iter.ReadMapCB(func(iter *Iterator, keyStr string) bool { - elem := reflect.New(decoder.elemType) - decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter) - // to put into map, we have to use reflection - keyType := decoder.keyType - // TODO: remove this from loop - switch { - case keyType.Kind() == reflect.String: - realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem()) - return true - case keyType.Implements(textUnmarshalerType): - textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler) - err := textUnmarshaler.UnmarshalText([]byte(keyStr)) - if err != nil { - iter.ReportError("read map key as TextUnmarshaler", err.Error()) - return false - } - realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem()) - return true - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): - textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler) - err := textUnmarshaler.UnmarshalText([]byte(keyStr)) - if err != nil { - iter.ReportError("read map key as TextUnmarshaler", err.Error()) - return false - } - realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem()) - return true - default: - switch keyType.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(keyStr, 10, 64) - if err != nil || reflect.Zero(keyType).OverflowInt(n) { - iter.ReportError("read map key as int64", "read int64 failed") - return false - } - realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) - return 
true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(keyStr, 10, 64) - if err != nil || reflect.Zero(keyType).OverflowUint(n) { - iter.ReportError("read map key as uint64", "read uint64 failed") - return false - } - realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) - return true - } - } - iter.ReportError("read map key", "unexpected map key type "+keyType.String()) - return true - }) -} - -type mapEncoder struct { - mapType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder - mapInterface emptyInterface -} - -func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - stream.WriteObjectStart() - for i, key := range realVal.MapKeys() { - if i != 0 { - stream.WriteMore() - } - encodeMapKey(key, stream) - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - val := realVal.MapIndex(key).Interface() - encoder.elemEncoder.EncodeInterface(val, stream) - } - stream.WriteObjectEnd() -} - -func encodeMapKey(key reflect.Value, stream *Stream) { - if key.Kind() == reflect.String { - stream.WriteString(key.String()) - return - } - if tm, ok := key.Interface().(encoding.TextMarshaler); ok { - buf, err := tm.MarshalText() - if err != nil { - stream.Error = err - return - } - stream.writeByte('"') - stream.Write(buf) - stream.writeByte('"') - return - } - switch key.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - stream.writeByte('"') - stream.WriteInt64(key.Int()) - stream.writeByte('"') - return - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - stream.writeByte('"') - stream.WriteUint64(key.Uint()) - stream.writeByte('"') - return - } - stream.Error = &json.UnsupportedTypeError{Type: key.Type()} -} - -func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - return realVal.Len() == 0 -} - -type sortKeysMapEncoder struct { - mapType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder - mapInterface emptyInterface -} - -func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - - // Extract and sort the keys. 
- keys := realVal.MapKeys() - sv := stringValues(make([]reflectWithString, len(keys))) - for i, v := range keys { - sv[i].v = v - if err := sv[i].resolve(); err != nil { - stream.Error = err - return - } - } - sort.Sort(sv) - - stream.WriteObjectStart() - for i, key := range sv { - if i != 0 { - stream.WriteMore() - } - stream.WriteVal(key.s) // might need html escape, so can not WriteString directly - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - val := realVal.MapIndex(key.v).Interface() - encoder.elemEncoder.EncodeInterface(val, stream) - } - stream.WriteObjectEnd() -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. -type stringValues []reflectWithString - -type reflectWithString struct { - v reflect.Value - s string -} - -func (w *reflectWithString) resolve() error { - if w.v.Kind() == reflect.String { - w.s = w.v.String() - return nil - } - if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { - buf, err := tm.MarshalText() - w.s = string(buf) - return err - } - switch w.v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - w.s = strconv.FormatInt(w.v.Int(), 10) - return nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - w.s = strconv.FormatUint(w.v.Uint(), 10) - return nil - } - return &json.UnsupportedTypeError{Type: w.v.Type()} -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s } - -func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - mapInterface := encoder.mapInterface - mapInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) - realVal := reflect.ValueOf(*realInterface) - return realVal.Len() == 0 -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go deleted file mode 100644 index b37dab3d8a1..00000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_native.go +++ /dev/null @@ -1,672 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/base64" - "encoding/json" - "unsafe" -) - -type stringCodec struct { -} - -func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*string)(ptr)) = iter.ReadString() -} - -func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteString(str) -} - -func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -type intCodec struct { -} - -func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*int)(ptr)) = iter.ReadInt() -} - -func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt(*((*int)(ptr))) -} - -func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int)(ptr)) == 0 -} - -type uintptrCodec struct { -} - -func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter 
*Iterator) { - *((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) -} - -func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(uint64(*((*uintptr)(ptr)))) -} - -func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uintptr)(ptr)) == 0 -} - -type int8Codec struct { -} - -func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*int8)(ptr)) = iter.ReadInt8() -} - -func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt8(*((*int8)(ptr))) -} - -func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int8)(ptr)) == 0 -} - -type int16Codec struct { -} - -func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*int16)(ptr)) = iter.ReadInt16() -} - -func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt16(*((*int16)(ptr))) -} - -func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int16)(ptr)) == 0 -} - -type int32Codec struct { -} - -func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*int32)(ptr)) = iter.ReadInt32() -} - -func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt32(*((*int32)(ptr))) -} - -func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int32)(ptr)) == 0 -} - -type int64Codec struct { -} - -func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*int64)(ptr)) = iter.ReadInt64() -} - -func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt64(*((*int64)(ptr))) -} - -func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int64)(ptr)) == 0 -} - -type uintCodec struct { -} - -func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*uint)(ptr)) = iter.ReadUint() -} - -func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint(*((*uint)(ptr))) -} - -func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint)(ptr)) == 0 -} - -type uint8Codec struct { -} - -func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*uint8)(ptr)) = iter.ReadUint8() -} - -func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint8(*((*uint8)(ptr))) -} - -func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint8)(ptr)) == 0 -} - -type uint16Codec struct { -} - -func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*uint16)(ptr)) = iter.ReadUint16() -} - -func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint16(*((*uint16)(ptr))) -} - -func (codec *uint16Codec) EncodeInterface(val 
interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint16)(ptr)) == 0 -} - -type uint32Codec struct { -} - -func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*uint32)(ptr)) = iter.ReadUint32() -} - -func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint32(*((*uint32)(ptr))) -} - -func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint32)(ptr)) == 0 -} - -type uint64Codec struct { -} - -func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*uint64)(ptr)) = iter.ReadUint64() -} - -func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(*((*uint64)(ptr))) -} - -func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint64)(ptr)) == 0 -} - -type float32Codec struct { -} - -func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*float32)(ptr)) = iter.ReadFloat32() -} - -func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32(*((*float32)(ptr))) -} - -func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type float64Codec struct { -} - -func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*float64)(ptr)) = iter.ReadFloat64() -} - -func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64(*((*float64)(ptr))) -} - -func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -type boolCodec struct { -} - -func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*bool)(ptr)) = iter.ReadBool() -} - -func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteBool(*((*bool)(ptr))) -} - -func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, codec) -} - -func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { - return !(*((*bool)(ptr))) -} - -type emptyInterfaceCodec struct { -} - -func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*interface{})(ptr)) = iter.Read() -} - -func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteVal(*((*interface{})(ptr))) -} - -func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteVal(val) -} - -func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - return ptr == nil -} - -type nonEmptyInterfaceCodec struct { -} - -func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - if nonEmptyInterface.itab == nil { - iter.ReportError("read non-empty interface", "do not know which concrete type to decode to") - return - } - var i interface{} - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word - 
iter.ReadVal(&i) - nonEmptyInterface.word = e.word -} - -func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - var i interface{} - e := (*emptyInterface)(unsafe.Pointer(&i)) - e.typ = nonEmptyInterface.itab.typ - e.word = nonEmptyInterface.word - stream.WriteVal(i) -} - -func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteVal(val) -} - -func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { - nonEmptyInterface := (*nonEmptyInterface)(ptr) - return nonEmptyInterface.word == nil -} - -type anyCodec struct { -} - -func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*Any)(ptr)) = iter.ReadAny() -} - -func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - (*((*Any)(ptr))).WriteTo(stream) -} - -func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) { - (val.(Any)).WriteTo(stream) -} - -func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { - return (*((*Any)(ptr))).Size() == 0 -} - -type jsonNumberCodec struct { -} - -func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) -} - -func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.Number)(ptr)))) -} - -func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(json.Number))) -} - -func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.Number)(ptr))) == 0 -} - -type jsoniterNumberCodec struct { -} - -func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) -} - -func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*Number)(ptr)))) -} - -func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(Number))) -} - -func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*Number)(ptr))) == 0 -} - -type jsonRawMessageCodec struct { -} - -func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) -} - -func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(json.RawMessage))) -} - -func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 -} - -type jsoniterRawMessageCodec struct { -} - -func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) -} - -func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { - stream.WriteRaw(string(val.(RawMessage))) -} - -func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*RawMessage)(ptr))) == 0 -} - -type base64Codec struct { - sliceDecoder ValDecoder -} - -func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if 
iter.ReadNil() { - ptrSlice := (*sliceHeader)(ptr) - ptrSlice.Len = 0 - ptrSlice.Cap = 0 - ptrSlice.Data = nil - return - } - switch iter.WhatIsNext() { - case StringValue: - encoding := base64.StdEncoding - src := iter.SkipAndReturnBytes() - src = src[1 : len(src)-1] - decodedLen := encoding.DecodedLen(len(src)) - dst := make([]byte, decodedLen) - len, err := encoding.Decode(dst, src) - if err != nil { - iter.ReportError("decode base64", err.Error()) - } else { - dst = dst[:len] - dstSlice := (*sliceHeader)(unsafe.Pointer(&dst)) - ptrSlice := (*sliceHeader)(ptr) - ptrSlice.Data = dstSlice.Data - ptrSlice.Cap = dstSlice.Cap - ptrSlice.Len = dstSlice.Len - } - case ArrayValue: - codec.sliceDecoder.Decode(ptr, iter) - default: - iter.ReportError("base64Codec", "invalid input") - } -} - -func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - toGrow := encoding.EncodedLen(len(src)) - stream.ensure(toGrow) - encoding.Encode(stream.buf[stream.n:], src) - stream.n += toGrow - stream.writeByte('"') -} - -func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) { - ptr := extractInterface(val).word - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - toGrow := encoding.EncodedLen(len(src)) - stream.ensure(toGrow) - encoding.Encode(stream.buf[stream.n:], src) - stream.n += toGrow - stream.writeByte('"') -} - -func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*[]byte)(ptr))) == 0 -} - -type stringModeNumberDecoder struct { - elemDecoder ValDecoder -} - -func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect "`) - return - } - decoder.elemDecoder.Decode(ptr, iter) - if iter.Error != nil { - return - } - c = iter.readByte() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect "`) - return - } -} - -type stringModeStringDecoder struct { - elemDecoder ValDecoder - cfg *frozenConfig -} - -func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.elemDecoder.Decode(ptr, iter) - str := *((*string)(ptr)) - tempIter := decoder.cfg.BorrowIterator([]byte(str)) - defer decoder.cfg.ReturnIterator(tempIter) - *((*string)(ptr)) = tempIter.ReadString() -} - -type stringModeNumberEncoder struct { - elemEncoder ValEncoder -} - -func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.elemEncoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type stringModeStringEncoder struct { - elemEncoder ValEncoder - cfg *frozenConfig -} - -func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - tempStream := encoder.cfg.BorrowStream(nil) - defer encoder.cfg.ReturnStream(tempStream) - encoder.elemEncoder.Encode(ptr, tempStream) - stream.WriteString(string(tempStream.Buffer())) -} - -func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func 
(encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type marshalerEncoder struct { - templateInterface emptyInterface - checkIsEmpty checkIsEmpty -} - -func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - templateInterface := encoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler := (*realInterface).(json.Marshaler) - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} -func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type textMarshalerEncoder struct { - templateInterface emptyInterface - checkIsEmpty checkIsEmpty -} - -func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - templateInterface := encoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - marshaler := (*realInterface).(encoding.TextMarshaler) - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - stream.WriteString(string(bytes)) - } -} - -func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type unmarshalerDecoder struct { - templateInterface emptyInterface -} - -func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - templateInterface := decoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - unmarshaler := (*realInterface).(json.Unmarshaler) - iter.nextToken() - iter.unreadByte() // skip spaces - bytes := iter.SkipAndReturnBytes() - err := unmarshaler.UnmarshalJSON(bytes) - if err != nil { - iter.ReportError("unmarshalerDecoder", err.Error()) - } -} - -type textUnmarshalerDecoder struct { - templateInterface emptyInterface -} - -func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - templateInterface := decoder.templateInterface - templateInterface.word = ptr - realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) - unmarshaler := (*realInterface).(encoding.TextUnmarshaler) - str := iter.ReadString() - err := unmarshaler.UnmarshalText([]byte(str)) - if err != nil { - iter.ReportError("textUnmarshalerDecoder", err.Error()) - } -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_object.go b/vendor/github.com/json-iterator/go/feature_reflect_object.go deleted file mode 100644 index 59b1235c0d4..00000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_object.go +++ /dev/null @@ -1,196 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "strings" - "unsafe" -) - -func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - type bindingTo struct { - binding *Binding - toName string - ignored bool - } - orderedBindings := []*bindingTo{} - structDescriptor, err := describeStruct(cfg, typ) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - for _, toName := range binding.ToNames { - new := &bindingTo{ - binding: binding, - 
toName: toName, - } - for _, old := range orderedBindings { - if old.toName != toName { - continue - } - old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) - } - orderedBindings = append(orderedBindings, new) - } - } - if len(orderedBindings) == 0 { - return &emptyStructEncoder{}, nil - } - finalOrderedFields := []structFieldTo{} - for _, bindingTo := range orderedBindings { - if !bindingTo.ignored { - finalOrderedFields = append(finalOrderedFields, structFieldTo{ - encoder: bindingTo.binding.Encoder.(*structFieldEncoder), - toName: bindingTo.toName, - }) - } - } - return &structEncoder{structDescriptor.onePtrEmbedded, structDescriptor.onePtrOptimization, finalOrderedFields}, nil -} - -func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { - newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" - oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" - if newTagged { - if oldTagged { - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else { - return true, true - } - } else { - return true, false - } - } else { - if oldTagged { - return true, false - } - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else { - return true, true - } - } -} - -func decoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - bindings := map[string]*Binding{} - structDescriptor, err := describeStruct(cfg, typ) - if err != nil { - return nil, err - } - for _, binding := range structDescriptor.Fields { - for _, fromName := range binding.FromNames { - old := bindings[fromName] - if old == nil { - bindings[fromName] = binding - continue - } - ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding) - if ignoreOld { - delete(bindings, fromName) - } - if !ignoreNew { - bindings[fromName] = binding - } - } - } - fields := map[string]*structFieldDecoder{} - for k, binding := range bindings { - fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) - } - return createStructDecoder(typ, fields) -} - -type structFieldEncoder struct { - field *reflect.StructField - fieldEncoder ValEncoder - omitempty bool -} - -func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) - encoder.fieldEncoder.Encode(fieldPtr, stream) - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) - } -} - -func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { - fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) - return encoder.fieldEncoder.IsEmpty(fieldPtr) -} - -type structEncoder struct { - onePtrEmbedded bool - onePtrOptimization bool - fields []structFieldTo -} - -type structFieldTo struct { - encoder *structFieldEncoder - toName string -} - -func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteObjectStart() - isNotFirst := false - for _, field := range encoder.fields { - if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { - continue - } - if isNotFirst { - stream.WriteMore() - } - stream.WriteObjectField(field.toName) - field.encoder.Encode(ptr, stream) - isNotFirst = true - } - stream.WriteObjectEnd() -} - 
-func (encoder *structEncoder) EncodeInterface(val interface{}, stream *Stream) { - e := (*emptyInterface)(unsafe.Pointer(&val)) - if encoder.onePtrOptimization { - if e.word == nil && encoder.onePtrEmbedded { - stream.WriteObjectStart() - stream.WriteObjectEnd() - return - } - ptr := uintptr(e.word) - e.word = unsafe.Pointer(&ptr) - } - if reflect.TypeOf(val).Kind() == reflect.Ptr { - encoder.Encode(unsafe.Pointer(&e.word), stream) - } else { - encoder.Encode(e.word, stream) - } -} - -func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type emptyStructEncoder struct { -} - -func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyObject() -} - -func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go deleted file mode 100644 index 7377eec7b3b..00000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_slice.go +++ /dev/null @@ -1,149 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "unsafe" -) - -func decoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) { - decoder, err := decoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - return &sliceDecoder{typ, typ.Elem(), decoder}, nil -} - -func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { - encoder, err := encoderOfType(cfg, typ.Elem()) - if err != nil { - return nil, err - } - if typ.Elem().Kind() == reflect.Map { - encoder = &optionalEncoder{encoder} - } - return &sliceEncoder{typ, typ.Elem(), encoder}, nil -} - -type sliceEncoder struct { - sliceType reflect.Type - elemType reflect.Type - elemEncoder ValEncoder -} - -func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - slice := (*sliceHeader)(ptr) - if slice.Data == nil { - stream.WriteNil() - return - } - if slice.Len == 0 { - stream.WriteEmptyArray() - return - } - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(slice.Data) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - for i := 1; i < slice.Len; i++ { - stream.WriteMore() - elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) - encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) - } -} - -func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) { - WriteToStream(val, stream, encoder) -} - -func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - slice := (*sliceHeader)(ptr) - return slice.Len == 0 -} - -type sliceDecoder struct { - sliceType reflect.Type - elemType reflect.Type - elemDecoder ValDecoder -} - -// sliceHeader is a safe version of SliceHeader used within this package. 
-type sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int -} - -func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) - } -} - -func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - slice := (*sliceHeader)(ptr) - if iter.ReadNil() { - slice.Len = 0 - slice.Cap = 0 - slice.Data = nil - return - } - reuseSlice(slice, decoder.sliceType, 4) - slice.Len = 0 - offset := uintptr(0) - iter.ReadArrayCB(func(iter *Iterator) bool { - growOne(slice, decoder.sliceType, decoder.elemType) - decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter) - offset += decoder.elemType.Size() - return true - }) -} - -// grow grows the slice s so that it can hold extra more values, allocating -// more capacity if needed. It also returns the old and new slice lengths. -func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) { - newLen := slice.Len + 1 - if newLen <= slice.Cap { - slice.Len = newLen - return - } - newCap := slice.Cap - if newCap == 0 { - newCap = 1 - } else { - for newCap < newLen { - if slice.Len < 1024 { - newCap += newCap - } else { - newCap += newCap / 4 - } - } - } - newVal := reflect.MakeSlice(sliceType, newLen, newCap) - dst := unsafe.Pointer(newVal.Pointer()) - // copy old array into new array - originalBytesCount := uintptr(slice.Len) * elementType.Size() - srcPtr := (*[1 << 30]byte)(slice.Data) - dstPtr := (*[1 << 30]byte)(dst) - for i := uintptr(0); i < originalBytesCount; i++ { - dstPtr[i] = srcPtr[i] - } - slice.Data = dst - slice.Len = newLen - slice.Cap = newCap -} - -func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) { - if expectedCap <= slice.Cap { - return - } - newVal := reflect.MakeSlice(sliceType, 0, expectedCap) - dst := unsafe.Pointer(newVal.Pointer()) - slice.Data = dst - slice.Cap = expectedCap -} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go deleted file mode 100644 index b3417fd73a7..00000000000 --- a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go +++ /dev/null @@ -1,916 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "reflect" - "strings" - "unsafe" -) - -func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) (ValDecoder, error) { - knownHash := map[int32]struct{}{ - 0: {}, - } - switch len(fields) { - case 0: - return &skipObjectDecoder{typ}, nil - case 1: - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}, nil - } - case 2: - var fieldHash1 int32 - var fieldHash2 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldHash1 == 0 { - fieldHash1 = fieldHash - fieldDecoder1 = fieldDecoder - } else { - fieldHash2 = fieldHash - fieldDecoder2 = fieldDecoder - } - } - return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}, 
nil - case 3: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } - } - return &threeFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3}, nil - case 4: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } - } - return &fourFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4}, nil - case 5: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } - } - return &fiveFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5}, nil - case 6: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - 
fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } - } - return &sixFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6}, nil - case 7: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } - } - return &sevenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7}, nil - case 8: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } - 
} - return &eightFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8}, nil - case 9: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldName9 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } - } - return &nineFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9}, nil - case 10: - var fieldName1 int32 - var fieldName2 int32 - var fieldName3 int32 - var fieldName4 int32 - var fieldName5 int32 - var fieldName6 int32 - var fieldName7 int32 - var fieldName8 int32 - var fieldName9 int32 - var fieldName10 int32 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - var fieldDecoder10 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields}, nil - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - 
fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else if fieldName9 == 0 { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } else { - fieldName10 = fieldHash - fieldDecoder10 = fieldDecoder - } - } - return &tenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9, - fieldName10, fieldDecoder10}, nil - } - return &generalStructDecoder{typ, fields}, nil -} - -type generalStructDecoder struct { - typ reflect.Type - fields map[string]*structFieldDecoder -} - -func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - fieldBytes := iter.readObjectFieldAsBytes() - field := *(*string)(unsafe.Pointer(&fieldBytes)) - fieldDecoder := decoder.fields[strings.ToLower(field)] - if fieldDecoder == nil { - iter.Skip() - } else { - fieldDecoder.Decode(ptr, iter) - } - for iter.nextToken() == ',' { - fieldBytes = iter.readObjectFieldAsBytes() - field = *(*string)(unsafe.Pointer(&fieldBytes)) - fieldDecoder = decoder.fields[strings.ToLower(field)] - if fieldDecoder == nil { - iter.Skip() - } else { - fieldDecoder.Decode(ptr, iter) - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type skipObjectDecoder struct { - typ reflect.Type -} - -func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valueType := iter.WhatIsNext() - if valueType != ObjectValue && valueType != NilValue { - iter.ReportError("skipObjectDecoder", "expect object or null") - return - } - iter.Skip() -} - -type oneFieldStructDecoder struct { - typ reflect.Type - fieldHash int32 - fieldDecoder *structFieldDecoder -} - -func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - if iter.readFieldHash() == decoder.fieldHash { - decoder.fieldDecoder.Decode(ptr, iter) - } else { - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type twoFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder -} - -func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type threeFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder -} - -func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case 
decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type fourFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder -} - -func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type fiveFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder -} - -func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type sixFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder -} - -func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type sevenFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 
int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder -} - -func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type eightFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder - fieldHash8 int32 - fieldDecoder8 *structFieldDecoder -} - -func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type nineFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder - fieldHash8 int32 - fieldDecoder8 *structFieldDecoder - fieldHash9 int32 - fieldDecoder9 *structFieldDecoder -} - -func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - 
decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type tenFieldsStructDecoder struct { - typ reflect.Type - fieldHash1 int32 - fieldDecoder1 *structFieldDecoder - fieldHash2 int32 - fieldDecoder2 *structFieldDecoder - fieldHash3 int32 - fieldDecoder3 *structFieldDecoder - fieldHash4 int32 - fieldDecoder4 *structFieldDecoder - fieldHash5 int32 - fieldDecoder5 *structFieldDecoder - fieldHash6 int32 - fieldDecoder6 *structFieldDecoder - fieldHash7 int32 - fieldDecoder7 *structFieldDecoder - fieldHash8 int32 - fieldDecoder8 *structFieldDecoder - fieldHash9 int32 - fieldDecoder9 *structFieldDecoder - fieldHash10 int32 - fieldDecoder10 *structFieldDecoder -} - -func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - case decoder.fieldHash10: - decoder.fieldDecoder10.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) - } -} - -type structFieldDecoder struct { - field *reflect.StructField - fieldDecoder ValDecoder -} - -func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset) - decoder.fieldDecoder.Decode(fieldPtr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error()) - } -} diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/feature_stream.go deleted file mode 100644 index 9c8470a03ae..00000000000 --- a/vendor/github.com/json-iterator/go/feature_stream.go +++ /dev/null @@ -1,305 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// Stream is a io.Writer like object, with JSON specific write functions. -// Error is not returned as return value, but stored as Error member on this stream instance. -type Stream struct { - cfg *frozenConfig - out io.Writer - buf []byte - n int - Error error - indention int -} - -// NewStream create new stream instance. -// cfg can be jsoniter.ConfigDefault. -// out can be nil if write to internal buffer. -// bufSize is the initial size for the internal buffer in bytes. 
-func NewStream(cfg API, out io.Writer, bufSize int) *Stream { - return &Stream{ - cfg: cfg.(*frozenConfig), - out: out, - buf: make([]byte, bufSize), - n: 0, - Error: nil, - indention: 0, - } -} - -// Pool returns a pool can provide more stream with same configuration -func (stream *Stream) Pool() StreamPool { - return stream.cfg -} - -// Reset reuse this stream instance by assign a new writer -func (stream *Stream) Reset(out io.Writer) { - stream.out = out - stream.n = 0 -} - -// Available returns how many bytes are unused in the buffer. -func (stream *Stream) Available() int { - return len(stream.buf) - stream.n -} - -// Buffered returns the number of bytes that have been written into the current buffer. -func (stream *Stream) Buffered() int { - return stream.n -} - -// Buffer if writer is nil, use this method to take the result -func (stream *Stream) Buffer() []byte { - return stream.buf[:stream.n] -} - -// Write writes the contents of p into the buffer. -// It returns the number of bytes written. -// If nn < len(p), it also returns an error explaining -// why the write is short. -func (stream *Stream) Write(p []byte) (nn int, err error) { - for len(p) > stream.Available() && stream.Error == nil { - if stream.out == nil { - stream.growAtLeast(len(p)) - } else { - var n int - if stream.Buffered() == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, stream.Error = stream.out.Write(p) - } else { - n = copy(stream.buf[stream.n:], p) - stream.n += n - stream.Flush() - } - nn += n - p = p[n:] - } - } - if stream.Error != nil { - return nn, stream.Error - } - n := copy(stream.buf[stream.n:], p) - stream.n += n - nn += n - return nn, nil -} - -// WriteByte writes a single byte. -func (stream *Stream) writeByte(c byte) { - if stream.Error != nil { - return - } - if stream.Available() < 1 { - stream.growAtLeast(1) - } - stream.buf[stream.n] = c - stream.n++ -} - -func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 2 { - stream.growAtLeast(2) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.n += 2 -} - -func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 3 { - stream.growAtLeast(3) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.n += 3 -} - -func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 4 { - stream.growAtLeast(4) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.buf[stream.n+3] = c4 - stream.n += 4 -} - -func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { - if stream.Error != nil { - return - } - if stream.Available() < 5 { - stream.growAtLeast(5) - } - stream.buf[stream.n] = c1 - stream.buf[stream.n+1] = c2 - stream.buf[stream.n+2] = c3 - stream.buf[stream.n+3] = c4 - stream.buf[stream.n+4] = c5 - stream.n += 5 -} - -// Flush writes any buffered data to the underlying io.Writer. 
-func (stream *Stream) Flush() error { - if stream.out == nil { - return nil - } - if stream.Error != nil { - return stream.Error - } - if stream.n == 0 { - return nil - } - n, err := stream.out.Write(stream.buf[0:stream.n]) - if n < stream.n && err == nil { - err = io.ErrShortWrite - } - if err != nil { - if n > 0 && n < stream.n { - copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n]) - } - stream.n -= n - stream.Error = err - return err - } - stream.n = 0 - return nil -} - -func (stream *Stream) ensure(minimal int) { - available := stream.Available() - if available < minimal { - stream.growAtLeast(minimal) - } -} - -func (stream *Stream) growAtLeast(minimal int) { - if stream.out != nil { - stream.Flush() - } - toGrow := len(stream.buf) - if toGrow < minimal { - toGrow = minimal - } - newBuf := make([]byte, len(stream.buf)+toGrow) - copy(newBuf, stream.Buffer()) - stream.buf = newBuf -} - -// WriteRaw write string out without quotes, just like []byte -func (stream *Stream) WriteRaw(s string) { - stream.ensure(len(s)) - if stream.Error != nil { - return - } - n := copy(stream.buf[stream.n:], s) - stream.n += n -} - -// WriteNil write null to stream -func (stream *Stream) WriteNil() { - stream.writeFourBytes('n', 'u', 'l', 'l') -} - -// WriteTrue write true to stream -func (stream *Stream) WriteTrue() { - stream.writeFourBytes('t', 'r', 'u', 'e') -} - -// WriteFalse write false to stream -func (stream *Stream) WriteFalse() { - stream.writeFiveBytes('f', 'a', 'l', 's', 'e') -} - -// WriteBool write true or false into stream -func (stream *Stream) WriteBool(val bool) { - if val { - stream.WriteTrue() - } else { - stream.WriteFalse() - } -} - -// WriteObjectStart write { with possible indention -func (stream *Stream) WriteObjectStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('{') - stream.writeIndention(0) -} - -// WriteObjectField write "field": with possible indention -func (stream *Stream) WriteObjectField(field string) { - stream.WriteString(field) - if stream.indention > 0 { - stream.writeTwoBytes(':', ' ') - } else { - stream.writeByte(':') - } -} - -// WriteObjectEnd write } with possible indention -func (stream *Stream) WriteObjectEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte('}') -} - -// WriteEmptyObject write {} -func (stream *Stream) WriteEmptyObject() { - stream.writeByte('{') - stream.writeByte('}') -} - -// WriteMore write , with possible indention -func (stream *Stream) WriteMore() { - stream.writeByte(',') - stream.writeIndention(0) -} - -// WriteArrayStart write [ with possible indention -func (stream *Stream) WriteArrayStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('[') - stream.writeIndention(0) -} - -// WriteEmptyArray write [] -func (stream *Stream) WriteEmptyArray() { - stream.writeByte('[') - stream.writeByte(']') -} - -// WriteArrayEnd write ] with possible indention -func (stream *Stream) WriteArrayEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte(']') -} - -func (stream *Stream) writeIndention(delta int) { - if stream.indention == 0 { - return - } - stream.writeByte('\n') - toWrite := stream.indention - delta - stream.ensure(toWrite) - for i := 0; i < toWrite && stream.n < len(stream.buf); i++ { - stream.buf[stream.n] = ' ' - stream.n++ - } -} diff --git a/vendor/github.com/json-iterator/go/feature_stream_float.go 
b/vendor/github.com/json-iterator/go/feature_stream_float.go deleted file mode 100644 index 9a404e11d4a..00000000000 --- a/vendor/github.com/json-iterator/go/feature_stream_float.go +++ /dev/null @@ -1,96 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var pow10 []uint64 - -func init() { - pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} -} - -// WriteFloat32 write float32 to stream -func (stream *Stream) WriteFloat32(val float32) { - abs := math.Abs(float64(val)) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if float32(abs) < 1e-6 || float32(abs) >= 1e21 { - fmt = 'e' - } - } - stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32)) -} - -// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat32Lossy(val float32) { - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat32(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(float64(val)*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - stream.ensure(10) - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[stream.n-1] == '0' { - stream.n-- - } -} - -// WriteFloat64 write float64 to stream -func (stream *Stream) WriteFloat64(val float64) { - abs := math.Abs(val) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } - } - stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64)) -} - -// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat64Lossy(val float64) { - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat64(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(val*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - stream.ensure(10) - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[stream.n-1] == '0' { - stream.n-- - } -} diff --git a/vendor/github.com/json-iterator/go/feature_stream_int.go b/vendor/github.com/json-iterator/go/feature_stream_int.go deleted file mode 100644 index 7cfd522c106..00000000000 --- a/vendor/github.com/json-iterator/go/feature_stream_int.go +++ /dev/null @@ -1,320 +0,0 @@ -package jsoniter - -var digits []uint32 - -func init() { - digits = make([]uint32, 1000) - for i := uint32(0); i < 1000; i++ { - digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' - if i < 10 { - digits[i] += 2 << 24 - } else if i < 100 { - digits[i] += 1 << 24 - } - } -} - -func writeFirstBuf(buf []byte, v uint32, n int) int { - start := v >> 24 - if start == 0 { - buf[n] = byte(v >> 16) - n++ - buf[n] = byte(v >> 8) - n++ - } else if start == 1 { - buf[n] = byte(v >> 8) - n++ - } - buf[n] = byte(v) - n++ - return n -} - -func writeBuf(buf []byte, v uint32, n int) { - buf[n] = byte(v >> 16) - buf[n+1] = byte(v >> 8) - buf[n+2] = byte(v) -} - -// WriteUint8 write uint8 to stream -func (stream *Stream) WriteUint8(val uint8) { - stream.ensure(3) - 
stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) -} - -// WriteInt8 write int8 to stream -func (stream *Stream) WriteInt8(nval int8) { - stream.ensure(4) - n := stream.n - var val uint8 - if nval < 0 { - val = uint8(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint8(nval) - } - stream.n = writeFirstBuf(stream.buf, digits[val], n) -} - -// WriteUint16 write uint16 to stream -func (stream *Stream) WriteUint16(val uint16) { - stream.ensure(5) - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) - return - } - r1 := val - q1*1000 - n := writeFirstBuf(stream.buf, digits[q1], stream.n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return -} - -// WriteInt16 write int16 to stream -func (stream *Stream) WriteInt16(nval int16) { - stream.ensure(6) - n := stream.n - var val uint16 - if nval < 0 { - val = uint16(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint16(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - n = writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return -} - -// WriteUint32 write uint32 to stream -func (stream *Stream) WriteUint32(val uint32) { - stream.ensure(10) - n := stream.n - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - } else { - r3 := q2 - q3*1000 - stream.buf[n] = byte(q3 + '0') - n++ - writeBuf(stream.buf, digits[r3], n) - n += 3 - } - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 -} - -// WriteInt32 write int32 to stream -func (stream *Stream) WriteInt32(nval int32) { - stream.ensure(11) - n := stream.n - var val uint32 - if nval < 0 { - val = uint32(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint32(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - } else { - r3 := q2 - q3*1000 - stream.buf[n] = byte(q3 + '0') - n++ - writeBuf(stream.buf, digits[r3], n) - n += 3 - } - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 -} - -// WriteUint64 write uint64 to stream -func (stream *Stream) WriteUint64(val uint64) { - stream.ensure(20) - n := stream.n - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - n = writeFirstBuf(stream.buf, digits[q3], n) - writeBuf(stream.buf, digits[r3], n) - writeBuf(stream.buf, digits[r2], n+3) 
- writeBuf(stream.buf, digits[r1], n+6) - stream.n = n + 9 - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - n = writeFirstBuf(stream.buf, digits[q4], n) - writeBuf(stream.buf, digits[r4], n) - writeBuf(stream.buf, digits[r3], n+3) - writeBuf(stream.buf, digits[r2], n+6) - writeBuf(stream.buf, digits[r1], n+9) - stream.n = n + 12 - return - } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - n = writeFirstBuf(stream.buf, digits[q5], n) - } else { - n = writeFirstBuf(stream.buf, digits[q6], n) - r6 := q5 - q6*1000 - writeBuf(stream.buf, digits[r6], n) - n += 3 - } - writeBuf(stream.buf, digits[r5], n) - writeBuf(stream.buf, digits[r4], n+3) - writeBuf(stream.buf, digits[r3], n+6) - writeBuf(stream.buf, digits[r2], n+9) - writeBuf(stream.buf, digits[r1], n+12) - stream.n = n + 15 -} - -// WriteInt64 write int64 to stream -func (stream *Stream) WriteInt64(nval int64) { - stream.ensure(20) - n := stream.n - var val uint64 - if nval < 0 { - val = uint64(-nval) - stream.buf[n] = '-' - n++ - } else { - val = uint64(nval) - } - q1 := val / 1000 - if q1 == 0 { - stream.n = writeFirstBuf(stream.buf, digits[val], n) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - n := writeFirstBuf(stream.buf, digits[q1], n) - writeBuf(stream.buf, digits[r1], n) - stream.n = n + 3 - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - n = writeFirstBuf(stream.buf, digits[q2], n) - writeBuf(stream.buf, digits[r2], n) - writeBuf(stream.buf, digits[r1], n+3) - stream.n = n + 6 - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - n = writeFirstBuf(stream.buf, digits[q3], n) - writeBuf(stream.buf, digits[r3], n) - writeBuf(stream.buf, digits[r2], n+3) - writeBuf(stream.buf, digits[r1], n+6) - stream.n = n + 9 - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - n = writeFirstBuf(stream.buf, digits[q4], n) - writeBuf(stream.buf, digits[r4], n) - writeBuf(stream.buf, digits[r3], n+3) - writeBuf(stream.buf, digits[r2], n+6) - writeBuf(stream.buf, digits[r1], n+9) - stream.n = n + 12 - return - } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - n = writeFirstBuf(stream.buf, digits[q5], n) - } else { - stream.buf[n] = byte(q6 + '0') - n++ - r6 := q5 - q6*1000 - writeBuf(stream.buf, digits[r6], n) - n += 3 - } - writeBuf(stream.buf, digits[r5], n) - writeBuf(stream.buf, digits[r4], n+3) - writeBuf(stream.buf, digits[r3], n+6) - writeBuf(stream.buf, digits[r2], n+9) - writeBuf(stream.buf, digits[r1], n+12) - stream.n = n + 15 -} - -// WriteInt write int to stream -func (stream *Stream) WriteInt(val int) { - stream.WriteInt64(int64(val)) -} - -// WriteUint write uint to stream -func (stream *Stream) WriteUint(val uint) { - stream.WriteUint64(uint64(val)) -} diff --git a/vendor/github.com/json-iterator/go/feature_stream_string.go b/vendor/github.com/json-iterator/go/feature_stream_string.go deleted file mode 100644 index 334282f05fa..00000000000 --- a/vendor/github.com/json-iterator/go/feature_stream_string.go +++ /dev/null @@ -1,396 +0,0 @@ -package jsoniter - -import ( - "unicode/utf8" -) - -// htmlSafeSet holds the value true if the ASCII character with the given -// array position can be safely represented inside a JSON string, embedded -// inside of HTML - - - - - - - - - - - - - - - - - - - - -
 
- - -`) - -func third_partySwaggerUiIndexHtmlBytes() ([]byte, error) { - return _third_partySwaggerUiIndexHtml, nil -} - -func third_partySwaggerUiIndexHtml() (*asset, error) { - bytes, err := third_partySwaggerUiIndexHtmlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "third_party/swagger-ui/index.html", size: 3561, mode: os.FileMode(436), modTime: time.Unix(1468871087, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _third_partySwaggerUiLibBackboneMinJs = []byte(`// Backbone.js 1.1.2 - -(function(t,e){if(typeof define==="function"&&define.amd){define(["underscore","jquery","exports"],function(i,r,s){t.Backbone=e(t,s,i,r)})}else if(typeof exports!=="undefined"){var i=require("underscore");e(t,exports,i)}else{t.Backbone=e(t,{},t._,t.jQuery||t.Zepto||t.ender||t.$)}})(this,function(t,e,i,r){var s=t.Backbone;var n=[];var a=n.push;var o=n.slice;var h=n.splice;e.VERSION="1.1.2";e.$=r;e.noConflict=function(){t.Backbone=s;return this};e.emulateHTTP=false;e.emulateJSON=false;var u=e.Events={on:function(t,e,i){if(!c(this,"on",t,[e,i])||!e)return this;this._events||(this._events={});var r=this._events[t]||(this._events[t]=[]);r.push({callback:e,context:i,ctx:i||this});return this},once:function(t,e,r){if(!c(this,"once",t,[e,r])||!e)return this;var s=this;var n=i.once(function(){s.off(t,n);e.apply(this,arguments)});n._callback=e;return this.on(t,n,r)},off:function(t,e,r){var s,n,a,o,h,u,l,f;if(!this._events||!c(this,"off",t,[e,r]))return this;if(!t&&!e&&!r){this._events=void 0;return this}o=t?[t]:i.keys(this._events);for(h=0,u=o.length;h").attr(t);this.setElement(r,false)}else{this.setElement(i.result(this,"el"),false)}}});e.sync=function(t,r,s){var n=T[t];i.defaults(s||(s={}),{emulateHTTP:e.emulateHTTP,emulateJSON:e.emulateJSON});var a={type:n,dataType:"json"};if(!s.url){a.url=i.result(r,"url")||M()}if(s.data==null&&r&&(t==="create"||t==="update"||t==="patch")){a.contentType="application/json";a.data=JSON.stringify(s.attrs||r.toJSON(s))}if(s.emulateJSON){a.contentType="application/x-www-form-urlencoded";a.data=a.data?{model:a.data}:{}}if(s.emulateHTTP&&(n==="PUT"||n==="DELETE"||n==="PATCH")){a.type="POST";if(s.emulateJSON)a.data._method=n;var o=s.beforeSend;s.beforeSend=function(t){t.setRequestHeader("X-HTTP-Method-Override",n);if(o)return o.apply(this,arguments)}}if(a.type!=="GET"&&!s.emulateJSON){a.processData=false}if(a.type==="PATCH"&&k){a.xhr=function(){return new ActiveXObject("Microsoft.XMLHTTP")}}var h=s.xhr=e.ajax(i.extend(a,s));r.trigger("request",r,h,s);return h};var k=typeof window!=="undefined"&&!!window.ActiveXObject&&!(window.XMLHttpRequest&&(new XMLHttpRequest).dispatchEvent);var T={create:"POST",update:"PUT",patch:"PATCH","delete":"DELETE",read:"GET"};e.ajax=function(){return e.$.ajax.apply(e.$,arguments)};var $=e.Router=function(t){t||(t={});if(t.routes)this.routes=t.routes;this._bindRoutes();this.initialize.apply(this,arguments)};var S=/\((.*?)\)/g;var H=/(\(\?)?:\w+/g;var A=/\*\w+/g;var I=/[\-{}\[\]+?.,\\\^$|#\s]/g;i.extend($.prototype,u,{initialize:function(){},route:function(t,r,s){if(!i.isRegExp(t))t=this._routeToRegExp(t);if(i.isFunction(r)){s=r;r=""}if(!s)s=this[r];var n=this;e.history.route(t,function(i){var a=n._extractParameters(t,i);n.execute(s,a);n.trigger.apply(n,["route:"+r].concat(a));n.trigger("route",r,a);e.history.trigger("route",n,r,a)});return this},execute:function(t,e){if(t)t.apply(this,e)},navigate:function(t,i){e.history.navigate(t,i);return 
this},_bindRoutes:function(){if(!this.routes)return;this.routes=i.result(this,"routes");var t,e=i.keys(this.routes);while((t=e.pop())!=null){this.route(t,this.routes[t])}},_routeToRegExp:function(t){t=t.replace(I,"\\$&").replace(S,"(?:$1)?").replace(H,function(t,e){return e?t:"([^/?]+)"}).replace(A,"([^?]*?)");return new RegExp("^"+t+"(?:\\?([\\s\\S]*))?$")},_extractParameters:function(t,e){var r=t.exec(e).slice(1);return i.map(r,function(t,e){if(e===r.length-1)return t||null;return t?decodeURIComponent(t):null})}});var N=e.History=function(){this.handlers=[];i.bindAll(this,"checkUrl");if(typeof window!=="undefined"){this.location=window.location;this.history=window.history}};var R=/^[#\/]|\s+$/g;var O=/^\/+|\/+$/g;var P=/msie [\w.]+/;var C=/\/$/;var j=/#.*$/;N.started=false;i.extend(N.prototype,u,{interval:50,atRoot:function(){return this.location.pathname.replace(/[^\/]$/,"$&/")===this.root},getHash:function(t){var e=(t||this).location.href.match(/#(.*)$/);return e?e[1]:""},getFragment:function(t,e){if(t==null){if(this._hasPushState||!this._wantsHashChange||e){t=decodeURI(this.location.pathname+this.location.search);var i=this.root.replace(C,"");if(!t.indexOf(i))t=t.slice(i.length)}else{t=this.getHash()}}return t.replace(R,"")},start:function(t){if(N.started)throw new Error("Backbone.history has already been started");N.started=true;this.options=i.extend({root:"/"},this.options,t);this.root=this.options.root;this._wantsHashChange=this.options.hashChange!==false;this._wantsPushState=!!this.options.pushState;this._hasPushState=!!(this.options.pushState&&this.history&&this.history.pushState);var r=this.getFragment();var s=document.documentMode;var n=P.exec(navigator.userAgent.toLowerCase())&&(!s||s<=7);this.root=("/"+this.root+"/").replace(O,"/");if(n&&this._wantsHashChange){var a=e.$('
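A similar aside for the buffered Stream writer removed in the feature_stream.go hunk above: NewStream takes a configuration, an io.Writer and an initial buffer size, and write errors are carried on the Stream itself rather than returned from each call. A rough usage sketch under those assumptions (illustrative only), again using the jsoniter alias and ConfigDefault:

package main

import (
	"fmt"
	"os"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// NewStream takes a configuration, an io.Writer and an initial buffer size,
	// mirroring the signature in the removed vendored file.
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, os.Stdout, 512)

	stream.WriteObjectStart()
	stream.WriteObjectField("replicas")
	stream.WriteInt(3)
	stream.WriteMore()
	stream.WriteObjectField("paused")
	stream.WriteBool(false)
	stream.WriteObjectEnd()

	// Errors are stored on stream.Error rather than returned from each write.
	if err := stream.Flush(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	if stream.Error != nil {
		fmt.Fprintln(os.Stderr, stream.Error)
	}
}
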