From ea17356f0b8b5e84f440cdd6abf2b9370e16dc53 Mon Sep 17 00:00:00 2001 From: OrangeBao Date: Mon, 15 Apr 2024 17:58:39 +0800 Subject: [PATCH] feat: auto join/unjoin node Signed-off-by: OrangeBao --- cmd/kubenest/operator/app/operator.go | 10 + cmd/kubenest/operator/kubelet_node_helper.sh | 137 ++ deploy/virtual-cluster-operator.yml | 225 ++- go.mod | 2 +- go.sum | 1 - hack/docker.sh | 2 +- .../kosmos/v1alpha1/virtualcluster_types.go | 13 +- pkg/kubenest/constants/constant.go | 2 + .../constants.go | 14 + .../exector/exector.go | 176 ++ .../exector/remote_cmd.go | 19 + .../exector/remote_scp.go | 89 + .../join-worker.go | 184 ++ .../node_controller.go | 240 +++ .../virtualcluster.node.controller/share.go | 50 + .../unjoin-worker.go | 83 + .../nodepool.go | 56 + .../github.com/gorilla/websocket/.gitignore | 25 + vendor/github.com/gorilla/websocket/AUTHORS | 9 + vendor/github.com/gorilla/websocket/LICENSE | 22 + vendor/github.com/gorilla/websocket/README.md | 64 + vendor/github.com/gorilla/websocket/client.go | 395 ++++ .../gorilla/websocket/client_clone.go | 16 + .../gorilla/websocket/client_clone_legacy.go | 38 + .../gorilla/websocket/compression.go | 148 ++ vendor/github.com/gorilla/websocket/conn.go | 1201 +++++++++++ .../gorilla/websocket/conn_write.go | 15 + .../gorilla/websocket/conn_write_legacy.go | 18 + vendor/github.com/gorilla/websocket/doc.go | 227 +++ vendor/github.com/gorilla/websocket/join.go | 42 + vendor/github.com/gorilla/websocket/json.go | 60 + vendor/github.com/gorilla/websocket/mask.go | 54 + .../github.com/gorilla/websocket/mask_safe.go | 15 + .../github.com/gorilla/websocket/prepared.go | 102 + vendor/github.com/gorilla/websocket/proxy.go | 77 + vendor/github.com/gorilla/websocket/server.go | 363 ++++ vendor/github.com/gorilla/websocket/trace.go | 19 + .../github.com/gorilla/websocket/trace_17.go | 12 + vendor/github.com/gorilla/websocket/util.go | 283 +++ .../gorilla/websocket/x_net_proxy.go | 473 +++++ vendor/k8s.io/metrics/LICENSE | 201 -- vendor/k8s.io/metrics/pkg/apis/metrics/doc.go | 21 - .../metrics/pkg/apis/metrics/register.go | 55 - .../k8s.io/metrics/pkg/apis/metrics/types.go | 101 - .../metrics/pkg/apis/metrics/v1alpha1/doc.go | 24 - .../pkg/apis/metrics/v1alpha1/generated.pb.go | 1758 ----------------- .../pkg/apis/metrics/v1alpha1/generated.proto | 95 - .../pkg/apis/metrics/v1alpha1/register.go | 53 - .../pkg/apis/metrics/v1alpha1/types.go | 101 - .../v1alpha1/zz_generated.conversion.go | 209 -- .../metrics/v1alpha1/zz_generated.deepcopy.go | 186 -- .../metrics/pkg/apis/metrics/v1beta1/doc.go | 24 - .../pkg/apis/metrics/v1beta1/generated.pb.go | 1758 ----------------- .../pkg/apis/metrics/v1beta1/generated.proto | 95 - .../pkg/apis/metrics/v1beta1/register.go | 53 - .../metrics/pkg/apis/metrics/v1beta1/types.go | 101 - .../v1beta1/zz_generated.conversion.go | 209 -- .../metrics/v1beta1/zz_generated.deepcopy.go | 186 -- .../pkg/apis/metrics/zz_generated.deepcopy.go | 186 -- .../client/clientset/versioned/clientset.go | 133 -- .../pkg/client/clientset/versioned/doc.go | 20 - .../client/clientset/versioned/scheme/doc.go | 20 - .../clientset/versioned/scheme/register.go | 58 - .../versioned/typed/metrics/v1alpha1/doc.go | 20 - .../metrics/v1alpha1/generated_expansion.go | 23 - .../typed/metrics/v1alpha1/metrics_client.go | 112 -- .../typed/metrics/v1alpha1/nodemetrics.go | 98 - .../typed/metrics/v1alpha1/podmetrics.go | 103 - .../versioned/typed/metrics/v1beta1/doc.go | 20 - .../metrics/v1beta1/generated_expansion.go | 23 - 
.../typed/metrics/v1beta1/metrics_client.go | 112 -- .../typed/metrics/v1beta1/nodemetrics.go | 98 - .../typed/metrics/v1beta1/podmetrics.go | 103 - vendor/modules.txt | 12 +- 74 files changed, 4960 insertions(+), 6392 deletions(-) create mode 100755 cmd/kubenest/operator/kubelet_node_helper.sh create mode 100644 pkg/kubenest/controller/virtualcluster.node.controller/constants.go create mode 100644 pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go create mode 100644 pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_cmd.go create mode 100644 pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_scp.go create mode 100644 pkg/kubenest/controller/virtualcluster.node.controller/join-worker.go create mode 100644 pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go create mode 100644 pkg/kubenest/controller/virtualcluster.node.controller/share.go create mode 100644 pkg/kubenest/controller/virtualcluster.node.controller/unjoin-worker.go create mode 100644 pkg/kubenest/controller/virtualcluster.nodepool.controller/nodepool.go create mode 100644 vendor/github.com/gorilla/websocket/.gitignore create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS create mode 100644 vendor/github.com/gorilla/websocket/LICENSE create mode 100644 vendor/github.com/gorilla/websocket/README.md create mode 100644 vendor/github.com/gorilla/websocket/client.go create mode 100644 vendor/github.com/gorilla/websocket/client_clone.go create mode 100644 vendor/github.com/gorilla/websocket/client_clone_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/compression.go create mode 100644 vendor/github.com/gorilla/websocket/conn.go create mode 100644 vendor/github.com/gorilla/websocket/conn_write.go create mode 100644 vendor/github.com/gorilla/websocket/conn_write_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/doc.go create mode 100644 vendor/github.com/gorilla/websocket/join.go create mode 100644 vendor/github.com/gorilla/websocket/json.go create mode 100644 vendor/github.com/gorilla/websocket/mask.go create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go create mode 100644 vendor/github.com/gorilla/websocket/prepared.go create mode 100644 vendor/github.com/gorilla/websocket/proxy.go create mode 100644 vendor/github.com/gorilla/websocket/server.go create mode 100644 vendor/github.com/gorilla/websocket/trace.go create mode 100644 vendor/github.com/gorilla/websocket/trace_17.go create mode 100644 vendor/github.com/gorilla/websocket/util.go create mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go delete mode 100644 vendor/k8s.io/metrics/LICENSE delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/doc.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/register.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/types.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go delete 
mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/doc.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go delete mode 100644 vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go diff --git a/cmd/kubenest/operator/app/operator.go b/cmd/kubenest/operator/app/operator.go index 96ca62348..41fda9911 100644 --- a/cmd/kubenest/operator/app/operator.go +++ b/cmd/kubenest/operator/app/operator.go @@ -15,6 +15,7 @@ import ( "github.com/kosmos.io/kosmos/pkg/kubenest/constants" "github.com/kosmos.io/kosmos/pkg/kubenest/controller" kosmos "github.com/kosmos.io/kosmos/pkg/kubenest/controller/kosmos" + vcnodecontroller "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller" "github.com/kosmos.io/kosmos/pkg/scheme" "github.com/kosmos.io/kosmos/pkg/sharedcli/klogflag" ) @@ -83,6 +84,15 @@ func run(ctx context.Context, opts *options.Options) error { return fmt.Errorf("error starting %s: %v", constants.InitControllerName, err) } + VirtualClusterNodeController := vcnodecontroller.NodeController{ + Client: mgr.GetClient(), + EventRecorder: mgr.GetEventRecorderFor(constants.NodeControllerName), + } + + if err = VirtualClusterNodeController.SetupWithManager(mgr); err != nil { + return fmt.Errorf("error starting %s: %v", constants.NodeControllerName, err) + } + if opts.KosmosJoinController { KosmosJoinController := kosmos.KosmosJoinController{ Client: mgr.GetClient(), diff --git a/cmd/kubenest/operator/kubelet_node_helper.sh b/cmd/kubenest/operator/kubelet_node_helper.sh new file mode 100755 index 000000000..79dd2d5b1 --- /dev/null +++ b/cmd/kubenest/operator/kubelet_node_helper.sh @@ -0,0 +1,137 @@ +#!/usr/bin/env bash + +SCRIPT_VERSION=0.0.1 +# save tmp file 
+PATH_FILE_TMP=/apps/conf/kosmos/tmp
+##################################################
+# path for kubeadm
+PATH_KUBEADM=/usr/bin/kubeadm
+##################################################
+# path for kubernetes
+PATH_KUBERNETES=/etc/kubernetes/
+PATH_KUBERNETES_PKI="$PATH_KUBERNETES/pki"
+# scpKCCmd.name
+KUBELET_KUBE_CONFIG_NAME=kubelet.conf
+##################################################
+# path for kubelet
+PATH_KUBELET_LIB=/var/lib/kubelet
+# scpKubeletConfigCmd.name
+KUBELET_CONFIG_NAME=config.yaml
+
+# args
+DNS_ADDRESS=${2:-10.237.0.10}
+LOG_NAME=${2:-kubelet}
+
+function unjoin() {
+    # before unjoin, you need to delete the node by kubectl
+    echo "exec(1/1): kubeadm reset...."
+    echo "y" | ${PATH_KUBEADM} reset
+    if [ $? -ne 0 ]; then
+        exit 1
+    fi
+}
+
+
+# before join, you need to upload ca.crt and the kubeconfig to the tmp dir!!!
+function join() {
+    echo "exec(1/7): stop containerd...."
+    systemctl stop containerd
+    if [ $? -ne 0 ]; then
+        exit 1
+    fi
+    echo "exec(2/7): copy ca.crt...."
+    cp "$PATH_FILE_TMP/ca.crt" "$PATH_KUBERNETES_PKI/ca.crt"
+    if [ $? -ne 0 ]; then
+        exit 1
+    fi
+    echo "exec(3/7): copy kubeconfig...."
+    cp "$PATH_FILE_TMP/$KUBELET_KUBE_CONFIG_NAME" "$PATH_KUBERNETES/$KUBELET_KUBE_CONFIG_NAME"
+    if [ $? -ne 0 ]; then
+        exit 1
+    fi
+    echo "exec(4/7): set core dns address...."
+    sed -e "s|__DNS_ADDRESS__|$DNS_ADDRESS|g" -e "w ${PATH_KUBELET_LIB}/${KUBELET_CONFIG_NAME}" "$PATH_FILE_TMP"/"$KUBELET_CONFIG_NAME"
+    if [ $? -ne 0 ]; then
+        exit 1
+    fi
+    echo "exec(5/7): copy kubeadm-flags.env...."
+    cp "$PATH_FILE_TMP/kubeadm-flags.env" "$PATH_KUBELET_LIB/kubeadm-flags.env"
+    if [ $? -ne 0 ]; then
+        exit 1
+    fi
+    echo "exec(6/7): start containerd"
+    systemctl start containerd
+    if [ $? -ne 0 ]; then
+        exit 1
+    fi
+    echo "exec(7/7): start kubelet...."
+    systemctl start kubelet
+    if [ $? -ne 0 ]; then
+        exit 1
+    fi
+}
+
+function health() {
+    result=`systemctl is-active containerd`
+    if [[ $result != "active" ]]; then
+        echo "health(1/2): containerd is inactive"
+        exit 1
+    else
+        echo "health(1/2): containerd is active"
+    fi
+
+    result=`systemctl is-active kubelet`
+    if [[ $result != "active" ]]; then
+        echo "health(2/2): kubelet is inactive"
+        exit 1
+    else
+        echo "health(2/2): kubelet is active"
+    fi
+}
+
+function log() {
+    systemctl status $LOG_NAME
+}
+
+# check the environment
+function check() {
+    if [ ! -d "$PATH_FILE_TMP" ]; then
+        echo "check(1/2): try to create $PATH_FILE_TMP"
+        mkdir -p "$PATH_FILE_TMP"
+        if [ $? -ne 0 ]; then
+            exit 1
+        fi
+        echo "check(2/2): copy kubeadm-flags.env to $PATH_FILE_TMP"
+        echo "y" | cp "$PATH_KUBELET_LIB/kubeadm-flags.env" "$PATH_FILE_TMP/"
+    fi
+    echo "environment is ok"
+}
+
+function version() {
+    echo "$SCRIPT_VERSION"
+}
+
+# See how we were called.
+case "$1" in + unjoin) + unjoin + ;; + join) + join + ;; + health) + health + ;; + check) + check + ;; + log) + log + ;; + version) + version + ;; + *) + echo $"usage: $0 unjoin|join|health|log|check|version" + exit 1 +esac diff --git a/deploy/virtual-cluster-operator.yml b/deploy/virtual-cluster-operator.yml index 6b2382b0b..eb15ba3fc 100644 --- a/deploy/virtual-cluster-operator.yml +++ b/deploy/virtual-cluster-operator.yml @@ -37,6 +37,188 @@ type: Opaque data: kubeconfig: __kubeconfig__ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: virtual-cluster-operator + namespace: kosmos-system +data: + kubelet_node_helper.sh: | + #!/usr/bin/env bash + + SCRIPT_VERSION=0.0.1 + PATH_KUBEADM=/apps/bin/kubeadm + # save tmp file + PATH_FILE_TMP=/apps/conf/kosmos/tmp + PATH_KUBERNETES=/apps/conf/kubernetes + PATH_KUBERNETES_PKI="$PATH_KUBERNETES/pki" + # args + DNS_ADDRESS=${2:-10.237.0.10} + LOG_NAME=${2:-kubelet} + + function unjoin() { + # before unjoin, you need delete node by kubectl + echo "exec(1/1): kubeadm reset...." + echo "y" | . ${PATH_KUBEADM} + if [ $? -ne 0 ]; then + exit 1 + fi + } + + + # before join, you need upload ca.crt and kubeconfig to tmp dir!!! + function join() { + echo "exec(1/6): stop containerd...." + systemctl stop containerd + if [ $? -ne 0 ]; then + exit 1 + fi + echo "exec(2/6): copy ca.crt...." + cp "$PATH_FILE_TMP/ca.crt" "$PATH_KUBERNETES_PKI/ca.crt" + if [ $? -ne 0 ]; then + exit 1 + fi + echo "exec(3/6): copy kubeconfig...." + cp "$PATH_FILE_TMP/kubeconfig" "$PATH_KUBERNETES/kubeconfig" + if [ $? -ne 0 ]; then + exit 1 + fi + echo "exec(4/6): set core dns address...." + sed -e "s|__DNS_ADDRESS__|$DNS_ADDRESS|g" -e "w ${PATH_KUBERNETES}/kubelet-config.yaml" "$PATH_FILE_TMP"/kubelet-config.yaml + if [ $? -ne 0 ]; then + exit 1 + fi + echo "exec(5/6): start containerd" + systemctl start containerd + if [ $? -ne 0 ]; then + exit 1 + fi + echo "exec(6/6): start kubelet...." + systemctl start kubelet + if [ $? -ne 0 ]; then + exit 1 + fi + } + + function health() { + result=`systemctl is-active containerd` + if [[ $result != "active" ]]; then + echo "health(1/2): containerd is inactive" + exit 1 + else + echo "health(1/2): containerd is active" + fi + + result=`systemctl is-active kubelet` + if [[ $result != "active" ]]; then + echo "health(2/2): kubelet is inactive" + exit 1 + else + echo "health(2/2): containerd is active" + fi + } + + function log() { + systemctl status $LOG_NAME + } + + # check the environments + function check() { + if [ ! -d "$PATH_FILE_TMP" ]; then + echo "try to create $PATH_FILE_TMP" + mkdir -p "$PATH_FILE_TMP" + if [ $? -ne 0 ]; then + exit 1 + fi + fi + echo "environments is ok" + } + + function version() { + echo "$SCRIPT_VERSION" + } + + # See how we were called. 
+ case "$1" in + unjoin) + unjoin + ;; + join) + join + ;; + health) + health + ;; + check) + check + ;; + log) + log + ;; + version) + version + ;; + *) + echo $"usage: $0 unjoin|join|health|log|check|version" + exit 1 + esac + config.yaml: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: cgroupfs + clusterDNS: + - __DNS_ADDRESS__ + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionHard: + imagefs.available: 15% + memory.available: 100Mi + nodefs.available: 10% + nodefs.inodesFree: 5% + evictionPressureTransitionPeriod: 5m0s + fileCheckFrequency: 0s + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + kubeAPIBurst: 100 + kubeAPIQPS: 100 + kubeReserved: + cpu: 140m + memory: 1.80G + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + --- apiVersion: apps/v1 kind: Deployment @@ -57,21 +239,30 @@ spec: spec: serviceAccountName: virtual-cluster-operator containers: - - name: virtual-cluster-operator - image: repo1-cn-beijing.cr.volces.com/virtualcluster/virtualcluter/virtual-cluster-operator:ee1bcd33-dirty - imagePullPolicy: IfNotPresent - env: - - name: IMAGE_REPOSITIRY - value: repo1-cn-beijing.cr.volces.com/virtualcluster/virtualcluter - volumeMounts: - - name: credentials - mountPath: /etc/virtual-cluster-operator - readOnly: true - command: - - virtual-cluster-operator - - --kubeconfig=/etc/virtual-cluster-operator/kubeconfig - - --v=4 - volumes: + - name: virtual-cluster-operator + image: cis-hub-huabei-3.cmecloud.cn/vc-develop/virtual-cluster-operator:0.0.1 + imagePullPolicy: IfNotPresent + env: + - name: IMAGE_REPOSITIRY + value: repo1-cn-beijing.cr.volces.com/virtualcluster/virtualcluter + volumeMounts: - name: credentials - secret: - secretName: virtual-cluster-operator + mountPath: /etc/virtual-cluster-operator + readOnly: true + - name: shellscript + mountPath: /bin/kubelet_node_helper.sh + subPath: kubelet_node_helper.sh + - name: shellscript + mountPath: /bin/config.yaml + subPath: config.yaml + command: + - virtual-cluster-operator + - --kubeconfig=/etc/virtual-cluster-operator/kubeconfig + - --v=4 + volumes: + - name: credentials + secret: + secretName: virtual-cluster-operator + - name: shellscript + configMap: + name: virtual-cluster-operator diff --git a/go.mod b/go.mod index cadaa3ff4..52fec3ea9 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/google/go-cmp v0.5.9 github.com/gorilla/mux v1.8.1 + github.com/gorilla/websocket v1.4.2 github.com/olekukonko/tablewriter v0.0.4 github.com/onsi/ginkgo/v2 v2.9.2 github.com/onsi/gomega v1.27.4 @@ -43,7 +44,6 @@ require ( k8s.io/kube-scheduler v0.0.0 k8s.io/kubectl v0.26.3 k8s.io/kubernetes v1.13.0 - k8s.io/metrics v0.26.3 k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 sigs.k8s.io/controller-runtime v0.14.5 sigs.k8s.io/mcs-api v0.1.0 diff --git a/go.sum b/go.sum index 
2d14e37f2..0c25b4561 100644 --- a/go.sum +++ b/go.sum @@ -2307,7 +2307,6 @@ k8s.io/kubelet v0.26.3/go.mod h1:yd5GJNMOFLMKxP1rmZhg6etbYAbdTimF87fBIBtRimA= k8s.io/kubernetes v1.26.3 h1:LtjNGNNpCTRyrWhDJMwTWDX+4h+GLwfULS8pu0xzSdk= k8s.io/kubernetes v1.26.3/go.mod h1:NxzR7U7mS+OGa3J/qweI86Pek//mlfHqDgt6NNGdz8g= k8s.io/legacy-cloud-providers v0.26.3/go.mod h1:Scn0CIcptay5seel6MhAzLtoBseK+fL46uJSP84cnPo= -k8s.io/metrics v0.26.3 h1:pHI8XtmBbGGdh7bL0s2C3v93fJfxyktHPAFsnRYnDTo= k8s.io/metrics v0.26.3/go.mod h1:NNnWARAAz+ZJTs75Z66fJTV7jHcVb3GtrlDszSIr3fE= k8s.io/mount-utils v0.26.3 h1:FxMDiPLCkrYgonfSaKHWltLNkyTg3Q/Xrwn94uwhd8k= k8s.io/mount-utils v0.26.3/go.mod h1:95yx9K6N37y8YZ0/lUh9U6ITosMODNaW0/v4wvaa0Xw= diff --git a/hack/docker.sh b/hack/docker.sh index 9e95e2cae..31efdf918 100755 --- a/hack/docker.sh +++ b/hack/docker.sh @@ -41,7 +41,7 @@ function build_images() { if [[ "${target}" == "clusterlink-floater" ]]; then dockerfile="floater.Dockerfile" fi - + # Preferentially use `docker build`. If we are building multi platform, # or cross building, change to `docker buildx build` cross=$(isCross "${platforms}") diff --git a/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go b/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go index 846f81a36..80e115207 100644 --- a/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go +++ b/pkg/apis/kosmos/v1alpha1/virtualcluster_types.go @@ -12,8 +12,14 @@ const ( Preparing Phase = "Preparing" // Initialized means kubernetes control plane is ready,and kubeconfig is ready for use Initialized Phase = "Initialized" - // Completed means kubernetes control plane is ready,kosmos is joined, and resource is promoted + // Completed means everything is ready,kosmos is joined, and resource is promoted Completed Phase = "Completed" + // ControllerPlaneCompleted means kubernetes control plane is ready,kosmos is joined, and resource is promoted + ControllerPlaneCompleted Phase = "ControllerPlaneCompleted" + // AllNodeReady means all nodes have joined the virtual control plane and are in the running state + AllNodeReady Phase = "AllNodeReady" + // Updating means that some changes are happening + Updating Phase = "Updating" ) // +genclient @@ -76,15 +82,14 @@ type NodeInfo struct { //NodeName defines node name //+optional NodeName string `json:"nodeName,omitempty"` - //Address defines node ip - //+optional - Address string `json:"address,omitempty"` } type VirtualClusterStatus struct { // Phase is the phase of kosmos-operator handling the VirtualCluster // +optional Phase Phase `json:"phase,omitempty"` + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/kubenest/constants/constant.go b/pkg/kubenest/constants/constant.go index 44a47c464..2bd99c98c 100644 --- a/pkg/kubenest/constants/constant.go +++ b/pkg/kubenest/constants/constant.go @@ -4,11 +4,13 @@ import "time" const ( InitControllerName = "virtual-cluster-init-controller" + NodeControllerName = "virtual-cluster-node-controller" KosmosJoinControllerName = "kosmos-join-controller" SystemNs = "kube-system" DefauleImageRepositoryEnv = "IMAGE_REPOSITIRY" DefauleImageVersionEnv = "IMAGE_VERSION" VirtualClusterStatusCompleted = "Completed" + VirtualClusterStatusUpdating = "Updating" VirtualClusterFinalizerName = "kosmos.io/virtual-cluster-finalizer" ServiceType = "NodePort" EtcdServiceType = "ClusterIP" diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/constants.go 
b/pkg/kubenest/controller/virtualcluster.node.controller/constants.go new file mode 100644 index 000000000..e67b1e988 --- /dev/null +++ b/pkg/kubenest/controller/virtualcluster.node.controller/constants.go @@ -0,0 +1,14 @@ +package vcnodecontroller + +const NodePoolCMName = "node-pool" +const NodePoolCMNS = "kosmos-system" + +const KubeDNSName = "kube-dns" +const KubeDNSNS = "kube-system" + +const ExectorTmpPath = "/apps/conf/kosmos/tmp" + +const NodePoolStateVirtualCluster = "virtualcluster" +const NodePoolStateFree = "free" +const NodePoolStateShare = "share" +const NodePoolCMKeyName = "nodes" diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go b/pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go new file mode 100644 index 000000000..5d4d3bcc6 --- /dev/null +++ b/pkg/kubenest/controller/virtualcluster.node.controller/exector/exector.go @@ -0,0 +1,176 @@ +// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package exector + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/gorilla/websocket" + "k8s.io/klog/v2" +) + +type Status int + +const ( + SUCCESS Status = iota + FAILED +) + +type ExectorReturn struct { + Status Status + Reason string + LastLog string +} + +func (r *ExectorReturn) String() string { + return fmt.Sprintf("%d, %s, %s", r.Status, r.Reason, r.LastLog) +} + +type Exector interface { + GetWebSocketOption() WebSocketOption + SendHandler(conn *websocket.Conn, done <-chan struct{}, interrupt chan struct{}, result *ExectorReturn) +} + +type ExectorHelper struct { + Token string + Addr string +} + +func (h *ExectorHelper) createWebsocketConnection(opt WebSocketOption) (*websocket.Conn, *http.Response, error) { + u := url.URL{Scheme: "wss", Host: h.Addr, Path: opt.Path, RawQuery: url.PathEscape(opt.RawQuery)} + // nolint + dl := websocket.Dialer{TLSClientConfig: &tls.Config{RootCAs: nil, InsecureSkipVerify: true}} + + return dl.Dial(u.String(), http.Header{ + "Authorization": []string{"Basic " + h.Token}, + }) +} + +type WebSocketOption struct { + Path string + Addr string + RawQuery string +} + +func (h *ExectorHelper) DoExector(stopCh <-chan struct{}, exector Exector) *ExectorReturn { + ret := h.DoExectorReal(stopCh, exector) + // TODO: + if strings.Contains(ret.LastLog, "No such file or directory") { + // try to update shell script + shellPath := os.Getenv("EXECTOR_SHELL_PATH") + if len(shellPath) == 0 { + shellPath = "." 
+ } + srcFile := fmt.Sprintf("%s/kubelet_node_helper.sh", shellPath) + + klog.V(4).Infof("exector: src file path %s", srcFile) + + scpExector := &SCPExector{ + DstFilePath: ".", + DstFileName: "kubelet_node_helper.sh", + SrcFile: srcFile, + } + + if ret := h.DoExectorReal(stopCh, scpExector); ret.Status == SUCCESS { + return h.DoExectorReal(stopCh, exector) + } else { + return ret + } + } + return ret +} + +func (h *ExectorHelper) DoExectorReal(stopCh <-chan struct{}, exector Exector) *ExectorReturn { + // default is error + result := &ExectorReturn{ + FAILED, "init exector return status", "", + } + + // nolint + conn, _, err := h.createWebsocketConnection(exector.GetWebSocketOption()) + if err != nil { + result.Reason = err.Error() + return result + } + defer conn.Close() + + done := make(chan struct{}) + interrupt := make(chan struct{}) + + go exector.SendHandler(conn, done, interrupt, result) + + go func() { + defer close(done) + for { + _, message, err := conn.ReadMessage() + if err != nil { + klog.V(4).Infof("read: %s", err) + cerr, ok := err.(*websocket.CloseError) + if ok && cerr.Text == "0" { + result.Status = SUCCESS + result.Reason = "success" + } else { + result.Reason = err.Error() + } + return + } + klog.V(4).Infof("recv: %s", string(message)) + // last + result.LastLog = result.LastLog + string(message) + } + }() + + for { + select { + case <-stopCh: // finished circulate when stopCh is closed + close(interrupt) + case <-interrupt: + err := conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) + if err != nil { + result.Reason = err.Error() + return result + } + select { + case <-done: + case <-time.After(time.Second): + } + return result + case <-done: + return result + } + } +} + +func NewExectorHelper(addr string, port string) *ExectorHelper { + var exectorPort string + if len(port) == 0 { + exectorPort = os.Getenv("EXECTOR_SERVER_PORT") + if len(exectorPort) == 0 { + exectorPort = "5678" + } + } else { + exectorPort = port + } + + token := os.Getenv("EXECTOR_SHELL_TOKEN") + if len(token) == 0 { + // token example + // const username = "xxxxxxxx" + // const password = "xxxxxxxx" + // token = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + // nolint + token = "YWRtaW46YmljaF9vb3NoMnpvaDZPaA==" + } + + return &ExectorHelper{ + Token: token, + Addr: fmt.Sprintf("%s:%s", addr, exectorPort), + } +} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_cmd.go b/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_cmd.go new file mode 100644 index 000000000..a6eb32dbc --- /dev/null +++ b/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_cmd.go @@ -0,0 +1,19 @@ +package exector + +import ( + "github.com/gorilla/websocket" +) + +type CMDExector struct { + Cmd string +} + +func (e *CMDExector) GetWebSocketOption() WebSocketOption { + return WebSocketOption{ + Path: "cmd/", + RawQuery: "command=" + e.Cmd, + } +} + +func (e *CMDExector) SendHandler(_ *websocket.Conn, _ <-chan struct{}, _ chan struct{}, _ *ExectorReturn) { +} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_scp.go b/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_scp.go new file mode 100644 index 000000000..7207d0b72 --- /dev/null +++ b/pkg/kubenest/controller/virtualcluster.node.controller/exector/remote_scp.go @@ -0,0 +1,89 @@ +package exector + +import ( + "bufio" + "fmt" + "os" + + 
"github.com/gorilla/websocket" + "k8s.io/klog/v2" +) + +type SCPExector struct { + DstFilePath string + DstFileName string + SrcFile string + SrcByte []byte +} + +func (e *SCPExector) GetWebSocketOption() WebSocketOption { + return WebSocketOption{ + Path: "upload/", + RawQuery: fmt.Sprintf("file_name=%s&&file_path=%s", e.DstFileName, e.DstFilePath), + } +} + +func (e *SCPExector) SendHandler(conn *websocket.Conn, done <-chan struct{}, interrupt chan struct{}, result *ExectorReturn) { + errHandler := func(err error) { + klog.V(4).Infof("write: %s", err) + result.Reason = err.Error() + close(interrupt) + } + + send := func(data []byte) error { + err := conn.WriteMessage(websocket.BinaryMessage, []byte(data)) + if err != nil { + return err + } + return nil + } + + if len(e.SrcByte) > 0 { + if err := send(e.SrcByte); err != nil { + errHandler(err) + return + } + } else { + file, err := os.Open(e.SrcFile) + if err != nil { + errHandler(err) + return + } + defer file.Close() + + // 指定每次读取的数据块大小 + bufferSize := 1024 // 例如每次读取 1024 字节 + buffer := make([]byte, bufferSize) + + reader := bufio.NewReader(file) + for { + select { + case <-interrupt: + return + case <-done: + return + default: + } + n, err := reader.Read(buffer) + if err != nil { + // check if EOF + if err.Error() == "EOF" { + break + } + errHandler(err) + return + } + dataToSend := buffer[:n] + + if err := send(dataToSend); err != nil { + errHandler(err) + return + } + } + } + + if err := send([]byte("EOF")); err != nil { + errHandler(err) + return + } +} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/join-worker.go b/pkg/kubenest/controller/virtualcluster.node.controller/join-worker.go new file mode 100644 index 000000000..18d6705dd --- /dev/null +++ b/pkg/kubenest/controller/virtualcluster.node.controller/join-worker.go @@ -0,0 +1,184 @@ +package vcnodecontroller + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" + "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/exector" + vcrnodepoolcontroller "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.nodepool.controller" +) + +// kubeadm join +func getJoinCmdStr(log string) (string, error) { + strs := strings.Split(log, "kubeadm join") + if len(strs) != 2 { + return "", fmt.Errorf("get join cmd str failed") + } + return fmt.Sprintf("kubeadm join %s", strs[1]), nil +} + +func (r *NodeController) WaitNodeReady(ctx context.Context, nodeInfo vcrnodepoolcontroller.NodeItem, k8sClient kubernetes.Interface) error { + waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second) // total waiting time + defer cancel() + + isReady := false + + wait.UntilWithContext(waitCtx, func(ctx context.Context) { + node, err := k8sClient.CoreV1().Nodes().Get(waitCtx, nodeInfo.Name, metav1.GetOptions{}) + if err == nil { + if node.Status.Phase == v1.NodeRunning { + klog.V(4).Infof("node %s is ready", nodeInfo.Name) + isReady = true + cancel() + } else { + klog.V(4).Infof("node %s is not ready, status: %s", nodeInfo.Name, node.Status.Phase) + } + } else { + klog.V(4).Infof("get node %s failed: %s", nodeInfo.Name, err) + } + }, 10*time.Second) // Interval time + + if isReady { + return nil + } + + return fmt.Errorf("node %s is not ready", nodeInfo.Name) +} + 
+func (r *NodeController) joinNode(ctx context.Context, nodeInfos []vcrnodepoolcontroller.NodeItem, virtualCluster v1alpha1.VirtualCluster, k8sClient kubernetes.Interface) error { + if len(nodeInfos) == 0 { + return nil + } + + clusterDNS := "127.0.0.1" + dnssvc, err := k8sClient.CoreV1().Services((KubeDNSNS)).Get(ctx, KubeDNSName, metav1.GetOptions{}) + if err != nil { + // TODO: wait dns + // return fmt.Errorf("get kube-dns service failed: %s", err) + klog.Errorf("get kube-dns service failed: %s", err) + } else { + clusterDNS = dnssvc.Spec.ClusterIP + } + + for _, nodeInfo := range nodeInfos { + // add node to new cluster + exectHelper := exector.NewExectorHelper(nodeInfo.Address, "") + + // check + checkCmd := &exector.CMDExector{ + Cmd: "sh kubelet_node_helper.sh check", + } + ret := exectHelper.DoExector(ctx.Done(), checkCmd) + if ret.Status != exector.SUCCESS { + return fmt.Errorf("check node %s failed: %s", nodeInfo.Name, ret.String()) + } + + // step(1/5) reset node + resetCmd := &exector.CMDExector{ + Cmd: "sh kubelet_node_helper.sh unjoin", + } + ret = exectHelper.DoExector(ctx.Done(), resetCmd) + if ret.Status != exector.SUCCESS { + return fmt.Errorf("reset node %s failed: %s", nodeInfo.Name, ret.String()) + } + // step(2/5) scp ca of virtualcluster + nn := types.NamespacedName{ + Namespace: virtualCluster.Namespace, + Name: fmt.Sprintf("%s-cert", virtualCluster.Namespace), + } + targetCert := &v1.Secret{} + if err := r.Get(ctx, nn, targetCert); err != nil { + return fmt.Errorf("get target cert %s failed: %s", nn, err) + } + + cacrt := targetCert.Data["ca.crt"] + scpCrtCmd := &exector.SCPExector{ + DstFilePath: ExectorTmpPath, + DstFileName: "ca.crt", + SrcByte: cacrt, + } + ret = exectHelper.DoExector(ctx.Done(), scpCrtCmd) + if ret.Status != exector.SUCCESS { + return fmt.Errorf("scp ca.crt to node %s failed: %s", nodeInfo.Name, ret.String()) + } + + // step(3/5) scp kubeconfig of virtualcluster + kubeconfig, err := base64.StdEncoding.DecodeString(virtualCluster.Spec.Kubeconfig) + if err != nil { + return fmt.Errorf("decode target kubeconfig %s failed: %s", nn, err) + } + + scpKCCmd := &exector.SCPExector{ + DstFilePath: ExectorTmpPath, + DstFileName: "kubelet.conf", + SrcByte: kubeconfig, + } + ret = exectHelper.DoExector(ctx.Done(), scpKCCmd) + if ret.Status != exector.SUCCESS { + return fmt.Errorf("scp kubeconfig to node %s failed: %s", nodeInfo.Name, ret.String()) + } + + // step(4/5) scp kubelet config + kubeletConfigPath := os.Getenv("EXECTOR_KUBELET_CONFIG_PATH") + if len(kubeletConfigPath) == 0 { + kubeletConfigPath = "/bin/config.yaml" + } + scpKubeletConfigCmd := &exector.SCPExector{ + DstFilePath: ExectorTmpPath, + DstFileName: "config.yaml", + SrcFile: kubeletConfigPath, // from configmap volumn + } + + ret = exectHelper.DoExector(ctx.Done(), scpKubeletConfigCmd) + if ret.Status != exector.SUCCESS { + return fmt.Errorf("scp kubelet config to node %s failed: %s", nodeInfo.Name, ret.String()) + } + + // step(5/5) join node + joinCmd := &exector.CMDExector{ + Cmd: fmt.Sprintf("sh kubelet_node_helper.sh join %s", clusterDNS), + } + ret = exectHelper.DoExector(ctx.Done(), joinCmd) + if ret.Status != exector.SUCCESS { + return fmt.Errorf("join node %s failed: %s", nodeInfo.Name, ret.String()) + } + + // wait node ready + if err := r.WaitNodeReady(ctx, nodeInfo, k8sClient); err != nil { + return err + } + + // TODO: maybe change kubeadm-flags.env + // add label + node, err := k8sClient.CoreV1().Nodes().Get(ctx, nodeInfo.Name, metav1.GetOptions{}) + if err != nil { + 
return fmt.Errorf("get node %s failed: %s", nodeInfo.Name, err) + } + + updateNode := node.DeepCopy() + for k, v := range nodeInfo.Labels { + node.Labels[k] = v + } + + if _, err := k8sClient.CoreV1().Nodes().Update(ctx, updateNode, metav1.UpdateOptions{}); err != nil { + return fmt.Errorf("add label to node %s failed: %s", nodeInfo.Name, err) + } + // update nodepool status + if err := r.UpdateNodePoolState(ctx, nodeInfo.Name, NodePoolStateVirtualCluster); err != nil { + return err + } + } + return nil +} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go b/pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go new file mode 100644 index 000000000..253237747 --- /dev/null +++ b/pkg/kubenest/controller/virtualcluster.node.controller/node_controller.go @@ -0,0 +1,240 @@ +package vcnodecontroller + +import ( + "context" + "encoding/base64" + "fmt" + + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kosmos.io/kosmos/pkg/apis/kosmos/v1alpha1" + "github.com/kosmos.io/kosmos/pkg/kubenest/constants" + vcrnodepoolcontroller "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.nodepool.controller" + "github.com/kosmos.io/kosmos/pkg/utils" +) + +type NodeController struct { + client.Client + EventRecorder record.EventRecorder +} + +// TODO: status +func (r *NodeController) SetupWithManager(mgr manager.Manager) error { + if r.Client == nil { + r.Client = mgr.GetClient() + } + + skipEvent := func(obj client.Object) bool { + return true + } + + return ctrl.NewControllerManagedBy(mgr). + Named(constants.NodeControllerName). + WithOptions(controller.Options{}). + For(&v1alpha1.VirtualCluster{}, builder.WithPredicates(predicate.Funcs{ + CreateFunc: func(createEvent event.CreateEvent) bool { + return skipEvent(createEvent.Object) + }, + UpdateFunc: func(updateEvent event.UpdateEvent) bool { + return skipEvent(updateEvent.ObjectNew) + }, + DeleteFunc: func(deleteEvent event.DeleteEvent) bool { + return skipEvent(deleteEvent.Object) + }, + GenericFunc: func(genericEvent event.GenericEvent) bool { + return skipEvent(genericEvent.Object) + }, + })). 
+ Complete(r) +} + +func (c *NodeController) GenerateKubeclient(virtualCluster *v1alpha1.VirtualCluster) (kubernetes.Interface, error) { + if len(virtualCluster.Spec.Kubeconfig) == 0 { + return nil, fmt.Errorf("virtualcluster %s kubeconfig is empty", virtualCluster.Name) + } + kubeconfigStream, err := base64.StdEncoding.DecodeString(virtualCluster.Spec.Kubeconfig) + if err != nil { + return nil, fmt.Errorf("virtualcluster %s decode target kubernetes kubeconfig %s err: %v", virtualCluster.Name, virtualCluster.Spec.Kubeconfig, err) + } + + config, err := utils.NewConfigFromBytes(kubeconfigStream) + if err != nil { + return nil, fmt.Errorf("generate kubernetes config failed: %s", err) + } + + k8sClient, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("generate K8s basic client failed: %v", err) + } + + return k8sClient, nil +} + +func hasItemInArray(name string, f func(string) bool) bool { + return f(name) +} + +func compareAndTranformNodes(targetNodes []v1alpha1.NodeInfo, actualNodes []v1.Node, nodePools map[string]vcrnodepoolcontroller.NodeItem) ([]vcrnodepoolcontroller.NodeItem, []vcrnodepoolcontroller.NodeItem, error) { + unjoinNodes := []vcrnodepoolcontroller.NodeItem{} + joinNodes := []vcrnodepoolcontroller.NodeItem{} + + // cacheMap := map[string]string{} + for _, targetNode := range targetNodes { + has := hasItemInArray(targetNode.NodeName, func(name string) bool { + for _, actualNode := range actualNodes { + if actualNode.Name == name { + return true + } + } + return false + }) + + if !has { + nodePool, ok := nodePools[targetNode.NodeName] + if !ok { + return nil, nil, fmt.Errorf("node %s not found in node pool", targetNode.NodeName) + } + joinNodes = append(joinNodes, nodePool) + } + } + + for _, actualNode := range actualNodes { + has := hasItemInArray(actualNode.Name, func(name string) bool { + for _, targetNode := range targetNodes { + if targetNode.NodeName == name { + return true + } + } + return false + }) + + if !has { + nodePool, ok := nodePools[actualNode.Name] + if !ok { + return nil, nil, fmt.Errorf("node %s not found in node pool", actualNode.Name) + } + unjoinNodes = append(unjoinNodes, nodePool) + } + } + + return unjoinNodes, joinNodes, nil +} + +func (r *NodeController) GetNodePool(ctx context.Context) (map[string]vcrnodepoolcontroller.NodeItem, error) { + nodePool := v1.ConfigMap{} + if err := r.Client.Get(ctx, types.NamespacedName{Name: NodePoolCMName, Namespace: NodePoolCMNS}, &nodePool); err != nil { + return nil, fmt.Errorf("get node-pool failed: %v", err) + } + + nodePools, err := vcrnodepoolcontroller.ConvertJsonToNodeItem(nodePool.Data[NodePoolCMKeyName]) + if err != nil { + return nil, fmt.Errorf("convert node-pool failed: %v", err) + } + + return nodePools, nil +} + +func (r *NodeController) UpdateVirtualClusterStatus(ctx context.Context, virtualCluster v1alpha1.VirtualCluster, status v1alpha1.Phase, reason string) error { + updateVirtualCluster := virtualCluster.DeepCopy() + updateVirtualCluster.Status.Phase = status + updateVirtualCluster.Status.Reason = reason + + if err := r.Update(ctx, updateVirtualCluster); err != nil { + return fmt.Errorf("update virtualcluster %s status failed: %s", virtualCluster.Name, err) + } + return nil +} + +func (r *NodeController) DoNodeTask(ctx context.Context, virtualCluster v1alpha1.VirtualCluster) error { + k8sClient, err := r.GenerateKubeclient(&virtualCluster) + if err != nil { + return fmt.Errorf("virtualcluster %s crd kubernetes client failed: %v", virtualCluster.Name, err) + } + + 
nodes, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("virtualcluster %s get virtual-cluster nodes list failed: %v", virtualCluster.Name, err) + } + + nodePools, err := r.GetNodePool(ctx) + if err != nil { + return err + } + + // compare cr and actual nodes in k8s + unjoinNodes, joinNodes, err := compareAndTranformNodes(virtualCluster.Spec.PromoteResources.NodeInfos, nodes.Items, nodePools) + if err != nil { + return fmt.Errorf("compare cr and actual nodes failed, virtual-cluster-name: %v, err: %s", virtualCluster.Name, err) + } + + if len(unjoinNodes) > 0 || len(joinNodes) > 0 { + if err := r.UpdateVirtualClusterStatus(ctx, virtualCluster, v1alpha1.Updating, "node task"); err != nil { + return err + } + } + if len(unjoinNodes) > 0 { + // unjoin node + if err := r.unjoinNode(ctx, unjoinNodes, k8sClient); err != nil { + return fmt.Errorf("virtualcluster %s unjoin node failed: %v", virtualCluster.Name, err) + } + } + if len(joinNodes) > 0 { + // join node + if err := r.joinNode(ctx, joinNodes, virtualCluster, k8sClient); err != nil { + return fmt.Errorf("virtualcluster %s join node failed: %v", virtualCluster.Name, err) + } + } + + if len(unjoinNodes) > 0 || len(joinNodes) > 0 { + if err := r.UpdateVirtualClusterStatus(ctx, virtualCluster, v1alpha1.AllNodeReady, "node ready"); err != nil { + return err + } + } + return nil +} + +func (r *NodeController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + klog.V(4).Infof("============ virtual-cluster-node-controller start to reconcile %s ============", request.NamespacedName) + defer klog.V(4).Infof("============ virtual-cluster-node-controller finish to reconcile %s ============", request.NamespacedName) + + // check virtual cluster nodes + var virtualCluster v1alpha1.VirtualCluster + if err := r.Get(ctx, request.NamespacedName, &virtualCluster); err != nil { + if apierrors.IsNotFound(err) { + klog.V(4).Infof("virtual-cluster-node-controller: can not found %s", request.NamespacedName) + return reconcile.Result{}, nil + } + klog.Errorf("get clusternode %s error: %v", request.NamespacedName, err) + return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil + } + + if !virtualCluster.GetDeletionTimestamp().IsZero() { + return reconcile.Result{}, nil + } + + if virtualCluster.Status.Phase != v1alpha1.ControllerPlaneCompleted { + klog.V(4).Infof("virtualcluster wait cluster ready, cluster name: %s", virtualCluster.Name) + return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil + } + + if err := r.DoNodeTask(ctx, virtualCluster); err != nil { + klog.Errorf("virtualcluster %s do node task failed: %v", virtualCluster.Name, err) + return reconcile.Result{RequeueAfter: utils.DefaultRequeueTime}, nil + } + + return reconcile.Result{}, nil +} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/share.go b/pkg/kubenest/controller/virtualcluster.node.controller/share.go new file mode 100644 index 000000000..3da99e014 --- /dev/null +++ b/pkg/kubenest/controller/virtualcluster.node.controller/share.go @@ -0,0 +1,50 @@ +package vcnodecontroller + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" + + vcrnodepoolcontroller "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.nodepool.controller" +) + +// TODO: biz +func (r *NodeController) UpdateNodePoolState(ctx context.Context, nodeName string, nodePoolState string) error { + err := 
retry.RetryOnConflict(retry.DefaultRetry, func() error { + nodePool := v1.ConfigMap{} + if err := r.Client.Get(ctx, types.NamespacedName{Name: NodePoolCMName, Namespace: NodePoolCMNS}, &nodePool); err != nil { + return fmt.Errorf("get node-pool failed: %v", err) + } + + updateNodePool := nodePool.DeepCopy() + + jsonStr := updateNodePool.Data[NodePoolCMKeyName] + nodePoolItem, err := vcrnodepoolcontroller.ConvertJsonToNodePoolItem(jsonStr) + if err != nil { + return err + } + + targetNodePoolItem := nodePoolItem[nodeName] + targetNodePoolItem.State = nodePoolState + + nodePoolItem[nodeName] = targetNodePoolItem + + nodePoolBytes, err := vcrnodepoolcontroller.ConvertNodePoolItemToJson(nodePoolItem) + if err != nil { + return err + } + + updateNodePool.Data[NodePoolCMKeyName] = string(nodePoolBytes) + + if err := r.Client.Update(ctx, updateNodePool); err != nil { + return err + } + + return nil + }) + + return err +} diff --git a/pkg/kubenest/controller/virtualcluster.node.controller/unjoin-worker.go b/pkg/kubenest/controller/virtualcluster.node.controller/unjoin-worker.go new file mode 100644 index 000000000..05bc888d7 --- /dev/null +++ b/pkg/kubenest/controller/virtualcluster.node.controller/unjoin-worker.go @@ -0,0 +1,83 @@ +package vcnodecontroller + +import ( + "context" + "fmt" + "os" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + + "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.node.controller/exector" + vcrnodepoolcontroller "github.com/kosmos.io/kosmos/pkg/kubenest/controller/virtualcluster.nodepool.controller" +) + +func (r *NodeController) joinNodeToHost(ctx context.Context, nodeInfo vcrnodepoolcontroller.NodeItem) error { + masterNodeIP := os.Getenv("EXECTOR_HOST_MASTER_NODE_IP") + hostPort := "" + if len(masterNodeIP) == 0 { + return fmt.Errorf("get master node ip from env failed") + } + hostExectorHelper := exector.NewExectorHelper(masterNodeIP, hostPort) + joinCmdStrCmd := &exector.CMDExector{ + Cmd: "kubeadm token create --print-join-command", + } + // step(1/3) get join cmd + ret := hostExectorHelper.DoExector(ctx.Done(), joinCmdStrCmd) + if ret.Status != exector.SUCCESS { + return fmt.Errorf("get host join cmd on node %s failed: %s", nodeInfo.Name, ret.String()) + } + joinCmdStr, err := getJoinCmdStr(ret.LastLog) + if err != nil { + return err + } + + exectHelper := exector.NewExectorHelper(nodeInfo.Address, "") + // step(2/3) remove node from old cluster + resetCmd := &exector.CMDExector{ + Cmd: "sh kubelet_node_helper.sh unjoin", + } + + ret = exectHelper.DoExector(ctx.Done(), resetCmd) + if ret.Status != exector.SUCCESS { + return fmt.Errorf("reset node %s failed: %s", nodeInfo.Name, ret.String()) + } + + // step(3/3) add node to host-cluster + joinCmd := &exector.CMDExector{ + Cmd: joinCmdStr, + } + + ret = exectHelper.DoExector(ctx.Done(), joinCmd) + if ret.Status != exector.SUCCESS { + return fmt.Errorf("exec join cmd on node %s failed: %s, join cmd: %s", nodeInfo.Name, ret.String(), joinCmdStr) + } + + return nil +} + +func (r *NodeController) unjoinNode(ctx context.Context, nodeInfos []vcrnodepoolcontroller.NodeItem, k8sClient kubernetes.Interface) error { + // delete node from cluster + for _, nodeInfo := range nodeInfos { + // remove node from cluster + klog.V(4).Infof("start remove node from cluster, node name: %s", nodeInfo.Name) + err := k8sClient.CoreV1().Nodes().Delete(ctx, nodeInfo.Name, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("remove node from cluster failed, node 
name: %s", nodeInfo.Name) + return fmt.Errorf("%s, %s", nodeInfo.Name, err) + } + klog.V(4).Infof("remove node from cluster successed, node name: %s", nodeInfo.Name) + + // TODO: move to node pool controller, add node to host cluster + if err := r.joinNodeToHost(ctx, nodeInfo); err != nil { + klog.Errorf("join node %s to host cluster failed: %s", nodeInfo.Name, err) + return err + } + // update nodepool status + if err := r.UpdateNodePoolState(ctx, nodeInfo.Name, NodePoolStateFree); err != nil { + return err + } + } + return nil +} diff --git a/pkg/kubenest/controller/virtualcluster.nodepool.controller/nodepool.go b/pkg/kubenest/controller/virtualcluster.nodepool.controller/nodepool.go new file mode 100644 index 000000000..f874adb80 --- /dev/null +++ b/pkg/kubenest/controller/virtualcluster.nodepool.controller/nodepool.go @@ -0,0 +1,56 @@ +package vcrnodepoolcontroller + +import ( + "encoding/json" +) + +type NodePoolMapItem struct { + Address string `json:"address"` + Labels map[string]string `json:"labels"` + Cluster string `json:"cluster"` + State string `json:"state"` +} + +type NodeItem struct { + NodePoolMapItem + Name string `json:"-"` +} + +func ConvertJsonToNodeItem(jsonStr string) (map[string]NodeItem, error) { + nodepoolMap := map[string]NodeItem{} + + nodepoolItem, err := ConvertJsonToNodePoolItem(jsonStr) + if err != nil { + return nil, err + } + + for k, v := range nodepoolItem { + nodepoolMap[k] = NodeItem{ + NodePoolMapItem: v, + Name: k, + } + } + + return nodepoolMap, nil +} + +func ConvertJsonToNodePoolItem(jsonStr string) (map[string]NodePoolMapItem, error) { + nodepoolItem := map[string]NodePoolMapItem{} + err := json.Unmarshal([]byte(jsonStr), &nodepoolItem) + if err != nil { + return nil, err + } + return nodepoolItem, nil +} + +func ConvertNodePoolItemToJson(nodepoolItem map[string]NodePoolMapItem) ([]byte, error) { + jsonStr, err := json.Marshal(nodepoolItem) + if err != nil { + return nil, err + } + return jsonStr, nil +} + +// controller task +// TODO: free node need join to host cluster +// TODO: check orphan node diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore new file mode 100644 index 000000000..cd3fcd1ef --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.idea/ +*.iml diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 000000000..1931f4006 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 000000000..9171c9722 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 000000000..19aa2e75c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,64 @@ +# Gorilla WebSocket + +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +### Documentation + +* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + +### Gorilla WebSocket compared with other packages + + + + + + + + + + + + + + + + + + +
github.com/gorillagolang.org/x/net
RFC 6455 Features
Passes Autobahn Test SuiteYesNo
Receive fragmented messageYesNo, see note 1
Send close messageYesNo
Send pings and receive pongsYesNo
Get the type of a received data messageYesYes, see note 2
Other Features
Compression ExtensionsExperimentalNo
Read message using io.ReaderYesNo, see note 3
Write message using io.WriteCloserYesNo, see note 3
+ +Notes: + +1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). +2. The application can get the type of a received data message by implementing + a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) + function. +3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. + Read returns when the input buffer is full or a frame boundary is + encountered. Each call to Write sends a single frame message. The Gorilla + io.Reader and io.WriteCloser operate on a single WebSocket message. + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 000000000..962c06a39 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,395 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, net.DialContext is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. 
If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer. +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. 
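+		// (RFC 6455 section 3 defines ws and wss URIs without a userinfo
+		// component, so a URL such as ws://user:pass@host is rejected here.)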
+ return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. + var netDial func(network, add string) (net.Conn, error) + + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } else { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. + if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. 
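+	// (The vendored x_net_proxy.go resolves the proxy URL scheme to a dialer:
+	// SOCKS5 proxies are handled by the bundled golang.org/x/net/proxy code,
+	// and proxy.go adds HTTP proxy support by tunneling the connection
+	// through a CONNECT request.)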
+ if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if u.Scheme == "https" { + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + var err error + if trace != nil { + err = doHandshakeWithTrace(trace, tlsConn, cfg) + } else { + err = doHandshake(tlsConn, cfg) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} + +func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go new file mode 100644 index 000000000..4f0d94372 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.8 + +package websocket + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go new file mode 100644 index 000000000..babb007fb --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +import "crypto/tls" + +// cloneTLSConfig clones all public fields except the fields +// SessionTicketsDisabled and SessionTicketKey. This avoids copying the +// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a +// config in active use. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 000000000..813ffb1e8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. 
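+// The four bytes held back are the 0x00 0x00 0xff 0xff tail that flate's
+// sync flush appends; RFC 7692 requires that tail to be removed from the
+// compressed message payload, and flateWriteWrapper.Close verifies that the
+// held-back bytes really are that marker.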
+type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. + if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 000000000..ca46d2f79 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1201 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. 
+ BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) + case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. 
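+//
+// A minimal sketch of typical use in a read loop (the logger and the
+// handleMessage helper are illustrative placeholders):
+//
+//	for {
+//		_, msg, err := c.ReadMessage()
+//		if err != nil {
+//			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseNormalClosure) {
+//				log.Printf("unexpected close: %v", err)
+//			}
+//			break
+//		}
+//		handleMessage(msg)
+//	}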
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // set setReadRemaining to safely update this value and prevent overflow + readRemaining int64 + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. 
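+	// readMaskPos and readMaskKey track the masking key and offset used to
+	// unmask frames received from a client (server-side reads only).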
+ readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan struct{}, 1) + mu <- struct{}{} + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. 
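+//
+// A minimal sketch of sending an application-level ping with a short write
+// deadline (the five-second deadline is illustrative):
+//
+//	deadline := time.Now().Add(5 * time.Second)
+//	if err := c.WriteControl(websocket.PingMessage, nil, deadline); err != nil {
+//		// the write timed out or the connection is failing
+//	}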
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := 1000 * time.Hour + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return nil, err + } + c.writer = &mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) endMessage(err error) error { + if w.err != nil { + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. 
+func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.endMessage(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.endMessage(err) + } + + if final { + w.endMessage(errWriteClosed) + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
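+		// Passing p as the extra argument lets flushFrame write the payload
+		// to the connection directly instead of copying it through writeBuf.
+		// This path is server-only: client frames must be masked in place in
+		// writeBuf, and flushFrame reports an error if extra is used there.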
+ err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + return w.flushFrame(true, nil) +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return err + } + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
+ + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + final := p[0]&finalBit != 0 + frameType := int(p[0] & 0xf) + mask := p[1]&maskBit != 0 + c.setReadRemaining(int64(p[1] & 0x7f)) + + c.readDecompress = false + if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { + c.readDecompress = true + p[0] &^= rsv1Bit + } + + if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. Multibyte length quantities are expressed in network byte + // order. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. + if c.readLength < 0 { + return noFrame, ErrReadLimit + } + + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.setReadRemaining(0) + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. 
+ + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("invalid close code") + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + rem := c.readRemaining + rem -= int64(n) + c.setReadRemaining(rem) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. 
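+//
+// A minimal sketch of a handler that records the peer's close code before
+// replying with the default-style close message (the logger is illustrative):
+//
+//	c.SetCloseHandler(func(code int, text string) error {
+//		log.Printf("peer sent close %d: %s", code, text)
+//		msg := websocket.FormatCloseMessage(code, "")
+//		return c.WriteControl(websocket.CloseMessage, msg, time.Now().Add(time.Second))
+//	})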
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. 
+ return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/conn_write.go b/vendor/github.com/gorilla/websocket/conn_write.go new file mode 100644 index 000000000..a509a21f8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package websocket + +import "net" + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go new file mode 100644 index 000000000..37edaff5a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write_legacy.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +func (c *Conn) writeBufs(bufs ...[]byte) error { + for _, buf := range bufs { + if len(buf) > 0 { + if _, err := c.conn.Write(buf); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 000000000..8db0cef95 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,227 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. 
This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. 
It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default +// size of 4096 when a buffer size field is set to zero. The Upgrader reuses +// buffers created by the HTTP server when a buffer size field is set to zero. +// The HTTP server buffers have a size of 4096 at the time of this writing. +// +// The buffer sizes do not limit the size of a message that can be read or +// written by a connection. +// +// Buffers are held for the lifetime of the connection by default. If the +// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the +// write buffer only when writing a message. +// +// Applications should tune the buffer sizes to balance memory use and +// performance. Increasing the buffer size uses more memory, but can reduce the +// number of system calls to read or write the network. In the case of writing, +// increasing the buffer size can reduce the number of frame headers written to +// the network. +// +// Some guidelines for setting buffer parameters are: +// +// Limit the buffer sizes to the maximum expected message size. Buffers larger +// than the largest message do not provide any benefit. +// +// Depending on the distribution of message sizes, setting the buffer size to +// a value less than the maximum expected message size can greatly reduce memory +// use with a small impact on performance. Here's an example: If 99% of the +// messages are smaller than 256 bytes and the maximum message size is 512 +// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls +// than a buffer size of 512 bytes. The memory savings is 50%. +// +// A write buffer pool is useful when the application has a modest number +// writes over a large number of connections. when buffers are pooled, a larger +// buffer size has a reduced impact on total memory use and has the benefit of +// reducing system calls and frame overhead. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. 
+// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go new file mode 100644 index 000000000..c64f8c829 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/join.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "io" + "strings" +) + +// JoinMessages concatenates received messages to create a single io.Reader. +// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. +func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 000000000..dc2c1f641 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. 
+// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 000000000..577fce9ef --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,54 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 000000000..2aac060e5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 000000000..c854225e9 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. 
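For reference, the WriteJSON and ReadJSON helpers above are typically paired with a plain Go struct on both ends of the connection. The Message type below is hypothetical and used only for illustration.

package example

import "github.com/gorilla/websocket"

// Message is a hypothetical payload type.
type Message struct {
	Kind string `json:"kind"`
	Body string `json:"body"`
}

// roundTrip sends req and decodes the peer's next message into resp.
func roundTrip(c *websocket.Conn, req Message) (Message, error) {
	var resp Message
	if err := c.WriteJSON(req); err != nil {
		return resp, err
	}
	err := c.ReadJSON(&resp)
	return resp, err
}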
+type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. + mu := make(chan struct{}, 1) + mu <- struct{}{} + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 000000000..e87a8c9f0 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + forwardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.forwardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + conn.Close() + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. + br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 000000000..887d55891 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,363 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. 
+ WriteBufferPool BufferPool + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is not nil, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. +func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-WebSocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) 
+ } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. +func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} + +// bufioReaderSize size returns the size of a bufio.Reader. 
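Putting the pieces together, a minimal echo handler built on Upgrader and the ReadMessage/WriteMessage methods might look like the sketch below. The route, listen address, and buffer sizes are illustrative; CheckOrigin is left nil so the same-origin default described above applies.

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

// Leaving CheckOrigin nil keeps the safe same-origin default.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024, // illustrative
	WriteBufferSize: 1024, // illustrative
}

func echo(w http.ResponseWriter, r *http.Request) {
	c, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println("upgrade:", err)
		return
	}
	defer c.Close()
	for {
		mt, msg, err := c.ReadMessage()
		if err != nil {
			log.Println("read:", err)
			return
		}
		if err := c.WriteMessage(mt, msg); err != nil {
			log.Println("write:", err)
			return
		}
	}
}

func main() {
	http.HandleFunc("/echo", echo)                         // path is illustrative
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil)) // address is illustrative
}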
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { + // This code assumes that peek on a reset reader returns + // bufio.Reader.buf[:0]. + // TODO: Use bufio.Reader.Size() after Go 1.10 + br.Reset(originalReader) + if p, err := br.Peek(0); err == nil { + return cap(p) + } + return 0 +} + +// writeHook is an io.Writer that records the last slice passed to it vio +// io.Writer.Write. +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +// bufioWriterBuffer grabs the buffer from a bufio.Writer. +func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { + // This code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + bw.Reset(&wh) + bw.WriteByte(0) + bw.Flush() + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/trace.go b/vendor/github.com/gorilla/websocket/trace.go new file mode 100644 index 000000000..834f122a0 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/trace.go @@ -0,0 +1,19 @@ +// +build go1.8 + +package websocket + +import ( + "crypto/tls" + "net/http/httptrace" +) + +func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { + if trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(tlsConn, cfg) + if trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/trace_17.go b/vendor/github.com/gorilla/websocket/trace_17.go new file mode 100644 index 000000000..77d05a0b5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/trace_17.go @@ -0,0 +1,12 @@ +// +build !go1.8 + +package websocket + +import ( + "crypto/tls" + "net/http/httptrace" +) + +func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { + return doHandshake(tlsConn, cfg) +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 000000000..7bf2f66c6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,283 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Token octets per RFC 2616. 
+var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +// skipSpace returns a slice of the string s with all leading RFC 2616 linear +// whitespace removed. +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if b := s[i]; b != ' ' && b != '\t' { + break + } + } + return s[i:] +} + +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if !isTokenOctet[s[i]] { + break + } + } + return s[:i], s[i:] +} + +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. +func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. 
+func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 000000000..2e668f6b8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
+func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. 
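Applications normally do not call this bundled proxy code directly. One common configuration is to set the Dialer's Proxy field and let the package route the handshake through an HTTP CONNECT or SOCKS5 proxy as appropriate; the sketch below assumes the standard proxy environment variables are in use.

package example

import (
	"net/http"

	"github.com/gorilla/websocket"
)

// proxiedDialer performs the WebSocket handshake through whatever proxy
// http.ProxyFromEnvironment selects (HTTP_PROXY, HTTPS_PROXY, NO_PROXY).
var proxiedDialer = &websocket.Dialer{
	Proxy: http.ProxyFromEnvironment,
}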
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/k8s.io/metrics/LICENSE b/vendor/k8s.io/metrics/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/k8s.io/metrics/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go deleted file mode 100644 index 9c1153ffc..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +groupName=metrics.k8s.io - -// Package metrics defines an API for exposing metrics. -package metrics // import "k8s.io/metrics/pkg/apis/metrics" diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/register.go deleted file mode 100644 index 9384e44b9..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/register.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "metrics.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &NodeMetrics{}, - &NodeMetricsList{}, - &PodMetrics{}, - &PodMetricsList{}, - ) - return nil -} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/types.go deleted file mode 100644 index f1c58c768..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/types.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +resourceName=nodes -// +genclient:readonly -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeMetrics sets resource usage metrics of a node. -type NodeMetrics struct { - metav1.TypeMeta - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // The following fields define time interval from which metrics were - // collected from the interval [Timestamp-Window, Timestamp]. - Timestamp metav1.Time - Window metav1.Duration - - // The memory usage is the memory working set. - Usage corev1.ResourceList -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeMetricsList is a list of NodeMetrics. -type NodeMetricsList struct { - metav1.TypeMeta - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - metav1.ListMeta - - // List of node metrics. - Items []NodeMetrics -} - -// +genclient -// +resourceName=pods -// +genclient:readonly -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodMetrics sets resource usage metrics of a pod. -type PodMetrics struct { - metav1.TypeMeta - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta - - // The following fields define time interval from which metrics were - // collected from the interval [Timestamp-Window, Timestamp]. - Timestamp metav1.Time - Window metav1.Duration - - // Metrics for all containers are collected within the same time window. - Containers []ContainerMetrics -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodMetricsList is a list of PodMetrics. -type PodMetricsList struct { - metav1.TypeMeta - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - metav1.ListMeta - - // List of pod metrics. - Items []PodMetrics -} - -// ContainerMetrics sets resource usage metrics of a container. -type ContainerMetrics struct { - // Container name corresponding to the one from pod.spec.containers. - Name string - // The memory usage is the memory working set. - Usage corev1.ResourceList -} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go deleted file mode 100644 index 8e06b2205..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:conversion-gen=k8s.io/metrics/pkg/apis/metrics -// +k8s:openapi-gen=true -// +groupName=metrics.k8s.io - -// Package v1alpha1 is the v1alpha1 version of the metrics API. -package v1alpha1 // import "k8s.io/metrics/pkg/apis/metrics/v1alpha1" diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go deleted file mode 100644 index c472bacd4..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1758 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" - resource "k8s.io/apimachinery/pkg/api/resource" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *ContainerMetrics) Reset() { *m = ContainerMetrics{} } -func (*ContainerMetrics) ProtoMessage() {} -func (*ContainerMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_4bcbecebae081ea6, []int{0} -} -func (m *ContainerMetrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerMetrics.Merge(m, src) -} -func (m *ContainerMetrics) XXX_Size() int { - return m.Size() -} -func (m *ContainerMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_ContainerMetrics proto.InternalMessageInfo - -func (m *NodeMetrics) Reset() { *m = NodeMetrics{} } -func (*NodeMetrics) ProtoMessage() {} -func (*NodeMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_4bcbecebae081ea6, []int{1} -} -func (m *NodeMetrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeMetrics.Merge(m, src) -} -func (m *NodeMetrics) XXX_Size() int { - return m.Size() -} -func (m *NodeMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_NodeMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeMetrics proto.InternalMessageInfo - -func (m *NodeMetricsList) Reset() { *m = NodeMetricsList{} } -func (*NodeMetricsList) ProtoMessage() {} -func (*NodeMetricsList) Descriptor() ([]byte, []int) { - return fileDescriptor_4bcbecebae081ea6, []int{2} -} -func (m *NodeMetricsList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeMetricsList) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeMetricsList.Merge(m, src) -} -func (m *NodeMetricsList) XXX_Size() int { - return m.Size() -} -func (m *NodeMetricsList) XXX_DiscardUnknown() { - xxx_messageInfo_NodeMetricsList.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeMetricsList proto.InternalMessageInfo - -func (m *PodMetrics) Reset() { *m = PodMetrics{} } -func (*PodMetrics) ProtoMessage() {} -func (*PodMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_4bcbecebae081ea6, []int{3} -} -func (m *PodMetrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodMetrics.Merge(m, src) -} -func (m *PodMetrics) XXX_Size() int { - return m.Size() -} -func (m *PodMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_PodMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_PodMetrics proto.InternalMessageInfo - -func (m *PodMetricsList) Reset() { *m = PodMetricsList{} } -func (*PodMetricsList) ProtoMessage() {} -func (*PodMetricsList) Descriptor() ([]byte, []int) { - 
return fileDescriptor_4bcbecebae081ea6, []int{4} -} -func (m *PodMetricsList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodMetricsList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodMetricsList.Merge(m, src) -} -func (m *PodMetricsList) XXX_Size() int { - return m.Size() -} -func (m *PodMetricsList) XXX_DiscardUnknown() { - xxx_messageInfo_PodMetricsList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodMetricsList proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ContainerMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.ContainerMetrics") - proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.ContainerMetrics.UsageEntry") - proto.RegisterType((*NodeMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.NodeMetrics") - proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.NodeMetrics.UsageEntry") - proto.RegisterType((*NodeMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.NodeMetricsList") - proto.RegisterType((*PodMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.PodMetrics") - proto.RegisterType((*PodMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.PodMetricsList") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto", fileDescriptor_4bcbecebae081ea6) -} - -var fileDescriptor_4bcbecebae081ea6 = []byte{ - // 661 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x41, 0x4f, 0x13, 0x41, - 0x18, 0xed, 0xd0, 0x96, 0xc0, 0x54, 0x11, 0xf7, 0x44, 0x7a, 0xd8, 0x92, 0x9e, 0x1a, 0x13, 0x66, - 0x85, 0xa0, 0x21, 0x9c, 0xcc, 0x0a, 0x07, 0x13, 0x41, 0xd9, 0xa0, 0x46, 0xf4, 0xe0, 0x74, 0x3b, - 0x6e, 0xc7, 0xb2, 0x3b, 0x9b, 0x99, 0xd9, 0x92, 0xde, 0x8c, 0x7a, 0xf2, 0x64, 0xe2, 0x9f, 0xc2, - 0x78, 0xe1, 0xc8, 0x45, 0x90, 0xf5, 0xee, 0x0f, 0xf0, 0x64, 0x76, 0x76, 0xb6, 0x5b, 0x29, 0xc2, - 0xca, 0xc1, 0x13, 0xb7, 0xee, 0x37, 0xf3, 0xde, 0xfb, 0xe6, 0x7d, 0x6f, 0x26, 0x85, 0x5b, 0xbd, - 0x15, 0x81, 0x28, 0xb3, 0x7a, 0x51, 0x9b, 0xf0, 0x80, 0x48, 0x22, 0xac, 0x3e, 0x09, 0x3a, 0x8c, - 0x5b, 0x7a, 0xc1, 0x27, 0x92, 0x53, 0x57, 0x58, 0x61, 0xcf, 0xb3, 0x70, 0x48, 0xc5, 0xb0, 0xd0, - 0x5f, 0xc4, 0xbb, 0x61, 0x17, 0x2f, 0x5a, 0x1e, 0x09, 0x08, 0xc7, 0x92, 0x74, 0x50, 0xc8, 0x99, - 0x64, 0x46, 0x2b, 0x45, 0x22, 0xbd, 0x11, 0x85, 0x3d, 0x0f, 0x25, 0xc8, 0x61, 0x21, 0x43, 0xd6, - 0x17, 0x3c, 0x2a, 0xbb, 0x51, 0x1b, 0xb9, 0xcc, 0xb7, 0x3c, 0xe6, 0x31, 0x4b, 0x11, 0xb4, 0xa3, - 0xd7, 0xea, 0x4b, 0x7d, 0xa8, 0x5f, 0x29, 0x71, 0xbd, 0xa9, 0x5b, 0xc2, 0x21, 0xb5, 0x5c, 0xc6, - 0x89, 0xd5, 0x1f, 0x13, 0xaf, 0x2f, 0xe7, 0x7b, 0x7c, 0xec, 0x76, 0x69, 0x40, 0xf8, 0x20, 0xeb, - 0xdd, 0xe2, 0x44, 0xb0, 0x88, 0xbb, 0xe4, 0x9f, 0x50, 0xea, 0xc4, 0xf8, 0x2c, 0x2d, 0xeb, 0x6f, - 0x28, 0x1e, 0x05, 0x92, 0xfa, 0xe3, 0x32, 0x77, 0x2f, 0x02, 0x08, 0xb7, 0x4b, 0x7c, 0x7c, 0x1a, - 0xd7, 0x7c, 0x5f, 0x86, 0xb3, 0xf7, 0x59, 0x20, 0x71, 0x82, 0xd8, 0x48, 0x5d, 0x34, 0xe6, 0x61, - 0x25, 0xc0, 0x3e, 0x99, 0x03, 0xf3, 0xa0, 0x35, 0x6d, 0x5f, 0xdb, 0x3f, 0x6a, 0x94, 0xe2, 0xa3, - 0x46, 0x65, 0x13, 0xfb, 0xc4, 0x51, 0x2b, 0x46, 0x0c, 0x60, 0x35, 0x12, 0xd8, 0x23, 0x73, 0x13, - 0xf3, 0xe5, 0x56, 0x6d, 0x69, 0x1d, 0x15, 0x9d, 0x0c, 
0x3a, 0xad, 0x86, 0x9e, 0x24, 0x3c, 0xeb, - 0x81, 0xe4, 0x03, 0xfb, 0x03, 0xd0, 0x5a, 0x55, 0x55, 0xfc, 0x75, 0xd4, 0x68, 0x8c, 0x0f, 0x06, - 0x39, 0xda, 0xeb, 0x87, 0x54, 0xc8, 0x77, 0xc7, 0xe7, 0x6e, 0x49, 0x5a, 0xfe, 0x78, 0xdc, 0x58, - 0x28, 0x32, 0x3a, 0xb4, 0x15, 0xe1, 0x40, 0x52, 0x39, 0x70, 0xd2, 0xa3, 0xd5, 0xbb, 0x10, 0xe6, - 0xbd, 0x19, 0xb3, 0xb0, 0xdc, 0x23, 0x83, 0xd4, 0x13, 0x27, 0xf9, 0x69, 0xac, 0xc1, 0x6a, 0x1f, - 0xef, 0x46, 0x89, 0x07, 0xa0, 0x55, 0x5b, 0x42, 0x99, 0x07, 0xa3, 0x2a, 0x99, 0x11, 0xe8, 0x0c, - 0x15, 0x05, 0x5e, 0x9d, 0x58, 0x01, 0xcd, 0x9f, 0x15, 0x58, 0xdb, 0x64, 0x1d, 0x92, 0x0d, 0xe0, - 0x15, 0x9c, 0x4a, 0x92, 0xd1, 0xc1, 0x12, 0x2b, 0xc1, 0xda, 0xd2, 0xed, 0xf3, 0xc8, 0x95, 0xcb, - 0x18, 0xf5, 0x17, 0xd1, 0xa3, 0xf6, 0x1b, 0xe2, 0xca, 0x0d, 0x22, 0xb1, 0x6d, 0x68, 0x2b, 0x61, - 0x5e, 0x73, 0x86, 0xac, 0xc6, 0x0b, 0x38, 0x9d, 0xc4, 0x42, 0x48, 0xec, 0x87, 0xba, 0xff, 0x5b, - 0xc5, 0x24, 0xb6, 0xa9, 0x4f, 0xec, 0x9b, 0x9a, 0x7c, 0x7a, 0x3b, 0x23, 0x71, 0x72, 0x3e, 0xe3, - 0x29, 0x9c, 0xdc, 0xa3, 0x41, 0x87, 0xed, 0xcd, 0x95, 0x2f, 0x76, 0x26, 0x67, 0x5e, 0x8b, 0x38, - 0x96, 0x94, 0x05, 0xf6, 0x8c, 0x66, 0x9f, 0x7c, 0xa6, 0x58, 0x1c, 0xcd, 0x66, 0x7c, 0x1b, 0xa6, - 0xae, 0xa2, 0x52, 0x77, 0xaf, 0x78, 0xea, 0x46, 0xdc, 0xbd, 0x0a, 0x1c, 0x68, 0x7e, 0x05, 0xf0, - 0xc6, 0x88, 0x25, 0xc9, 0xc1, 0x8c, 0x97, 0x63, 0xa1, 0x2b, 0x38, 0xb7, 0x04, 0xad, 0x22, 0x37, - 0xab, 0xcd, 0x9c, 0xca, 0x2a, 0x23, 0x81, 0xdb, 0x81, 0x55, 0x2a, 0x89, 0x2f, 0xf4, 0x83, 0x71, - 0xe7, 0x52, 0xa3, 0xb3, 0xaf, 0x67, 0xe3, 0x7a, 0x90, 0x70, 0x39, 0x29, 0x65, 0xf3, 0x73, 0x19, - 0xc2, 0xc7, 0xac, 0x73, 0x75, 0x7b, 0xce, 0xbd, 0x3d, 0x01, 0x84, 0x6e, 0xf6, 0xf6, 0x0a, 0x7d, - 0x83, 0x56, 0x2f, 0xff, 0x6e, 0xe7, 0x16, 0x0d, 0x57, 0x84, 0x33, 0xa2, 0xd0, 0xfc, 0x02, 0xe0, - 0x4c, 0x3e, 0x95, 0xff, 0x10, 0xb1, 0xe7, 0x7f, 0x46, 0x6c, 0xb9, 0xf8, 0xd9, 0xf2, 0x36, 0xcf, - 0x4e, 0x98, 0xbd, 0xb9, 0x7f, 0x62, 0x96, 0x0e, 0x4e, 0xcc, 0xd2, 0xe1, 0x89, 0x59, 0x7a, 0x1b, - 0x9b, 0x60, 0x3f, 0x36, 0xc1, 0x41, 0x6c, 0x82, 0xc3, 0xd8, 0x04, 0xdf, 0x63, 0x13, 0x7c, 0xfa, - 0x61, 0x96, 0x76, 0x5a, 0x45, 0xff, 0xd8, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x8c, 0xe2, 0xb1, - 0xf3, 0x1c, 0x09, 0x00, 0x00, -} - -func (m *ContainerMetrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContainerMetrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Usage) > 0 { - keysForUsage := make([]string, 0, len(m.Usage)) - for k := range m.Usage { - keysForUsage = append(keysForUsage, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) - for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { - v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] - baseI := i - { - size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForUsage[iNdEx]) - copy(dAtA[i:], keysForUsage[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Name) - 
copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NodeMetrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeMetrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Usage) > 0 { - keysForUsage := make([]string, 0, len(m.Usage)) - for k := range m.Usage { - keysForUsage = append(keysForUsage, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) - for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { - v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] - baseI := i - { - size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForUsage[iNdEx]) - copy(dAtA[i:], keysForUsage[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NodeMetricsList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeMetricsList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodMetrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodMetrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Containers) > 0 { - for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { - 
{ - size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodMetricsList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodMetricsList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ContainerMetrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Usage) > 0 { - for k, v := range m.Usage { - _ = k - _ = v - l = ((*resource.Quantity)(&v)).Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *NodeMetrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Timestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Window.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Usage) > 0 { - for k, v := range m.Usage { - _ = k - _ = v - l = ((*resource.Quantity)(&v)).Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *NodeMetricsList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodMetrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Timestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = 
m.Window.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodMetricsList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ContainerMetrics) String() string { - if this == nil { - return "nil" - } - keysForUsage := make([]string, 0, len(this.Usage)) - for k := range this.Usage { - keysForUsage = append(keysForUsage, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) - mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" - for _, k := range keysForUsage { - mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) - } - mapStringForUsage += "}" - s := strings.Join([]string{`&ContainerMetrics{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Usage:` + mapStringForUsage + `,`, - `}`, - }, "") - return s -} -func (this *NodeMetrics) String() string { - if this == nil { - return "nil" - } - keysForUsage := make([]string, 0, len(this.Usage)) - for k := range this.Usage { - keysForUsage = append(keysForUsage, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) - mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" - for _, k := range keysForUsage { - mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) - } - mapStringForUsage += "}" - s := strings.Join([]string{`&NodeMetrics{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, - `Usage:` + mapStringForUsage + `,`, - `}`, - }, "") - return s -} -func (this *NodeMetricsList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]NodeMetrics{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NodeMetrics", "NodeMetrics", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&NodeMetricsList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodMetrics) String() string { - if this == nil { - return "nil" - } - repeatedStringForContainers := "[]ContainerMetrics{" - for _, f := range this.Containers { - repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ContainerMetrics", "ContainerMetrics", 1), `&`, ``, 1) + "," - } - repeatedStringForContainers += "}" - s := strings.Join([]string{`&PodMetrics{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), 
"Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, - `Containers:` + repeatedStringForContainers + `,`, - `}`, - }, "") - return s -} -func (this *PodMetricsList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodMetrics{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodMetrics", "PodMetrics", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodMetricsList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerMetrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerMetrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = make(k8s_io_api_core_v1.ResourceList) - } - var mapkey k8s_io_api_core_v1.ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeMetrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeMetrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeMetrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = make(k8s_io_api_core_v1.ResourceList) - } - var mapkey k8s_io_api_core_v1.ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeMetricsList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeMetricsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, NodeMetrics{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodMetrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodMetrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodMetrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Containers = append(m.Containers, ContainerMetrics{}) - if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *PodMetricsList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodMetricsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodMetrics{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 
0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto deleted file mode 100644 index d1938a85c..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.metrics.pkg.apis.metrics.v1alpha1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/metrics/pkg/apis/metrics/v1alpha1"; - -// ContainerMetrics sets resource usage metrics of a container. -message ContainerMetrics { - // Container name corresponding to the one from pod.spec.containers. - optional string name = 1; - - // The memory usage is the memory working set. - map usage = 2; -} - -// NodeMetrics sets resource usage metrics of a node. -message NodeMetrics { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The following fields define time interval from which metrics were - // collected from the interval [Timestamp-Window, Timestamp]. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; - - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; - - // The memory usage is the memory working set. - map usage = 4; -} - -// NodeMetricsList is a list of NodeMetrics. -message NodeMetricsList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of node metrics. - repeated NodeMetrics items = 2; -} - -// PodMetrics sets resource usage metrics of a pod. -message PodMetrics { - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The following fields define time interval from which metrics were - // collected from the interval [Timestamp-Window, Timestamp]. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; - - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; - - // Metrics for all containers are collected within the same time window. - repeated ContainerMetrics containers = 4; -} - -// PodMetricsList is a list of PodMetrics. -message PodMetricsList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of pod metrics. - repeated PodMetrics items = 2; -} - diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go deleted file mode 100644 index 3e5359a8e..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "metrics.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder points to a list of functions added to Scheme. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - // AddToScheme applies all the stored functions to the scheme. - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &NodeMetrics{}, - &NodeMetricsList{}, - &PodMetrics{}, - &PodMetricsList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go deleted file mode 100644 index 871a3b177..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +resourceName=nodes -// +genclient:readonly -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeMetrics sets resource usage metrics of a node. -type NodeMetrics struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The following fields define time interval from which metrics were - // collected from the interval [Timestamp-Window, Timestamp]. - Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` - Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` - - // The memory usage is the memory working set. - Usage v1.ResourceList `json:"usage" protobuf:"bytes,4,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeMetricsList is a list of NodeMetrics. -type NodeMetricsList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of node metrics. - Items []NodeMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +resourceName=pods -// +genclient:readonly -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodMetrics sets resource usage metrics of a pod. -type PodMetrics struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The following fields define time interval from which metrics were - // collected from the interval [Timestamp-Window, Timestamp]. - Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` - Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` - - // Metrics for all containers are collected within the same time window. - Containers []ContainerMetrics `json:"containers" protobuf:"bytes,4,rep,name=containers"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodMetricsList is a list of PodMetrics. -type PodMetricsList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of pod metrics. 
- Items []PodMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ContainerMetrics sets resource usage metrics of a container. -type ContainerMetrics struct { - // Container name corresponding to the one from pod.spec.containers. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // The memory usage is the memory working set. - Usage v1.ResourceList `json:"usage" protobuf:"bytes,2,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` -} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index f29d64659..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,209 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - unsafe "unsafe" - - v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - metrics "k8s.io/metrics/pkg/apis/metrics" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*ContainerMetrics)(nil), (*metrics.ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(a.(*ContainerMetrics), b.(*metrics.ContainerMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*metrics.ContainerMetrics)(nil), (*ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(a.(*metrics.ContainerMetrics), b.(*ContainerMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NodeMetrics)(nil), (*metrics.NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(a.(*NodeMetrics), b.(*metrics.NodeMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*metrics.NodeMetrics)(nil), (*NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(a.(*metrics.NodeMetrics), b.(*NodeMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NodeMetricsList)(nil), (*metrics.NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(a.(*NodeMetricsList), b.(*metrics.NodeMetricsList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*metrics.NodeMetricsList)(nil), (*NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(a.(*metrics.NodeMetricsList), b.(*NodeMetricsList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PodMetrics)(nil), (*metrics.PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_PodMetrics_To_metrics_PodMetrics(a.(*PodMetrics), b.(*metrics.PodMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*metrics.PodMetrics)(nil), (*PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_metrics_PodMetrics_To_v1alpha1_PodMetrics(a.(*metrics.PodMetrics), b.(*PodMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PodMetricsList)(nil), (*metrics.PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(a.(*PodMetricsList), b.(*metrics.PodMetricsList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*metrics.PodMetricsList)(nil), (*PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(a.(*metrics.PodMetricsList), b.(*PodMetricsList), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { - out.Name = in.Name - out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) - return nil -} - -// Convert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics is an autogenerated conversion function. 
-func Convert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { - return autoConvert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(in, out, s) -} - -func autoConvert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { - out.Name = in.Name - out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) - return nil -} - -// Convert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics is an autogenerated conversion function. -func Convert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { - return autoConvert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(in, out, s) -} - -func autoConvert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Timestamp = in.Timestamp - out.Window = in.Window - out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) - return nil -} - -// Convert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics is an autogenerated conversion function. -func Convert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { - return autoConvert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(in, out, s) -} - -func autoConvert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Timestamp = in.Timestamp - out.Window = in.Window - out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) - return nil -} - -// Convert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics is an autogenerated conversion function. -func Convert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { - return autoConvert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(in, out, s) -} - -func autoConvert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]metrics.NodeMetrics)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList is an autogenerated conversion function. -func Convert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { - return autoConvert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(in, out, s) -} - -func autoConvert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NodeMetrics)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList is an autogenerated conversion function. 
-func Convert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { - return autoConvert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(in, out, s) -} - -func autoConvert_v1alpha1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Timestamp = in.Timestamp - out.Window = in.Window - out.Containers = *(*[]metrics.ContainerMetrics)(unsafe.Pointer(&in.Containers)) - return nil -} - -// Convert_v1alpha1_PodMetrics_To_metrics_PodMetrics is an autogenerated conversion function. -func Convert_v1alpha1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { - return autoConvert_v1alpha1_PodMetrics_To_metrics_PodMetrics(in, out, s) -} - -func autoConvert_metrics_PodMetrics_To_v1alpha1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Timestamp = in.Timestamp - out.Window = in.Window - out.Containers = *(*[]ContainerMetrics)(unsafe.Pointer(&in.Containers)) - return nil -} - -// Convert_metrics_PodMetrics_To_v1alpha1_PodMetrics is an autogenerated conversion function. -func Convert_metrics_PodMetrics_To_v1alpha1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { - return autoConvert_metrics_PodMetrics_To_v1alpha1_PodMetrics(in, out, s) -} - -func autoConvert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]metrics.PodMetrics)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList is an autogenerated conversion function. -func Convert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { - return autoConvert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(in, out, s) -} - -func autoConvert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]PodMetrics)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList is an autogenerated conversion function. -func Convert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { - return autoConvert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(in, out, s) -} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 9cd8619ec..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,186 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { - *out = *in - if in.Usage != nil { - in, out := &in.Usage, &out.Usage - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. -func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { - if in == nil { - return nil - } - out := new(ContainerMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Timestamp.DeepCopyInto(&out.Timestamp) - out.Window = in.Window - if in.Usage != nil { - in, out := &in.Usage, &out.Usage - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. -func (in *NodeMetrics) DeepCopy() *NodeMetrics { - if in == nil { - return nil - } - out := new(NodeMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeMetrics) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NodeMetrics, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. -func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { - if in == nil { - return nil - } - out := new(NodeMetricsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeMetricsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Timestamp.DeepCopyInto(&out.Timestamp) - out.Window = in.Window - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]ContainerMetrics, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. 
-func (in *PodMetrics) DeepCopy() *PodMetrics { - if in == nil { - return nil - } - out := new(PodMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodMetrics) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodMetrics, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. -func (in *PodMetricsList) DeepCopy() *PodMetricsList { - if in == nil { - return nil - } - out := new(PodMetricsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodMetricsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go deleted file mode 100644 index 10f5ab9fa..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:conversion-gen=k8s.io/metrics/pkg/apis/metrics -// +k8s:openapi-gen=true -// +groupName=metrics.k8s.io - -// Package v1beta1 is the v1beta1 version of the metrics API. -package v1beta1 // import "k8s.io/metrics/pkg/apis/metrics/v1beta1" diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go deleted file mode 100644 index 20f558b89..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go +++ /dev/null @@ -1,1758 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto - -package v1beta1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" - resource "k8s.io/apimachinery/pkg/api/resource" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *ContainerMetrics) Reset() { *m = ContainerMetrics{} } -func (*ContainerMetrics) ProtoMessage() {} -func (*ContainerMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_3e7a045767f4b09f, []int{0} -} -func (m *ContainerMetrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerMetrics.Merge(m, src) -} -func (m *ContainerMetrics) XXX_Size() int { - return m.Size() -} -func (m *ContainerMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_ContainerMetrics proto.InternalMessageInfo - -func (m *NodeMetrics) Reset() { *m = NodeMetrics{} } -func (*NodeMetrics) ProtoMessage() {} -func (*NodeMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_3e7a045767f4b09f, []int{1} -} -func (m *NodeMetrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeMetrics.Merge(m, src) -} -func (m *NodeMetrics) XXX_Size() int { - return m.Size() -} -func (m *NodeMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_NodeMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeMetrics proto.InternalMessageInfo - -func (m *NodeMetricsList) Reset() { *m = NodeMetricsList{} } -func (*NodeMetricsList) ProtoMessage() {} -func (*NodeMetricsList) Descriptor() ([]byte, []int) { - return fileDescriptor_3e7a045767f4b09f, []int{2} -} -func (m *NodeMetricsList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeMetricsList) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeMetricsList.Merge(m, src) -} -func (m *NodeMetricsList) XXX_Size() int { - return m.Size() -} -func (m *NodeMetricsList) XXX_DiscardUnknown() { - xxx_messageInfo_NodeMetricsList.DiscardUnknown(m) -} - -var xxx_messageInfo_NodeMetricsList proto.InternalMessageInfo - -func (m *PodMetrics) Reset() { *m = 
PodMetrics{} } -func (*PodMetrics) ProtoMessage() {} -func (*PodMetrics) Descriptor() ([]byte, []int) { - return fileDescriptor_3e7a045767f4b09f, []int{3} -} -func (m *PodMetrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodMetrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodMetrics.Merge(m, src) -} -func (m *PodMetrics) XXX_Size() int { - return m.Size() -} -func (m *PodMetrics) XXX_DiscardUnknown() { - xxx_messageInfo_PodMetrics.DiscardUnknown(m) -} - -var xxx_messageInfo_PodMetrics proto.InternalMessageInfo - -func (m *PodMetricsList) Reset() { *m = PodMetricsList{} } -func (*PodMetricsList) ProtoMessage() {} -func (*PodMetricsList) Descriptor() ([]byte, []int) { - return fileDescriptor_3e7a045767f4b09f, []int{4} -} -func (m *PodMetricsList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodMetricsList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodMetricsList.Merge(m, src) -} -func (m *PodMetricsList) XXX_Size() int { - return m.Size() -} -func (m *PodMetricsList) XXX_DiscardUnknown() { - xxx_messageInfo_PodMetricsList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodMetricsList proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ContainerMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.ContainerMetrics") - proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.ContainerMetrics.UsageEntry") - proto.RegisterType((*NodeMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetrics") - proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetrics.UsageEntry") - proto.RegisterType((*NodeMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetricsList") - proto.RegisterType((*PodMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.PodMetrics") - proto.RegisterType((*PodMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.PodMetricsList") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto", fileDescriptor_3e7a045767f4b09f) -} - -var fileDescriptor_3e7a045767f4b09f = []byte{ - // 661 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xbf, 0x6f, 0x13, 0x3f, - 0x1c, 0x8d, 0x9b, 0xa4, 0x6a, 0x9d, 0xef, 0xb7, 0x94, 0x9b, 0xaa, 0x0c, 0x97, 0x2a, 0x4b, 0x2b, - 0xa4, 0xda, 0xb4, 0x54, 0xa8, 0xb0, 0x20, 0x1d, 0x65, 0x40, 0xa2, 0xa5, 0x9c, 0xca, 0x6f, 0x06, - 0x9c, 0x8b, 0xb9, 0x98, 0x70, 0xe7, 0xc8, 0xf6, 0xa5, 0xca, 0x86, 0x2a, 0x26, 0x26, 0xc4, 0x5f, - 0x15, 0x31, 0x75, 0xec, 0x80, 0x5a, 0x12, 0x66, 0xfe, 0x01, 0x26, 0x74, 0x3e, 0x5f, 0x2e, 0x34, - 0xa5, 0x3d, 0x3a, 0x30, 0x75, 0xcb, 0x7d, 0xec, 0xf7, 0xde, 0xc7, 0xef, 0xf3, 0x6c, 0x05, 0xee, - 0xb4, 0x37, 0x24, 0x62, 0x1c, 0xb7, 0xa3, 0x06, 0x15, 0x21, 0x55, 0x54, 0xe2, 0x2e, 0x0d, 0x9b, - 0x5c, 0x60, 0xb3, 0x10, 0x50, 0x25, 0x98, 0x27, 0x71, 0xa7, 0xed, 0x63, 0xd2, 0x61, 0x72, 0x54, - 0xe8, 0xae, 0x36, 0xa8, 0x22, 0xab, 0xd8, 0xa7, 0x21, 0x15, 0x44, 0xd1, 0x26, 0xea, 0x08, 0xae, - 0xb8, 0xb5, 0x94, 0x00, 0x91, 
0xd9, 0x87, 0x3a, 0x6d, 0x1f, 0xc5, 0xc0, 0x51, 0xc1, 0x00, 0xab, - 0x2b, 0x3e, 0x53, 0xad, 0xa8, 0x81, 0x3c, 0x1e, 0x60, 0x9f, 0xfb, 0x1c, 0x6b, 0x7c, 0x23, 0x7a, - 0xa3, 0xbf, 0xf4, 0x87, 0xfe, 0x95, 0xf0, 0x56, 0xeb, 0xa6, 0x21, 0xd2, 0x61, 0xd8, 0xe3, 0x82, - 0xe2, 0xee, 0x84, 0x76, 0x75, 0x3d, 0xdb, 0x13, 0x10, 0xaf, 0xc5, 0x42, 0x2a, 0x7a, 0x69, 0xe7, - 0x58, 0x50, 0xc9, 0x23, 0xe1, 0xd1, 0xbf, 0x42, 0xe9, 0xf3, 0x92, 0xd3, 0xb4, 0xf0, 0x9f, 0x50, - 0x22, 0x0a, 0x15, 0x0b, 0x26, 0x65, 0x6e, 0x9e, 0x07, 0x90, 0x5e, 0x8b, 0x06, 0xe4, 0x24, 0xae, - 0xbe, 0x5f, 0x84, 0xf3, 0x77, 0x79, 0xa8, 0x48, 0x8c, 0xd8, 0x4a, 0x4c, 0xb4, 0x16, 0x61, 0x29, - 0x24, 0x01, 0x5d, 0x00, 0x8b, 0x60, 0x79, 0xd6, 0xf9, 0xaf, 0x7f, 0x54, 0x2b, 0x0c, 0x8f, 0x6a, - 0xa5, 0x6d, 0x12, 0x50, 0x57, 0xaf, 0x58, 0x03, 0x00, 0xcb, 0x91, 0x24, 0x3e, 0x5d, 0x98, 0x5a, - 0x2c, 0x2e, 0x57, 0xd6, 0x36, 0x51, 0xce, 0xc1, 0xa0, 0x93, 0x62, 0xe8, 0x71, 0x4c, 0x73, 0x2f, - 0x54, 0xa2, 0xe7, 0x7c, 0x00, 0x46, 0xaa, 0xac, 0x8b, 0x3f, 0x8f, 0x6a, 0xb5, 0xc9, 0xb9, 0x20, - 0xd7, 0x58, 0xfd, 0x80, 0x49, 0xb5, 0x7f, 0x7c, 0xe6, 0x96, 0xb8, 0xe3, 0x8f, 0xc7, 0xb5, 0x95, - 0x3c, 0x93, 0x43, 0x8f, 0x22, 0x12, 0x2a, 0xa6, 0x7a, 0x6e, 0x72, 0xb2, 0x6a, 0x0b, 0xc2, 0xac, - 0x37, 0x6b, 0x1e, 0x16, 0xdb, 0xb4, 0x97, 0x58, 0xe2, 0xc6, 0x3f, 0xad, 0x4d, 0x58, 0xee, 0x92, - 0x77, 0x51, 0x6c, 0x01, 0x58, 0xae, 0xac, 0xa1, 0xd4, 0x82, 0x71, 0x95, 0xd4, 0x07, 0x74, 0x8a, - 0x8a, 0x06, 0xdf, 0x9e, 0xda, 0x00, 0xf5, 0x1f, 0x25, 0x58, 0xd9, 0xe6, 0x4d, 0x9a, 0xfa, 0xff, - 0x1a, 0xce, 0xc4, 0xc1, 0x68, 0x12, 0x45, 0xb4, 0x60, 0x65, 0xed, 0xfa, 0x59, 0xe4, 0xda, 0x64, - 0x82, 0xba, 0xab, 0xe8, 0x61, 0xe3, 0x2d, 0xf5, 0xd4, 0x16, 0x55, 0xc4, 0xb1, 0x8c, 0x95, 0x30, - 0xab, 0xb9, 0x23, 0x56, 0xeb, 0x25, 0x9c, 0x8d, 0x53, 0x21, 0x15, 0x09, 0x3a, 0xa6, 0xff, 0x6b, - 0xf9, 0x24, 0x76, 0x59, 0x40, 0x9d, 0xab, 0x86, 0x7c, 0x76, 0x37, 0x25, 0x71, 0x33, 0x3e, 0xeb, - 0x09, 0x9c, 0xde, 0x63, 0x61, 0x93, 0xef, 0x2d, 0x14, 0xcf, 0x77, 0x26, 0x63, 0xde, 0x8c, 0x04, - 0x51, 0x8c, 0x87, 0xce, 0x9c, 0x61, 0x9f, 0x7e, 0xaa, 0x59, 0x5c, 0xc3, 0x66, 0x7d, 0x1d, 0x85, - 0xae, 0xa4, 0x43, 0x77, 0x27, 0x77, 0xe8, 0xc6, 0xcc, 0xbd, 0xcc, 0x1b, 0xa8, 0x7f, 0x01, 0xf0, - 0xca, 0x98, 0x25, 0xf1, 0xc1, 0xac, 0x57, 0x13, 0x99, 0xcb, 0x39, 0xb6, 0x18, 0xad, 0x13, 0x37, - 0x6f, 0xcc, 0x9c, 0x49, 0x2b, 0x63, 0x79, 0x7b, 0x0e, 0xcb, 0x4c, 0xd1, 0x40, 0x9a, 0xe7, 0x62, - 0xfd, 0x22, 0x93, 0x73, 0xfe, 0x4f, 0xa7, 0x75, 0x3f, 0xa6, 0x72, 0x13, 0xc6, 0xfa, 0xe7, 0x22, - 0x84, 0x3b, 0xbc, 0x79, 0x79, 0x77, 0xce, 0xbc, 0x3b, 0x01, 0x84, 0x5e, 0xfa, 0xf2, 0x4a, 0x73, - 0x7f, 0x6e, 0x5d, 0xf8, 0xd1, 0xce, 0x1c, 0x1a, 0xad, 0x48, 0x77, 0x4c, 0xa0, 0xde, 0x07, 0x70, - 0x2e, 0x1b, 0xca, 0x3f, 0x08, 0xd8, 0xb3, 0xdf, 0x03, 0x76, 0x23, 0xf7, 0xd1, 0xb2, 0x2e, 0x4f, - 0xcf, 0x97, 0xb3, 0xd5, 0x1f, 0xd8, 0x85, 0x83, 0x81, 0x5d, 0x38, 0x1c, 0xd8, 0x85, 0xf7, 0x43, - 0x1b, 0xf4, 0x87, 0x36, 0x38, 0x18, 0xda, 0xe0, 0x70, 0x68, 0x83, 0x6f, 0x43, 0x1b, 0x7c, 0xfa, - 0x6e, 0x17, 0x5e, 0x2c, 0xe5, 0xfc, 0x47, 0xf3, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x26, 0x83, 0xd3, - 0x6d, 0x14, 0x09, 0x00, 0x00, -} - -func (m *ContainerMetrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContainerMetrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerMetrics) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Usage) > 0 { - keysForUsage := make([]string, 0, len(m.Usage)) - for k := range m.Usage { - keysForUsage = append(keysForUsage, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) - for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { - v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] - baseI := i - { - size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForUsage[iNdEx]) - copy(dAtA[i:], keysForUsage[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NodeMetrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeMetrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Usage) > 0 { - keysForUsage := make([]string, 0, len(m.Usage)) - for k := range m.Usage { - keysForUsage = append(keysForUsage, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) - for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { - v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] - baseI := i - { - size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForUsage[iNdEx]) - copy(dAtA[i:], keysForUsage[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NodeMetricsList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeMetricsList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodMetrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodMetrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Containers) > 0 { - for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodMetricsList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodMetricsList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ContainerMetrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Usage) > 0 { - for k, v := range m.Usage { - _ = k - _ = v - l = ((*resource.Quantity)(&v)).Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *NodeMetrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l 
+ sovGenerated(uint64(l)) - l = m.Timestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Window.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Usage) > 0 { - for k, v := range m.Usage { - _ = k - _ = v - l = ((*resource.Quantity)(&v)).Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *NodeMetricsList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodMetrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Timestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Window.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodMetricsList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ContainerMetrics) String() string { - if this == nil { - return "nil" - } - keysForUsage := make([]string, 0, len(this.Usage)) - for k := range this.Usage { - keysForUsage = append(keysForUsage, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) - mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" - for _, k := range keysForUsage { - mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) - } - mapStringForUsage += "}" - s := strings.Join([]string{`&ContainerMetrics{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Usage:` + mapStringForUsage + `,`, - `}`, - }, "") - return s -} -func (this *NodeMetrics) String() string { - if this == nil { - return "nil" - } - keysForUsage := make([]string, 0, len(this.Usage)) - for k := range this.Usage { - keysForUsage = append(keysForUsage, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) - mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" - for _, k := range keysForUsage { - mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) - } - mapStringForUsage += "}" - s := strings.Join([]string{`&NodeMetrics{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, - `Usage:` + mapStringForUsage + `,`, - `}`, - }, "") - return s -} -func (this *NodeMetricsList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]NodeMetrics{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), 
"NodeMetrics", "NodeMetrics", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&NodeMetricsList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodMetrics) String() string { - if this == nil { - return "nil" - } - repeatedStringForContainers := "[]ContainerMetrics{" - for _, f := range this.Containers { - repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ContainerMetrics", "ContainerMetrics", 1), `&`, ``, 1) + "," - } - repeatedStringForContainers += "}" - s := strings.Join([]string{`&PodMetrics{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, - `Containers:` + repeatedStringForContainers + `,`, - `}`, - }, "") - return s -} -func (this *PodMetricsList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodMetrics{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodMetrics", "PodMetrics", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodMetricsList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerMetrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerMetrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = make(k8s_io_api_core_v1.ResourceList) - } - var mapkey k8s_io_api_core_v1.ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeMetrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeMetrics: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeMetrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = make(k8s_io_api_core_v1.ResourceList) - } - var mapkey k8s_io_api_core_v1.ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - 
} - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeMetricsList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeMetricsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, NodeMetrics{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodMetrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodMetrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodMetrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Containers", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Containers = append(m.Containers, ContainerMetrics{}) - if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodMetricsList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodMetricsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodMetrics{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for 
iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto deleted file mode 100644 index b2c1f6d73..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = "proto2"; - -package k8s.io.metrics.pkg.apis.metrics.v1beta1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "k8s.io/metrics/pkg/apis/metrics/v1beta1"; - -// ContainerMetrics sets resource usage metrics of a container. -message ContainerMetrics { - // Container name corresponding to the one from pod.spec.containers. - optional string name = 1; - - // The memory usage is the memory working set. - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> usage = 2; -} - -// NodeMetrics sets resource usage metrics of a node. -message NodeMetrics { - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The following fields define time interval from which metrics were - // collected from the interval [Timestamp-Window, Timestamp]. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; - - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; - - // The memory usage is the memory working set. - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> usage = 4; -} - -// NodeMetricsList is a list of NodeMetrics. -message NodeMetricsList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of node metrics. - repeated NodeMetrics items = 2; -} - -// PodMetrics sets resource usage metrics of a pod. -message PodMetrics { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The following fields define time interval from which metrics were - // collected from the interval [Timestamp-Window, Timestamp]. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; - - optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; - - // Metrics for all containers are collected within the same time window. - repeated ContainerMetrics containers = 4; -} - -// PodMetricsList is a list of PodMetrics. -message PodMetricsList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of pod metrics. - repeated PodMetrics items = 2; -} - diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go deleted file mode 100644 index 205d253c7..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "metrics.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder points to a list of functions added to Scheme. 
- SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - // AddToScheme applies all the stored functions to the scheme. - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &NodeMetrics{}, - &NodeMetricsList{}, - &PodMetrics{}, - &PodMetricsList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go deleted file mode 100644 index 530797b5b..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +resourceName=nodes -// +genclient:readonly -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeMetrics sets resource usage metrics of a node. -type NodeMetrics struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The following fields define time interval from which metrics were - // collected from the interval [Timestamp-Window, Timestamp]. - Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` - Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` - - // The memory usage is the memory working set. - Usage v1.ResourceList `json:"usage" protobuf:"bytes,4,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeMetricsList is a list of NodeMetrics. -type NodeMetricsList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of node metrics. - Items []NodeMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +resourceName=pods -// +genclient:readonly -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodMetrics sets resource usage metrics of a pod. -type PodMetrics struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The following fields define time interval from which metrics were - // collected from the interval [Timestamp-Window, Timestamp]. - Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` - Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` - - // Metrics for all containers are collected within the same time window. - Containers []ContainerMetrics `json:"containers" protobuf:"bytes,4,rep,name=containers"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodMetricsList is a list of PodMetrics. -type PodMetricsList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of pod metrics. - Items []PodMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ContainerMetrics sets resource usage metrics of a container. -type ContainerMetrics struct { - // Container name corresponding to the one from pod.spec.containers. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // The memory usage is the memory working set. - Usage v1.ResourceList `json:"usage" protobuf:"bytes,2,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` -} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 112c4c707..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,209 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1beta1 - -import ( - unsafe "unsafe" - - v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - metrics "k8s.io/metrics/pkg/apis/metrics" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*ContainerMetrics)(nil), (*metrics.ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(a.(*ContainerMetrics), b.(*metrics.ContainerMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*metrics.ContainerMetrics)(nil), (*ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(a.(*metrics.ContainerMetrics), b.(*ContainerMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NodeMetrics)(nil), (*metrics.NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(a.(*NodeMetrics), b.(*metrics.NodeMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*metrics.NodeMetrics)(nil), (*NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(a.(*metrics.NodeMetrics), b.(*NodeMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NodeMetricsList)(nil), (*metrics.NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(a.(*NodeMetricsList), b.(*metrics.NodeMetricsList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*metrics.NodeMetricsList)(nil), (*NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(a.(*metrics.NodeMetricsList), b.(*NodeMetricsList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PodMetrics)(nil), (*metrics.PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_PodMetrics_To_metrics_PodMetrics(a.(*PodMetrics), b.(*metrics.PodMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*metrics.PodMetrics)(nil), (*PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_metrics_PodMetrics_To_v1beta1_PodMetrics(a.(*metrics.PodMetrics), b.(*PodMetrics), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PodMetricsList)(nil), (*metrics.PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(a.(*PodMetricsList), b.(*metrics.PodMetricsList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*metrics.PodMetricsList)(nil), (*PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(a.(*metrics.PodMetricsList), b.(*PodMetricsList), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { - out.Name = in.Name - out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) - return nil -} - -// Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics is an autogenerated conversion function. 
-func Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { - return autoConvert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in, out, s) -} - -func autoConvert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { - out.Name = in.Name - out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) - return nil -} - -// Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics is an autogenerated conversion function. -func Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { - return autoConvert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in, out, s) -} - -func autoConvert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Timestamp = in.Timestamp - out.Window = in.Window - out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) - return nil -} - -// Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics is an autogenerated conversion function. -func Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { - return autoConvert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in, out, s) -} - -func autoConvert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Timestamp = in.Timestamp - out.Window = in.Window - out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) - return nil -} - -// Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics is an autogenerated conversion function. -func Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { - return autoConvert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in, out, s) -} - -func autoConvert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]metrics.NodeMetrics)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList is an autogenerated conversion function. -func Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { - return autoConvert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in, out, s) -} - -func autoConvert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]NodeMetrics)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList is an autogenerated conversion function. 
-func Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { - return autoConvert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in, out, s) -} - -func autoConvert_v1beta1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Timestamp = in.Timestamp - out.Window = in.Window - out.Containers = *(*[]metrics.ContainerMetrics)(unsafe.Pointer(&in.Containers)) - return nil -} - -// Convert_v1beta1_PodMetrics_To_metrics_PodMetrics is an autogenerated conversion function. -func Convert_v1beta1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { - return autoConvert_v1beta1_PodMetrics_To_metrics_PodMetrics(in, out, s) -} - -func autoConvert_metrics_PodMetrics_To_v1beta1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Timestamp = in.Timestamp - out.Window = in.Window - out.Containers = *(*[]ContainerMetrics)(unsafe.Pointer(&in.Containers)) - return nil -} - -// Convert_metrics_PodMetrics_To_v1beta1_PodMetrics is an autogenerated conversion function. -func Convert_metrics_PodMetrics_To_v1beta1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { - return autoConvert_metrics_PodMetrics_To_v1beta1_PodMetrics(in, out, s) -} - -func autoConvert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]metrics.PodMetrics)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList is an autogenerated conversion function. -func Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { - return autoConvert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in, out, s) -} - -func autoConvert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]PodMetrics)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList is an autogenerated conversion function. -func Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { - return autoConvert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in, out, s) -} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index f043d4642..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,186 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { - *out = *in - if in.Usage != nil { - in, out := &in.Usage, &out.Usage - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. -func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { - if in == nil { - return nil - } - out := new(ContainerMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Timestamp.DeepCopyInto(&out.Timestamp) - out.Window = in.Window - if in.Usage != nil { - in, out := &in.Usage, &out.Usage - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. -func (in *NodeMetrics) DeepCopy() *NodeMetrics { - if in == nil { - return nil - } - out := new(NodeMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeMetrics) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NodeMetrics, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. -func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { - if in == nil { - return nil - } - out := new(NodeMetricsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeMetricsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Timestamp.DeepCopyInto(&out.Timestamp) - out.Window = in.Window - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]ContainerMetrics, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. 
-func (in *PodMetrics) DeepCopy() *PodMetrics { - if in == nil { - return nil - } - out := new(PodMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodMetrics) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodMetrics, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. -func (in *PodMetricsList) DeepCopy() *PodMetricsList { - if in == nil { - return nil - } - out := new(PodMetricsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodMetricsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go deleted file mode 100644 index c063c9b28..000000000 --- a/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go +++ /dev/null @@ -1,186 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package metrics - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { - *out = *in - if in.Usage != nil { - in, out := &in.Usage, &out.Usage - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. -func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { - if in == nil { - return nil - } - out := new(ContainerMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Timestamp.DeepCopyInto(&out.Timestamp) - out.Window = in.Window - if in.Usage != nil { - in, out := &in.Usage, &out.Usage - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. -func (in *NodeMetrics) DeepCopy() *NodeMetrics { - if in == nil { - return nil - } - out := new(NodeMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeMetrics) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NodeMetrics, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. -func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { - if in == nil { - return nil - } - out := new(NodeMetricsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeMetricsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Timestamp.DeepCopyInto(&out.Timestamp) - out.Window = in.Window - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]ContainerMetrics, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. -func (in *PodMetrics) DeepCopy() *PodMetrics { - if in == nil { - return nil - } - out := new(PodMetrics) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodMetrics) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodMetrics, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. 
-func (in *PodMetricsList) DeepCopy() *PodMetricsList { - if in == nil { - return nil - } - out := new(PodMetricsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodMetricsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go deleted file mode 100644 index f5fba6d0c..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package versioned - -import ( - "fmt" - "net/http" - - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" - metricsv1alpha1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1" - metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - MetricsV1alpha1() metricsv1alpha1.MetricsV1alpha1Interface - MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface -} - -// Clientset contains the clients for groups. -type Clientset struct { - *discovery.DiscoveryClient - metricsV1alpha1 *metricsv1alpha1.MetricsV1alpha1Client - metricsV1beta1 *metricsv1beta1.MetricsV1beta1Client -} - -// MetricsV1alpha1 retrieves the MetricsV1alpha1Client -func (c *Clientset) MetricsV1alpha1() metricsv1alpha1.MetricsV1alpha1Interface { - return c.metricsV1alpha1 -} - -// MetricsV1beta1 retrieves the MetricsV1beta1Client -func (c *Clientset) MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface { - return c.metricsV1beta1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -// If config's RateLimiter is not set and QPS and Burst are acceptable, -// NewForConfig will generate a rate-limiter in configShallowCopy. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - - if configShallowCopy.UserAgent == "" { - configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() - } - - // share the transport between all clients - httpClient, err := rest.HTTPClientFor(&configShallowCopy) - if err != nil { - return nil, err - } - - return NewForConfigAndClient(&configShallowCopy, httpClient) -} - -// NewForConfigAndClient creates a new Clientset for the given config and http client. -// Note the http client provided takes precedence over the configured transport values. 
-// If config's RateLimiter is not set and QPS and Burst are acceptable, -// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. -func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - if configShallowCopy.Burst <= 0 { - return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") - } - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - - var cs Clientset - var err error - cs.metricsV1alpha1, err = metricsv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } - cs.metricsV1beta1, err = metricsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - cs, err := NewForConfig(c) - if err != nil { - panic(err) - } - return cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.metricsV1alpha1 = metricsv1alpha1.New(c) - cs.metricsV1beta1 = metricsv1beta1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/doc.go deleted file mode 100644 index 41721ca52..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package versioned diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go deleted file mode 100644 index 7dc375616..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go deleted file mode 100644 index a92b020a9..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - metricsv1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" - metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - metricsv1alpha1.AddToScheme, - metricsv1beta1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(Scheme)) -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go deleted file mode 100644 index df51baa4d..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package v1alpha1 diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go deleted file mode 100644 index e8fc33bbb..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type NodeMetricsExpansion interface{} - -type PodMetricsExpansion interface{} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go deleted file mode 100644 index efc23042d..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "net/http" - - rest "k8s.io/client-go/rest" - v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" - "k8s.io/metrics/pkg/client/clientset/versioned/scheme" -) - -type MetricsV1alpha1Interface interface { - RESTClient() rest.Interface - NodeMetricsesGetter - PodMetricsesGetter -} - -// MetricsV1alpha1Client is used to interact with features provided by the metrics.k8s.io group. -type MetricsV1alpha1Client struct { - restClient rest.Interface -} - -func (c *MetricsV1alpha1Client) NodeMetricses() NodeMetricsInterface { - return newNodeMetricses(c) -} - -func (c *MetricsV1alpha1Client) PodMetricses(namespace string) PodMetricsInterface { - return newPodMetricses(c, namespace) -} - -// NewForConfig creates a new MetricsV1alpha1Client for the given config. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*MetricsV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(&config) - if err != nil { - return nil, err - } - return NewForConfigAndClient(&config, httpClient) -} - -// NewForConfigAndClient creates a new MetricsV1alpha1Client for the given config and http client. 
-// Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MetricsV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientForConfigAndClient(&config, h) - if err != nil { - return nil, err - } - return &MetricsV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new MetricsV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *MetricsV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new MetricsV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *MetricsV1alpha1Client { - return &MetricsV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *MetricsV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go deleted file mode 100644 index d79163ddb..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" - scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" -) - -// NodeMetricsesGetter has a method to return a NodeMetricsInterface. -// A group's client should implement this interface. -type NodeMetricsesGetter interface { - NodeMetricses() NodeMetricsInterface -} - -// NodeMetricsInterface has methods to work with NodeMetrics resources. 
-type NodeMetricsInterface interface { - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.NodeMetrics, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.NodeMetricsList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - NodeMetricsExpansion -} - -// nodeMetricses implements NodeMetricsInterface -type nodeMetricses struct { - client rest.Interface -} - -// newNodeMetricses returns a NodeMetricses -func newNodeMetricses(c *MetricsV1alpha1Client) *nodeMetricses { - return &nodeMetricses{ - client: c.RESTClient(), - } -} - -// Get takes name of the nodeMetrics, and returns the corresponding nodeMetrics object, and an error if there is any. -func (c *nodeMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeMetrics, err error) { - result = &v1alpha1.NodeMetrics{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NodeMetricses that match those selectors. -func (c *nodeMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodeMetricsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.NodeMetricsList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodeMetricses. -func (c *nodeMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go deleted file mode 100644 index 49d57c8e8..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" - scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" -) - -// PodMetricsesGetter has a method to return a PodMetricsInterface. -// A group's client should implement this interface. 
-type PodMetricsesGetter interface { - PodMetricses(namespace string) PodMetricsInterface -} - -// PodMetricsInterface has methods to work with PodMetrics resources. -type PodMetricsInterface interface { - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodMetrics, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodMetricsList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - PodMetricsExpansion -} - -// podMetricses implements PodMetricsInterface -type podMetricses struct { - client rest.Interface - ns string -} - -// newPodMetricses returns a PodMetricses -func newPodMetricses(c *MetricsV1alpha1Client, namespace string) *podMetricses { - return &podMetricses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podMetrics, and returns the corresponding podMetrics object, and an error if there is any. -func (c *podMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodMetrics, err error) { - result = &v1alpha1.PodMetrics{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodMetricses that match those selectors. -func (c *podMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodMetricsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PodMetricsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podMetricses. -func (c *podMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go deleted file mode 100644 index 771101956..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package v1beta1 diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go deleted file mode 100644 index a89ca3c78..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type NodeMetricsExpansion interface{} - -type PodMetricsExpansion interface{} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go deleted file mode 100644 index 7a02cea2e..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "net/http" - - rest "k8s.io/client-go/rest" - v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" - "k8s.io/metrics/pkg/client/clientset/versioned/scheme" -) - -type MetricsV1beta1Interface interface { - RESTClient() rest.Interface - NodeMetricsesGetter - PodMetricsesGetter -} - -// MetricsV1beta1Client is used to interact with features provided by the metrics.k8s.io group. -type MetricsV1beta1Client struct { - restClient rest.Interface -} - -func (c *MetricsV1beta1Client) NodeMetricses() NodeMetricsInterface { - return newNodeMetricses(c) -} - -func (c *MetricsV1beta1Client) PodMetricses(namespace string) PodMetricsInterface { - return newPodMetricses(c, namespace) -} - -// NewForConfig creates a new MetricsV1beta1Client for the given config. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*MetricsV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(&config) - if err != nil { - return nil, err - } - return NewForConfigAndClient(&config, httpClient) -} - -// NewForConfigAndClient creates a new MetricsV1beta1Client for the given config and http client. 
-// Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MetricsV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientForConfigAndClient(&config, h) - if err != nil { - return nil, err - } - return &MetricsV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new MetricsV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *MetricsV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new MetricsV1beta1Client for the given RESTClient. -func New(c rest.Interface) *MetricsV1beta1Client { - return &MetricsV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *MetricsV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go deleted file mode 100644 index a312221ed..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" - scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" -) - -// NodeMetricsesGetter has a method to return a NodeMetricsInterface. -// A group's client should implement this interface. -type NodeMetricsesGetter interface { - NodeMetricses() NodeMetricsInterface -} - -// NodeMetricsInterface has methods to work with NodeMetrics resources. 
-type NodeMetricsInterface interface { - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NodeMetrics, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.NodeMetricsList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - NodeMetricsExpansion -} - -// nodeMetricses implements NodeMetricsInterface -type nodeMetricses struct { - client rest.Interface -} - -// newNodeMetricses returns a NodeMetricses -func newNodeMetricses(c *MetricsV1beta1Client) *nodeMetricses { - return &nodeMetricses{ - client: c.RESTClient(), - } -} - -// Get takes name of the nodeMetrics, and returns the corresponding nodeMetrics object, and an error if there is any. -func (c *nodeMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NodeMetrics, err error) { - result = &v1beta1.NodeMetrics{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NodeMetricses that match those selectors. -func (c *nodeMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NodeMetricsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.NodeMetricsList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodeMetricses. -func (c *nodeMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go deleted file mode 100644 index e66c377c2..000000000 --- a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" - scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" -) - -// PodMetricsesGetter has a method to return a PodMetricsInterface. -// A group's client should implement this interface. 
-type PodMetricsesGetter interface { - PodMetricses(namespace string) PodMetricsInterface -} - -// PodMetricsInterface has methods to work with PodMetrics resources. -type PodMetricsInterface interface { - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodMetrics, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodMetricsList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - PodMetricsExpansion -} - -// podMetricses implements PodMetricsInterface -type podMetricses struct { - client rest.Interface - ns string -} - -// newPodMetricses returns a PodMetricses -func newPodMetricses(c *MetricsV1beta1Client, namespace string) *podMetricses { - return &podMetricses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podMetrics, and returns the corresponding podMetrics object, and an error if there is any. -func (c *podMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodMetrics, err error) { - result = &v1beta1.PodMetrics{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodMetricses that match those selectors. -func (c *podMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodMetricsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.PodMetricsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podMetricses. -func (c *podMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6382b8dfd..9740139a3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -346,6 +346,9 @@ github.com/google/uuid # github.com/gorilla/mux v1.8.1 ## explicit; go 1.20 github.com/gorilla/mux +# github.com/gorilla/websocket v1.4.2 +## explicit; go 1.12 +github.com/gorilla/websocket # github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 ## explicit github.com/gregjones/httpcache @@ -1787,15 +1790,6 @@ k8s.io/kubernetes/pkg/volume/util/recyclerclient k8s.io/kubernetes/pkg/volume/util/subpath k8s.io/kubernetes/pkg/volume/util/types k8s.io/kubernetes/pkg/volume/util/volumepathhandler -# k8s.io/metrics v0.26.3 => k8s.io/metrics v0.26.3 -## explicit; go 1.19 -k8s.io/metrics/pkg/apis/metrics -k8s.io/metrics/pkg/apis/metrics/v1alpha1 -k8s.io/metrics/pkg/apis/metrics/v1beta1 -k8s.io/metrics/pkg/client/clientset/versioned -k8s.io/metrics/pkg/client/clientset/versioned/scheme -k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1 -k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1 # k8s.io/mount-utils v0.23.3 => k8s.io/mount-utils v0.26.3 ## explicit; go 1.19 k8s.io/mount-utils
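
The vendor/modules.txt hunk above records the net dependency change in this patch: github.com/gorilla/websocket v1.4.2 is now vendored as an explicit module, while the k8s.io/metrics typed clientset deleted in the earlier hunks is no longer listed. For readers unfamiliar with the new dependency, the following Go sketch shows the basic dial/write/read pattern that gorilla/websocket exposes; the endpoint URL, path, and payload are purely illustrative and are not values defined anywhere in this patch.

package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Illustrative only: the address and path below are placeholders,
	// not endpoints introduced by this patch.
	dialer := &websocket.Dialer{
		// Shown for completeness; skipping verification is only
		// acceptable against a trusted, self-signed test endpoint.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}

	conn, _, err := dialer.Dial("wss://example.internal:5678/echo", nil)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()

	// Send one text frame and block until the peer replies.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatalf("write failed: %v", err)
	}
	_, reply, err := conn.ReadMessage()
	if err != nil {
		log.Fatalf("read failed: %v", err)
	}
	fmt.Printf("reply: %s\n", reply)
}

The TLSClientConfig field is included only to show where a custom TLS setup plugs into the Dialer; real deployments should verify the server certificate rather than disabling verification.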