From bb0e2f9d2fc8c5958800e1c2cfaba723d0c665ff Mon Sep 17 00:00:00 2001 From: Guy Templeton Date: Mon, 22 May 2023 23:36:32 +0100 Subject: [PATCH] Cluster Autoscaler - 1.27.2 - Update k/k vendor to 1.27.2 --- cluster-autoscaler/go.mod | 84 ++++++++-------- cluster-autoscaler/go.sum | 79 +++++++-------- .../libcontainer/cgroups/ebpf/ebpf_linux.go | 2 +- .../runc/libcontainer/cgroups/fs/fs.go | 1 + .../libcontainer/cgroups/systemd/common.go | 66 +++++++++---- .../libcontainer/cgroups/systemd/cpuset.go | 5 + .../runc/libcontainer/cgroups/systemd/v1.go | 12 +-- .../runc/libcontainer/cgroups/systemd/v2.go | 2 +- .../runc/libcontainer/cgroups/utils.go | 6 +- .../configs/validate/validator.go | 5 +- .../runc/libcontainer/container_linux.go | 2 +- .../runc/libcontainer/eaccess_go119.go | 17 ++++ .../runc/libcontainer/eaccess_stub.go | 10 ++ .../runc/libcontainer/factory_linux.go | 11 ++- .../runc/libcontainer/init_linux.go | 5 +- .../runc/libcontainer/rootfs_linux.go | 82 ++++++++++------ .../runc/libcontainer/standard_init_linux.go | 11 ++- .../opencontainers/runc/libcontainer/sync.go | 14 +-- .../runc/libcontainer/system/linux.go | 19 ---- .../runc/libcontainer/user/user.go | 14 +-- .../k8s.io/api/batch/v1/generated.proto | 4 +- .../vendor/k8s.io/api/batch/v1/types.go | 4 +- .../batch/v1/types_swagger_doc_generated.go | 2 +- .../vendor/k8s.io/api/core/v1/generated.proto | 3 +- .../vendor/k8s.io/api/core/v1/types.go | 3 +- .../core/v1/types_swagger_doc_generated.go | 2 +- .../apimachinery/pkg/types/namespacedname.go | 3 +- .../k8s.io/apiserver/pkg/cel/common/values.go | 20 +++- .../client-go/discovery/discovery_client.go | 64 ++++++++---- .../vendor/k8s.io/client-go/openapi/client.go | 7 +- .../k8s.io/client-go/openapi/groupversion.go | 42 ++++++-- .../kube-openapi/pkg/handler/handler.go | 16 ++- .../k8s.io/kubernetes/pkg/apis/core/types.go | 3 +- .../kubernetes/pkg/features/kube_features.go | 4 +- .../pkg/kubelet/cm/devicemanager/manager.go | 24 ++++- .../pkg/kubelet/kuberuntime/helpers.go | 60 +++++++----- .../kubelet/kuberuntime/security_context.go | 11 ++- .../kubernetes/pkg/kubelet/pod_workers.go | 33 +++++-- .../kubernetes/pkg/probe/http/request.go | 2 +- .../kubernetes/pkg/proxy/ipvs/netlink.go | 5 + .../pkg/proxy/ipvs/netlink_linux.go | 28 ++++++ .../pkg/proxy/ipvs/netlink_unsupported.go | 5 + .../kubernetes/pkg/proxy/ipvs/proxier.go | 22 +++-- .../framework/preemption/preemption.go | 3 + .../scheduler/framework/runtime/framework.go | 16 ++- .../framework/runtime/instrumented_plugins.go | 54 ++++++++++ .../pkg/volume/configmap/configmap.go | 2 +- .../kubernetes/pkg/volume/csi/csi_attacher.go | 43 ++------ .../kubernetes/pkg/volume/csi/csi_mounter.go | 2 +- .../kubernetes/pkg/volume/csi/csi_util.go | 2 +- .../pkg/volume/downwardapi/downwardapi.go | 2 +- .../pkg/volume/emptydir/empty_dir.go | 2 +- .../kubernetes/pkg/volume/fc/disk_manager.go | 2 +- .../pkg/volume/flexvolume/mounter.go | 2 +- .../kubernetes/pkg/volume/gcepd/gce_pd.go | 2 +- .../pkg/volume/git_repo/git_repo.go | 2 +- .../pkg/volume/iscsi/disk_manager.go | 2 +- .../kubernetes/pkg/volume/local/local.go | 2 +- .../pkg/volume/portworx/portworx.go | 2 +- .../pkg/volume/projected/projected.go | 2 +- .../kubernetes/pkg/volume/rbd/disk_manager.go | 2 +- .../kubernetes/pkg/volume/secret/secret.go | 2 +- .../kubernetes/pkg/volume/volume_linux.go | 16 ++- .../pkg/volume/volume_unsupported.go | 2 +- .../volume/vsphere_volume/vsphere_volume.go | 2 +- cluster-autoscaler/vendor/modules.txt | 98 +++++++++---------- 
cluster-autoscaler/version/version.go | 2 +- 67 files changed, 677 insertions(+), 403 deletions(-) create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_go119.go create mode 100644 cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_stub.go create mode 100644 cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/instrumented_plugins.go diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod index c973a6c35de9..39e559f41406 100644 --- a/cluster-autoscaler/go.mod +++ b/cluster-autoscaler/go.mod @@ -30,21 +30,23 @@ require ( golang.org/x/crypto v0.8.0 golang.org/x/net v0.9.0 golang.org/x/oauth2 v0.7.0 + golang.org/x/sys v0.7.0 google.golang.org/api v0.114.0 google.golang.org/grpc v1.54.0 google.golang.org/protobuf v1.30.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.27.1 - k8s.io/apimachinery v0.27.1 - k8s.io/apiserver v0.27.1 - k8s.io/client-go v0.27.1 - k8s.io/cloud-provider v0.27.1 + k8s.io/api v0.27.2 + k8s.io/apimachinery v0.27.2 + k8s.io/apiserver v0.27.2 + k8s.io/client-go v0.27.2 + k8s.io/cloud-provider v0.27.2 k8s.io/cloud-provider-aws v1.27.0 - k8s.io/component-base v0.27.1 - k8s.io/component-helpers v0.27.1 + k8s.io/component-base v0.27.2 + k8s.io/component-helpers v0.27.2 k8s.io/klog/v2 v2.90.1 - k8s.io/kubernetes v1.27.1 + k8s.io/kubelet v0.27.2 + k8s.io/kubernetes v1.27.2 k8s.io/legacy-cloud-providers v0.0.0 k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 sigs.k8s.io/cloud-provider-azure v1.26.2 @@ -126,7 +128,7 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/runc v1.1.4 // indirect + github.com/opencontainers/runc v1.1.6 // indirect github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect github.com/opencontainers/selinux v1.10.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -163,7 +165,6 @@ require ( go.uber.org/zap v1.24.0 // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.7.0 // indirect golang.org/x/term v0.7.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect @@ -173,16 +174,15 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/controller-manager v0.27.1 // indirect + k8s.io/controller-manager v0.27.2 // indirect k8s.io/cri-api v0.0.0 // indirect k8s.io/csi-translation-lib v0.27.0 // indirect k8s.io/dynamic-resource-allocation v0.0.0 // indirect - k8s.io/kms v0.27.1 // indirect - k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c // indirect + k8s.io/kms v0.27.2 // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect k8s.io/kube-proxy v0.0.0 // indirect k8s.io/kube-scheduler v0.0.0 // indirect k8s.io/kubectl v0.0.0 // indirect - k8s.io/kubelet v0.27.1 // indirect k8s.io/mount-utils v0.26.0-alpha.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect @@ -196,62 +196,62 @@ replace github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0 replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0 -replace k8s.io/api => k8s.io/api v0.27.1 +replace 
k8s.io/api => k8s.io/api v0.27.2 -replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.1 +replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.2 -replace k8s.io/apimachinery => k8s.io/apimachinery v0.28.0-alpha.0 +replace k8s.io/apimachinery => k8s.io/apimachinery v0.27.2 -replace k8s.io/apiserver => k8s.io/apiserver v0.27.1 +replace k8s.io/apiserver => k8s.io/apiserver v0.27.2 -replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.1 +replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.2 -replace k8s.io/client-go => k8s.io/client-go v0.27.1 +replace k8s.io/client-go => k8s.io/client-go v0.27.2 -replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.1 +replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.2 -replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.1 +replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.2 -replace k8s.io/code-generator => k8s.io/code-generator v0.27.1 +replace k8s.io/code-generator => k8s.io/code-generator v0.27.2 -replace k8s.io/component-base => k8s.io/component-base v0.27.1 +replace k8s.io/component-base => k8s.io/component-base v0.27.2 -replace k8s.io/component-helpers => k8s.io/component-helpers v0.27.1 +replace k8s.io/component-helpers => k8s.io/component-helpers v0.27.2 -replace k8s.io/controller-manager => k8s.io/controller-manager v0.27.1 +replace k8s.io/controller-manager => k8s.io/controller-manager v0.27.2 replace k8s.io/cri-api => k8s.io/cri-api v0.28.0-alpha.0 -replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.1 +replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.2 -replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.1 +replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.2 -replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.1 +replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.2 -replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.1 +replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.2 -replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.1 +replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.2 -replace k8s.io/kubectl => k8s.io/kubectl v0.27.1 +replace k8s.io/kubectl => k8s.io/kubectl v0.27.2 -replace k8s.io/kubelet => k8s.io/kubelet v0.27.1 +replace k8s.io/kubelet => k8s.io/kubelet v0.27.2 -replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.1 +replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.2 -replace k8s.io/metrics => k8s.io/metrics v0.27.1 +replace k8s.io/metrics => k8s.io/metrics v0.27.2 -replace k8s.io/mount-utils => k8s.io/mount-utils v0.27.1 +replace k8s.io/mount-utils => k8s.io/mount-utils v0.27.2 -replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.1 +replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.2 -replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.1 +replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.2 -replace k8s.io/sample-controller => k8s.io/sample-controller v0.27.1 +replace k8s.io/sample-controller => k8s.io/sample-controller v0.27.2 -replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.1 +replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.2 -replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.1 +replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.2 -replace 
k8s.io/kms => k8s.io/kms v0.27.1 +replace k8s.io/kms => k8s.io/kms v0.27.2 replace k8s.io/noderesourcetopology-api => k8s.io/noderesourcetopology-api v0.27.0 diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum index 0ea3cbfcba05..c9e03027ac58 100644 --- a/cluster-autoscaler/go.sum +++ b/cluster-autoscaler/go.sum @@ -490,8 +490,9 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg= github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runc v1.1.6 h1:XbhB8IfG/EsnhNvZtNdLB0GBw92GYEFvKlhaJk9jUgA= +github.com/opencontainers/runc v1.1.6/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1161,52 +1162,52 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.27.1 h1:Z6zUGQ1Vd10tJ+gHcNNNgkV5emCyW+v2XTmn+CLjSd0= -k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E= -k8s.io/apimachinery v0.28.0-alpha.0 h1:GZf6I49h9Sjl2Rjc+jY72nEYApr1pCKEHoOP/KxWWrA= -k8s.io/apimachinery v0.28.0-alpha.0/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM= -k8s.io/apiserver v0.27.1 h1:phY+BtXjjzd+ta3a4kYbomC81azQSLa1K8jo9RBw7Lg= -k8s.io/apiserver v0.27.1/go.mod h1:UGrOjLY2KsieA9Fw6lLiTObxTb8Z1xEba4uqSuMY0WU= -k8s.io/client-go v0.27.1 h1:oXsfhW/qncM1wDmWBIuDzRHNS2tLhK3BZv512Nc59W8= -k8s.io/client-go v0.27.1/go.mod h1:f8LHMUkVb3b9N8bWturc+EDtVVVwZ7ueTVquFAJb2vA= -k8s.io/cloud-provider v0.27.1 h1:482W9e2Yp8LDgTUKrXAxT+nH4pHS2TiBElI/CnfGWac= -k8s.io/cloud-provider v0.27.1/go.mod h1:oN7Zci2Ls2dorwSNd2fMiW/6DA40+F4o2QL70p63bqo= +k8s.io/api v0.27.2 h1:+H17AJpUMvl+clT+BPnKf0E3ksMAzoBBg7CntpSuADo= +k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4= +k8s.io/apimachinery v0.27.2 h1:vBjGaKKieaIreI+oQwELalVG4d8f3YAMNpWLzDXkxeg= +k8s.io/apimachinery v0.27.2/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/apiserver v0.27.2 h1:p+tjwrcQEZDrEorCZV2/qE8osGTINPuS5ZNqWAvKm5E= +k8s.io/apiserver v0.27.2/go.mod h1:EsOf39d75rMivgvvwjJ3OW/u9n1/BmUMK5otEOJrb1Y= +k8s.io/client-go v0.27.2 h1:vDLSeuYvCHKeoQRhCXjxXO45nHVv2Ip4Fe0MfioMrhE= +k8s.io/client-go v0.27.2/go.mod h1:tY0gVmUsHrAmjzHX9zs7eCjxcBsf8IiNe7KQ52biTcQ= +k8s.io/cloud-provider v0.27.2 h1:IiQWyFtdzcPOqvrBZE9FCt0CDCx3GUcZhKkykEgKlM4= +k8s.io/cloud-provider v0.27.2/go.mod h1:QnFa2fPMEWntkpU+kOAC9MZ6DKUB9WTQmMGA0MuYoj0= k8s.io/cloud-provider-aws v1.27.0 h1:PF8YrH8QcN6JoXB3Xxlaz84SBDYMPunJuCc0cPuCWXA= k8s.io/cloud-provider-aws v1.27.0/go.mod 
h1:9vUb5mnVnReSRDBWcBxB1b0HOeEc472iOPmrnwpN9SA= -k8s.io/component-base v0.27.1 h1:kEB8p8lzi4gCs5f2SPU242vOumHJ6EOsOnDM3tTuDTM= -k8s.io/component-base v0.27.1/go.mod h1:UGEd8+gxE4YWoigz5/lb3af3Q24w98pDseXcXZjw+E0= -k8s.io/component-helpers v0.27.1 h1:uY63v834MAHuf3fBiKGQGPq/cToU5kY5SW/58Xv0gl4= -k8s.io/component-helpers v0.27.1/go.mod h1:oOpwSYW1AdL+pU7abHADwX1ZcJl+5c8mnIkvoFZNFWA= -k8s.io/controller-manager v0.27.1 h1:+4OGWAzg4JVLEauPSmyQFIfrYrYQoUsC4MbHmRuPaFU= -k8s.io/controller-manager v0.27.1/go.mod h1:oe9vKl0RPiedlCXmeVbhkDV2yX8r7C4K/B8OGaKdYtY= +k8s.io/component-base v0.27.2 h1:neju+7s/r5O4x4/txeUONNTS9r1HsPbyoPBAtHsDCpo= +k8s.io/component-base v0.27.2/go.mod h1:5UPk7EjfgrfgRIuDBFtsEFAe4DAvP3U+M8RTzoSJkpo= +k8s.io/component-helpers v0.27.2 h1:i9TgWJ6TH8lQ9x4ExHOwhVitrRpBOr7Wn8aZLbBWxkc= +k8s.io/component-helpers v0.27.2/go.mod h1:NwcpSKo1xzXtUtrUjj5NTSVWex84UPua/z0PYDcCzNo= +k8s.io/controller-manager v0.27.2 h1:S7984FVb5ajp8YqMQGAm8zXEUEl0Omw6FJlOiQU2Ne8= +k8s.io/controller-manager v0.27.2/go.mod h1:2HzIhmjKxSH5dJVjYLuJ7/v9HYluNDcHLh6ZyE6rT18= k8s.io/cri-api v0.28.0-alpha.0 h1:Z8LNc0JDsR+Y/ONTfHYW/xQoT/ZOieY2jBj9M/0eJM4= k8s.io/cri-api v0.28.0-alpha.0/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0= -k8s.io/csi-translation-lib v0.27.1 h1:D9Hw2iBZzFPriFH0FDyUFdfflYAW6S032P6Yps9sKq8= -k8s.io/csi-translation-lib v0.27.1/go.mod h1:MyBDHVDz24OOSc4FdmSZA2nkfNu+Ysu8BqjdOAcKoT8= -k8s.io/dynamic-resource-allocation v0.27.1 h1:4HsIhgO49Yv+C1Zsw4R18tzXgtuEfwChqOwDIi/AcxE= -k8s.io/dynamic-resource-allocation v0.27.1/go.mod h1:XRA0ZE3wVNd2yYSnM8rFWStrrGGvHUhz9wfmHUXkgGY= +k8s.io/csi-translation-lib v0.27.2 h1:HbwiOk+M3jIkTC+e5nxUCwmux68OguKV/g9NaHDQhzs= +k8s.io/csi-translation-lib v0.27.2/go.mod h1:S+jXLzOHm7wvroOja2VMLo9LGiIq9mS0/SyswJtWOjE= +k8s.io/dynamic-resource-allocation v0.27.2 h1:lNt4YOVoJqi+wcBesTVJ3KAfr3HnvLedO1/ZovE26pk= +k8s.io/dynamic-resource-allocation v0.27.2/go.mod h1:drwmePgR9Dc5Y3nYBHkduz+lYV2XukSTLYvV5qJOPKY= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kms v0.27.1 h1:JTSQbJb+mcobScQwF0bOmZhIwP17k8GvBsiLlA6SQqw= -k8s.io/kms v0.27.1/go.mod h1:VuTsw0uHlSycKLCkypCGxfFCjLfzf/5YMeATECd/zJA= -k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c h1:EFfsozyzZ/pggw5qNx7ftTVZdp7WZl+3ih89GEjYEK8= -k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= -k8s.io/kube-proxy v0.27.1 h1:awlTLXvZhM/A4Nsu0ma34uKR4pHxigj9vhuQ9BHfwUk= -k8s.io/kube-proxy v0.27.1/go.mod h1:6hJ7Fnt3QtD+5cpGN6MgZOOO9KbD6TvF0/BPHk+lYtQ= -k8s.io/kube-scheduler v0.27.1 h1:Tq7ff+jUZaK8fejL4uOy1CC2B+bz2acKQ7Bf7fCtnhs= -k8s.io/kube-scheduler v0.27.1/go.mod h1:NS0RUYehdV7o1YQXO2/Ym/JAq2+nA/zrVABjbVyLJA8= -k8s.io/kubectl v0.27.1 h1:9T5c5KdpburYiW8XKQSH0Uly1kMNE90aGSnbYUZNdcA= -k8s.io/kubectl v0.27.1/go.mod h1:QsAkSmrRsKTPlAFzF8kODGDl4p35BIwQnc9XFhkcsy8= -k8s.io/kubelet v0.27.1 h1:IkfZ0N9CX/g6EDis7nJw8ZsOuHcpFA6cm0pXQx0g5TY= -k8s.io/kubelet v0.27.1/go.mod h1:g3cIhpZPawo/MvsdnmcLmqDJvDPdbUFkzfyLNz03nQg= -k8s.io/kubernetes v1.27.1 h1:DFeW4Lv+kh5DyYcezOzwmQAbC3VqXAxnMyZabALiRSc= -k8s.io/kubernetes v1.27.1/go.mod h1:TTwPjSCKQ+a/NTiFKRGjvOnEaQL8wIG40nsYH8Er4bA= -k8s.io/legacy-cloud-providers v0.27.1 h1:P0bzBX7gSx0yPeG9KDSspiy/M23gvLPLbwe4pYOS9bQ= -k8s.io/legacy-cloud-providers v0.27.1/go.mod 
h1:Vhh/i+Qt/ayPR40c2q3pMswg4/W8AnHsET45SEokSig= -k8s.io/mount-utils v0.27.1 h1:RSd0wslbIuwLRaGGNAGMZ3m9FLcvukxJ3FWlOm76W2A= -k8s.io/mount-utils v0.27.1/go.mod h1:vmcjYdi2Vg1VTWY7KkhvwJVY6WDHxb/QQhiQKkR8iNs= +k8s.io/kms v0.27.2 h1:wCdmPCa3kubcVd3AssOeaVjLQSu45k5g/vruJ3iqwDU= +k8s.io/kms v0.27.2/go.mod h1:dahSqjI05J55Fo5qipzvHSRbm20d7llrSeQjjl86A7c= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/kube-proxy v0.27.2 h1:nb/ASUpYoXlueURXnY+O2IZkCZmIYOnDprFEeiwwOCY= +k8s.io/kube-proxy v0.27.2/go.mod h1:S0Dxzz/5F+RAk/9v7d42gPwwvv7WZ6IYjoXVj4kBWIY= +k8s.io/kube-scheduler v0.27.2 h1:ZsN8meIkmJ+wnFrvhi5YzIbueBeBz2xx4I/0cKgpnlg= +k8s.io/kube-scheduler v0.27.2/go.mod h1:Prpp+OHy8Ecl4ubsF2Zj7gDWYI8D1AP4ZSL8275VkOs= +k8s.io/kubectl v0.27.2 h1:sSBM2j94MHBFRWfHIWtEXWCicViQzZsb177rNsKBhZg= +k8s.io/kubectl v0.27.2/go.mod h1:GCOODtxPcrjh+EC611MqREkU8RjYBh10ldQCQ6zpFKw= +k8s.io/kubelet v0.27.2 h1:vpJnBkqQjxItEhehKG0toXoZ+G+tf4UXAOqtMJy6qgc= +k8s.io/kubelet v0.27.2/go.mod h1:1SVrHaLnuw53nQJx8036k9HjE0teDXZtbN51cYC0HSc= +k8s.io/kubernetes v1.27.2 h1:g4v9oY6u7vBUDEuq4FvC50Bbw2K7GZuvM00IIESWVf4= +k8s.io/kubernetes v1.27.2/go.mod h1:U8ZXeKBAPxeb4J4/HOaxjw1A9K6WfSH+fY2SS7CR6IM= +k8s.io/legacy-cloud-providers v0.27.2 h1:4D56C4lm+Byu4z34f0sGBkMFlUWpPUqYjaawIrXaGZQ= +k8s.io/legacy-cloud-providers v0.27.2/go.mod h1:f0NDYP0WZNN1SnID37MvJ/5KXxy3IlgO5q4IgnYfnJs= +k8s.io/mount-utils v0.27.2 h1:fEqtBdAv88xpoPr3nR0MgYs6P+2PjXyUTwd4NmqSBjY= +k8s.io/mount-utils v0.27.2/go.mod h1:vmcjYdi2Vg1VTWY7KkhvwJVY6WDHxb/QQhiQKkR8iNs= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go index 104c74a890f8..35b00aaf0552 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/ebpf/ebpf_linux.go @@ -93,7 +93,7 @@ var ( ) // Loosely based on the BPF_F_REPLACE support check in -// . +// https://github.com/cilium/ebpf/blob/v0.6.0/link/syscalls.go. 
// // TODO: move this logic to cilium/ebpf func haveBpfProgReplace() bool { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go index fb4fcc7f75bb..9e2f0ec04c8a 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs.go @@ -28,6 +28,7 @@ var subsystems = []subsystem{ &FreezerGroup{}, &RdmaGroup{}, &NameGroup{GroupName: "name=systemd", Join: true}, + &NameGroup{GroupName: "misc", Join: true}, } var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist") diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go index 45744c15c0a1..50746ae0c569 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go @@ -293,8 +293,18 @@ func generateDeviceProperties(r *configs.Resources) ([]systemdDbus.Property, err // rules separately to systemd) we can safely skip entries that don't // have a corresponding path. if _, err := os.Stat(entry.Path); err != nil { - logrus.Debugf("skipping device %s for systemd: %s", entry.Path, err) - continue + // Also check /sys/dev so that we don't depend on /dev/{block,char} + // being populated. (/dev/{block,char} is populated by udev, which + // isn't strictly required for systemd). Ironically, this happens most + // easily when starting containerd within a runc created container + // itself. + + // We don't bother with securejoin here because we create entry.Path + // right above here, so we know it's safe. + if _, err := os.Stat("/sys" + entry.Path); err != nil { + logrus.Warnf("skipping device %s for systemd: %s", entry.Path, err) + continue + } } } deviceAllowList = append(deviceAllowList, entry) @@ -343,32 +353,52 @@ func isUnitExists(err error) bool { return isDbusError(err, "org.freedesktop.systemd1.UnitExists") } -func startUnit(cm *dbusConnManager, unitName string, properties []systemdDbus.Property) error { +func startUnit(cm *dbusConnManager, unitName string, properties []systemdDbus.Property, ignoreExist bool) error { statusChan := make(chan string, 1) + retry := true + +retry: err := cm.retryOnDisconnect(func(c *systemdDbus.Conn) error { _, err := c.StartTransientUnitContext(context.TODO(), unitName, "replace", properties, statusChan) return err }) - if err == nil { - timeout := time.NewTimer(30 * time.Second) - defer timeout.Stop() - - select { - case s := <-statusChan: - close(statusChan) - // Please refer to https://pkg.go.dev/github.com/coreos/go-systemd/v22/dbus#Conn.StartUnit - if s != "done" { - resetFailedUnit(cm, unitName) - return fmt.Errorf("error creating systemd unit `%s`: got `%s`", unitName, s) - } - case <-timeout.C: + if err != nil { + if !isUnitExists(err) { + return err + } + if ignoreExist { + // TODO: remove this hack. + // This is kubelet making sure a slice exists (see + // https://github.com/opencontainers/runc/pull/1124). + return nil + } + if retry { + // In case a unit with the same name exists, this may + // be a leftover failed unit. Reset it, so systemd can + // remove it, and retry once. 
resetFailedUnit(cm, unitName) - return errors.New("Timeout waiting for systemd to create " + unitName) + retry = false + goto retry } - } else if !isUnitExists(err) { return err } + timeout := time.NewTimer(30 * time.Second) + defer timeout.Stop() + + select { + case s := <-statusChan: + close(statusChan) + // Please refer to https://pkg.go.dev/github.com/coreos/go-systemd/v22/dbus#Conn.StartUnit + if s != "done" { + resetFailedUnit(cm, unitName) + return fmt.Errorf("error creating systemd unit `%s`: got `%s`", unitName, s) + } + case <-timeout.C: + resetFailedUnit(cm, unitName) + return errors.New("Timeout waiting for systemd to create " + unitName) + } + return nil } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go index 83d10dd705fd..dd474cf1b168 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/cpuset.go @@ -51,5 +51,10 @@ func RangeToBits(str string) ([]byte, error) { // do not allow empty values return nil, errors.New("empty value") } + + // fit cpuset parsing order in systemd + for l, r := 0, len(ret)-1; l < r; l, r = l+1, r-1 { + ret[l], ret[r] = ret[r], ret[l] + } return ret, nil } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go index a74a05a5cd05..046c3056fba8 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go @@ -71,6 +71,7 @@ var legacySubsystems = []subsystem{ &fs.NetClsGroup{}, &fs.NameGroup{GroupName: "name=systemd"}, &fs.RdmaGroup{}, + &fs.NameGroup{GroupName: "misc"}, } func genV1ResourcesProperties(r *configs.Resources, cm *dbusConnManager) ([]systemdDbus.Property, error) { @@ -206,7 +207,7 @@ func (m *legacyManager) Apply(pid int) error { properties = append(properties, c.SystemdProps...) - if err := startUnit(m.dbus, unitName, properties); err != nil { + if err := startUnit(m.dbus, unitName, properties, pid == -1); err != nil { return err } @@ -273,14 +274,7 @@ func getSubsystemPath(slice, unit, subsystem string) (string, error) { return "", err } - initPath, err := cgroups.GetInitCgroup(subsystem) - if err != nil { - return "", err - } - // if pid 1 is systemd 226 or later, it will be in init.scope, not the root - initPath = strings.TrimSuffix(filepath.Clean(initPath), "init.scope") - - return filepath.Join(mountpoint, initPath, slice, unit), nil + return filepath.Join(mountpoint, slice, unit), nil } func (m *legacyManager) Freeze(state configs.FreezerState) error { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go index de0cb974d460..94d24ee45020 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v2.go @@ -284,7 +284,7 @@ func (m *unifiedManager) Apply(pid int) error { properties = append(properties, c.SystemdProps...) 
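The startUnit change above is easier to follow flattened out: on a D-Bus "unit already exists" error it either returns immediately (when the caller, such as kubelet ensuring a slice exists via Apply(-1), only needs the unit to be present) or resets the leftover failed unit and retries exactly once. A minimal standalone sketch, with tryStart, isUnitExists and resetFailedUnit as stand-ins for the runc helpers in this hunk:

    func startOnce(tryStart func() error, isUnitExists func(error) bool,
        resetFailedUnit func(), ignoreExist bool) error {
        retried := false
        for {
            err := tryStart() // StartTransientUnitContext wrapper
            if err == nil || !isUnitExists(err) {
                return err // success, or an unrelated error
            }
            if ignoreExist {
                // Caller only needs the unit to exist (Apply with pid == -1).
                return nil
            }
            if retried {
                return err
            }
            // A leftover failed unit may be shadowing the name: reset it so
            // systemd garbage-collects it, then try exactly once more.
            resetFailedUnit()
            retried = true
        }
    }
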
- if err := startUnit(m.dbus, unitName, properties); err != nil { + if err := startUnit(m.dbus, unitName, properties, pid == -1); err != nil { return fmt.Errorf("unable to start unit %q (properties %+v): %w", unitName, properties, err) } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go index b32af4ee5302..fc4ae44a4853 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -162,8 +162,10 @@ func readProcsFile(dir string) ([]int, error) { // ParseCgroupFile parses the given cgroup file, typically /proc/self/cgroup // or /proc//cgroup, into a map of subsystems to cgroup paths, e.g. -// "cpu": "/user.slice/user-1000.slice" -// "pids": "/user.slice/user-1000.slice" +// +// "cpu": "/user.slice/user-1000.slice" +// "pids": "/user.slice/user-1000.slice" +// // etc. // // Note that for cgroup v2 unified hierarchy, there are no per-controller diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go index 627621a58d86..4fbd308dadd6 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/configs/validate/validator.go @@ -131,9 +131,8 @@ func (v *ConfigValidator) cgroupnamespace(config *configs.Config) error { // convertSysctlVariableToDotsSeparator can return sysctl variables in dots separator format. // The '/' separator is also accepted in place of a '.'. // Convert the sysctl variables to dots separator format for validation. -// More info: -// https://man7.org/linux/man-pages/man8/sysctl.8.html -// https://man7.org/linux/man-pages/man5/sysctl.d.5.html +// More info: sysctl(8), sysctl.d(5). 
+// // For example: // Input sysctl variable "net/ipv4/conf/eno2.100.rp_filter" // will return the converted value "net.ipv4.conf.eno2/100.rp_filter" diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go index 9df830d8cdbb..dd61dfd3c90c 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go @@ -926,7 +926,7 @@ func (c *linuxContainer) criuSupportsExtNS(t configs.NamespaceType) bool { } func criuNsToKey(t configs.NamespaceType) string { - return "extRoot" + strings.Title(configs.NsName(t)) + "NS" + return "extRoot" + strings.Title(configs.NsName(t)) + "NS" //nolint:staticcheck // SA1019: strings.Title is deprecated } func (c *linuxContainer) handleCheckpointingExternalNamespaces(rpcOpts *criurpc.CriuOpts, t configs.NamespaceType) error { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_go119.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_go119.go new file mode 100644 index 000000000000..cc1e2079a795 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_go119.go @@ -0,0 +1,17 @@ +//go:build !go1.20 +// +build !go1.20 + +package libcontainer + +import "golang.org/x/sys/unix" + +func eaccess(path string) error { + // This check is similar to access(2) with X_OK except for + // setuid/setgid binaries where it checks against the effective + // (rather than real) uid and gid. It is not needed in go 1.20 + // and beyond and will be removed later. + + // Relies on code added in https://go-review.googlesource.com/c/sys/+/468877 + // and older CLs linked from there. + return unix.Faccessat(unix.AT_FDCWD, path, unix.X_OK, unix.AT_EACCESS) +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_stub.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_stub.go new file mode 100644 index 000000000000..7c049fd7aa02 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/eaccess_stub.go @@ -0,0 +1,10 @@ +//go:build go1.20 + +package libcontainer + +func eaccess(path string) error { + // Not needed in Go 1.20+ as the functionality is already in there + // (added by https://go.dev/cl/416115, https://go.dev/cl/414824, + // and fixed in Go 1.20.2 by https://go.dev/cl/469956). + return nil +} diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go index e6c71ac34e37..a1fa7de2d249 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go @@ -179,6 +179,12 @@ func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, err return nil, fmt.Errorf("unable to get cgroup PIDs: %w", err) } if len(pids) != 0 { + if config.Cgroups.Systemd { + // systemd cgroup driver can't add a pid to an + // existing systemd unit and will return an + // error anyway, so let's error out early. + return nil, fmt.Errorf("container's cgroup is not empty: %d process(es) found", len(pids)) + } // TODO: return an error. 
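The eaccess pair added above (eaccess_go119.go for Go < 1.20, eaccess_stub.go for Go 1.20+) hinges on a single syscall. A runnable sketch of the pre-1.20 probe; the unix.Faccessat call is the one the new file uses, while the path is only an example:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // X_OK with AT_EACCESS asks the kernel whether the *effective*
        // uid/gid may execute the file, which matters for setuid binaries.
        err := unix.Faccessat(unix.AT_FDCWD, "/bin/sh", unix.X_OK, unix.AT_EACCESS)
        fmt.Println("executable:", err == nil)
    }

On Go 1.20 and later the stub simply returns nil, because exec.LookPath itself performs this check there.
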
logrus.Warnf("container's cgroup is not empty: %d process(es) found", len(pids)) logrus.Warn("DEPRECATED: running container in a non-empty cgroup won't be supported in runc 1.2; https://github.com/opencontainers/runc/issues/3132") @@ -338,10 +344,9 @@ func (l *LinuxFactory) StartInitialization() (err error) { defer func() { if e := recover(); e != nil { - if e, ok := e.(error); ok { - err = fmt.Errorf("panic from initialization: %w, %s", e, debug.Stack()) + if ee, ok := e.(error); ok { + err = fmt.Errorf("panic from initialization: %w, %s", ee, debug.Stack()) } else { - //nolint:errorlint // here e is not of error type err = fmt.Errorf("panic from initialization: %v, %s", e, debug.Stack()) } } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go index 1e5c394c3e06..2e4c59353c83 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go @@ -411,8 +411,9 @@ func fixStdioPermissions(u *user.ExecUser) error { return &os.PathError{Op: "fstat", Path: file.Name(), Err: err} } - // Skip chown if uid is already the one we want. - if int(s.Uid) == u.Uid { + // Skip chown if uid is already the one we want or any of the STDIO descriptors + // were redirected to /dev/null. + if int(s.Uid) == u.Uid || s.Rdev == null.Rdev { continue } diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go index ec7638e4d512..c3f88fc7038b 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go @@ -329,26 +329,41 @@ func mountCgroupV2(m *configs.Mount, c *mountConfig) error { if err := os.MkdirAll(dest, 0o755); err != nil { return err } - return utils.WithProcfd(c.root, m.Destination, func(procfd string) error { - if err := mount(m.Source, m.Destination, procfd, "cgroup2", uintptr(m.Flags), m.Data); err != nil { - // when we are in UserNS but CgroupNS is not unshared, we cannot mount cgroup2 (#2158) - if errors.Is(err, unix.EPERM) || errors.Is(err, unix.EBUSY) { - src := fs2.UnifiedMountpoint - if c.cgroupns && c.cgroup2Path != "" { - // Emulate cgroupns by bind-mounting - // the container cgroup path rather than - // the whole /sys/fs/cgroup. - src = c.cgroup2Path - } - err = mount(src, m.Destination, procfd, "", uintptr(m.Flags)|unix.MS_BIND, "") - if c.rootlessCgroups && errors.Is(err, unix.ENOENT) { - err = nil - } - } - return err - } - return nil + err = utils.WithProcfd(c.root, m.Destination, func(procfd string) error { + return mount(m.Source, m.Destination, procfd, "cgroup2", uintptr(m.Flags), m.Data) }) + if err == nil || !(errors.Is(err, unix.EPERM) || errors.Is(err, unix.EBUSY)) { + return err + } + + // When we are in UserNS but CgroupNS is not unshared, we cannot mount + // cgroup2 (#2158), so fall back to bind mount. + bindM := &configs.Mount{ + Device: "bind", + Source: fs2.UnifiedMountpoint, + Destination: m.Destination, + Flags: unix.MS_BIND | m.Flags, + PropagationFlags: m.PropagationFlags, + } + if c.cgroupns && c.cgroup2Path != "" { + // Emulate cgroupns by bind-mounting the container cgroup path + // rather than the whole /sys/fs/cgroup. 
+ bindM.Source = c.cgroup2Path + } + // mountToRootfs() handles remounting for MS_RDONLY. + // No need to set c.fd here, because mountToRootfs() calls utils.WithProcfd() by itself in mountPropagate(). + err = mountToRootfs(bindM, c) + if c.rootlessCgroups && errors.Is(err, unix.ENOENT) { + // ENOENT (for `src = c.cgroup2Path`) happens when rootless runc is being executed + // outside the userns+mountns. + // + // Mask `/sys/fs/cgroup` to ensure it is read-only, even when `/sys` is mounted + // with `rbind,ro` (`runc spec --rootless` produces `rbind,ro` for `/sys`). + err = utils.WithProcfd(c.root, m.Destination, func(procfd string) error { + return maskPath(procfd, c.label) + }) + } + return err } func doTmpfsCopyUp(m *configs.Mount, rootfs, mountLabel string) (Err error) { @@ -398,32 +413,43 @@ func doTmpfsCopyUp(m *configs.Mount, rootfs, mountLabel string) (Err error) { func mountToRootfs(m *configs.Mount, c *mountConfig) error { rootfs := c.root - mountLabel := c.label - mountFd := c.fd - dest, err := securejoin.SecureJoin(rootfs, m.Destination) - if err != nil { - return err - } + // procfs and sysfs are special because we need to ensure they are actually + // mounted on a specific path in a container without any funny business. switch m.Device { case "proc", "sysfs": // If the destination already exists and is not a directory, we bail - // out This is to avoid mounting through a symlink or similar -- which + // out. This is to avoid mounting through a symlink or similar -- which // has been a "fun" attack scenario in the past. // TODO: This won't be necessary once we switch to libpathrs and we can // stop all of these symlink-exchange attacks. + dest := filepath.Clean(m.Destination) + if !strings.HasPrefix(dest, rootfs) { + // Do not use securejoin as it resolves symlinks. + dest = filepath.Join(rootfs, dest) + } if fi, err := os.Lstat(dest); err != nil { if !os.IsNotExist(err) { return err } - } else if fi.Mode()&os.ModeDir == 0 { + } else if !fi.IsDir() { return fmt.Errorf("filesystem %q must be mounted on ordinary directory", m.Device) } if err := os.MkdirAll(dest, 0o755); err != nil { return err } - // Selinux kernels do not support labeling of /proc or /sys + // Selinux kernels do not support labeling of /proc or /sys. return mountPropagate(m, rootfs, "", nil) + } + + mountLabel := c.label + mountFd := c.fd + dest, err := securejoin.SecureJoin(rootfs, m.Destination) + if err != nil { + return err + } + + switch m.Device { case "mqueue": if err := os.MkdirAll(dest, 0o755); err != nil { return err diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go index 081d1503a3f3..c09a7bed30ea 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go @@ -198,11 +198,12 @@ func (l *linuxStandardInit) Init() error { if err != nil { return err } - // exec.LookPath might return no error for an executable residing on a - // file system mounted with noexec flag, so perform this extra check - // now while we can still return a proper error. 
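The comment in mountCgroupV2 above ("mountToRootfs() handles remounting for MS_RDONLY") refers to a kernel quirk worth spelling out: most mount flags, MS_RDONLY included, are silently ignored at MS_BIND time, so a read-only bind mount takes two mount(2) calls. A general sketch of that pattern, not runc's exact code:

    package mountutil

    import "golang.org/x/sys/unix"

    // bindReadOnly binds src onto dst and then remounts the bind read-only;
    // only the second call makes MS_RDONLY take effect.
    func bindReadOnly(src, dst string) error {
        if err := unix.Mount(src, dst, "", unix.MS_BIND, ""); err != nil {
            return err
        }
        return unix.Mount("", dst, "", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY, "")
    }
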
- if err := system.Eaccess(name); err != nil { - return &os.PathError{Op: "exec", Path: name, Err: err} + // exec.LookPath in Go < 1.20 might return no error for an executable + // residing on a file system mounted with noexec flag, so perform this + // extra check now while we can still return a proper error. + // TODO: remove this once go < 1.20 is not supported. + if err := eaccess(name); err != nil { + return &os.PathError{Op: "eaccess", Path: name, Err: err} } // Set seccomp as close to execve as possible, so as few syscalls take diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/sync.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/sync.go index c9a23ef3a760..25dc28630710 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/sync.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/sync.go @@ -15,16 +15,16 @@ type syncType string // during container setup. They come in pairs (with procError being a generic // response which is followed by an &initError). // -// [ child ] <-> [ parent ] +// [ child ] <-> [ parent ] // -// procHooks --> [run hooks] -// <-- procResume +// procHooks --> [run hooks] +// <-- procResume // -// procReady --> [final setup] -// <-- procRun +// procReady --> [final setup] +// <-- procRun // -// procSeccomp --> [pick up seccomp fd with pidfd_getfd()] -// <-- procSeccompDone +// procSeccomp --> [pick up seccomp fd with pidfd_getfd()] +// <-- procSeccompDone const ( procError syncType = "procError" procReady syncType = "procReady" diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index 039059a444cc..e1d6eb18034c 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -31,25 +31,6 @@ func (p ParentDeathSignal) Set() error { return SetParentDeathSignal(uintptr(p)) } -// Eaccess is similar to unix.Access except for setuid/setgid binaries -// it checks against the effective (rather than real) uid and gid. -func Eaccess(path string) error { - err := unix.Faccessat2(unix.AT_FDCWD, path, unix.X_OK, unix.AT_EACCESS) - if err != unix.ENOSYS && err != unix.EPERM { //nolint:errorlint // unix errors are bare - return err - } - - // Faccessat2() not available; check if we are a set[ug]id binary. - if os.Getuid() == os.Geteuid() && os.Getgid() == os.Getegid() { - // For a non-set[ug]id binary, use access(2). - return unix.Access(path, unix.X_OK) - } - - // For a setuid/setgid binary, there is no fallback way - // so assume we can execute the binary. - return nil -} - func Execv(cmd string, args []string, env []string) error { name, err := exec.LookPath(cmd) if err != nil { diff --git a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 2473c5eaddce..a1e216683d90 100644 --- a/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/cluster-autoscaler/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -280,13 +280,13 @@ func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath // found in any entry in passwd and group respectively. 
// // Examples of valid user specifications are: -// * "" -// * "user" -// * "uid" -// * "user:group" -// * "uid:gid -// * "user:gid" -// * "uid:group" +// - "" +// - "user" +// - "uid" +// - "user:group" +// - "uid:gid +// - "user:gid" +// - "uid:group" // // It should be noted that if you specify a numeric user or group id, they will // not be evaluated as usernames (only the metadata will be filled). So attempting diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto index 181c79597da6..df4381c737fa 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/generated.proto @@ -213,8 +213,8 @@ message JobSpec { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is alpha-level. To use this field, you must enable the - // `JobPodFailurePolicy` feature gate (disabled by default). + // This field is beta-level. It can be used when the `JobPodFailurePolicy` + // feature gate is enabled (enabled by default). // +optional optional PodFailurePolicy podFailurePolicy = 11; diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go index 346676b09515..22cf9ee9cb61 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types.go @@ -252,8 +252,8 @@ type JobSpec struct { // checked against the backoffLimit. This field cannot be used in combination // with restartPolicy=OnFailure. // - // This field is alpha-level. To use this field, you must enable the - // `JobPodFailurePolicy` feature gate (disabled by default). + // This field is beta-level. It can be used when the `JobPodFailurePolicy` + // feature gate is enabled (enabled by default). // +optional PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"` diff --git a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 1f28f006cc7a..f6f3141f1897 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -115,7 +115,7 @@ var map_JobSpec = map[string]string{ "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. 
If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", - "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is alpha-level. To use this field, you must enable the `JobPodFailurePolicy` feature gate (disabled by default).", + "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto index 94e0a71156cc..8ef67ca40bd5 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/generated.proto @@ -1853,7 +1853,8 @@ message HTTPGetAction { // HTTPHeader describes a custom header to be used in HTTP probes message HTTPHeader { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. optional string name = 1; // The header field value diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go index c9bb18a2cc77..c831d5961cf5 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types.go @@ -2137,7 +2137,8 @@ type SecretEnvSource struct { // HTTPHeader describes a custom header to be used in HTTP probes type HTTPHeader struct { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. 
Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // The header field value Value string `json:"value" protobuf:"bytes,2,opt,name=value"` diff --git a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index a2cf00db87a7..a01ae3717373 100644 --- a/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/cluster-autoscaler/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -832,7 +832,7 @@ func (HTTPGetAction) SwaggerDoc() map[string]string { var map_HTTPHeader = map[string]string{ "": "HTTPHeader describes a custom header to be used in HTTP probes", - "name": "The header field name", + "name": "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.", "value": "The header field value", } diff --git a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go index 29fb4f950a40..db18ce1ce21b 100644 --- a/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go +++ b/cluster-autoscaler/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -41,7 +41,8 @@ func (n NamespacedName) String() string { // MarshalLog emits a struct containing required key/value pair func (n NamespacedName) MarshalLog() interface{} { return struct { - Name, Namespace string + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` }{ Name: n.Name, Namespace: n.Namespace, diff --git a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/common/values.go b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/common/values.go index e6d7b99757e6..d9034a80fb2a 100644 --- a/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/common/values.go +++ b/cluster-autoscaler/vendor/k8s.io/apiserver/pkg/cel/common/values.go @@ -26,9 +26,10 @@ import ( "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + "k8s.io/kube-openapi/pkg/validation/strfmt" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apiserver/pkg/cel" - "k8s.io/kube-openapi/pkg/validation/strfmt" ) // UnstructuredToVal converts a Kubernetes unstructured data element to a CEL Val. 
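The HTTPHeader documentation added above says names are "canonicalized upon output" without showing what that looks like. Assuming the canonical form is the usual Go HTTP one (net/textproto's MIME canonicalization), a runnable illustration:

    package main

    import (
        "fmt"
        "net/textproto"
    )

    func main() {
        // First letter and each letter following '-' upper-cased, rest lowered:
        fmt.Println(textproto.CanonicalMIMEHeaderKey("x-my-header")) // X-My-Header
        fmt.Println(textproto.CanonicalMIMEHeaderKey("ACCEPT"))      // Accept
    }

So "X-My-Header" and "x-my-header" in a probe spec end up as the same header, which is exactly why the doc now warns that case-variant names are understood as the same.
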
@@ -425,7 +426,22 @@ var _ = traits.Lister(&unstructuredList{}) func (t *unstructuredList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { switch typeDesc.Kind() { case reflect.Slice: - return t.elements, nil + switch t.itemsSchema.Type() { + // Workaround for https://github.com/kubernetes/kubernetes/issues/117590 until we + // resolve the desired behavior in cel-go via https://github.com/google/cel-go/issues/688 + case "string": + var result []string + for _, e := range t.elements { + s, ok := e.(string) + if !ok { + return nil, fmt.Errorf("expected all elements to be of type string, but got %T", e) + } + result = append(result, s) + } + return result, nil + default: + return t.elements, nil + } } return nil, fmt.Errorf("type conversion error from '%s' to '%s'", t.Type(), typeDesc) } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go index 641568008b7b..1253fa1f44a4 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "mime" "net/http" "net/url" "sort" @@ -58,8 +59,9 @@ const ( defaultBurst = 300 AcceptV1 = runtime.ContentTypeJSON - // Aggregated discovery content-type (currently v2beta1). NOTE: Currently, we are assuming the order - // for "g", "v", and "as" from the server. We can only compare this string if we can make that assumption. + // Aggregated discovery content-type (v2beta1). NOTE: content-type parameters + // MUST be ordered (g, v, as) for server in "Accept" header (BUT we are resilient + // to ordering when comparing returned values in "Content-Type" header). AcceptV2Beta1 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" // Prioritize aggregated discovery by placing first in the order of discovery accept types. acceptDiscoveryFormats = AcceptV2Beta1 + "," + AcceptV1 @@ -259,8 +261,16 @@ func (d *DiscoveryClient) downloadLegacy() ( var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList // Switch on content-type server responded with: aggregated or unaggregated. - switch responseContentType { - case AcceptV1: + switch { + case isV2Beta1ContentType(responseContentType): + var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList + err = json.Unmarshal(body, &aggregatedDiscovery) + if err != nil { + return nil, nil, nil, err + } + apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) + default: + // Default is unaggregated discovery v1. 
var v metav1.APIVersions err = json.Unmarshal(body, &v) if err != nil { @@ -271,15 +281,6 @@ func (d *DiscoveryClient) downloadLegacy() ( apiGroup = apiVersionsToAPIGroup(&v) } apiGroupList.Groups = []metav1.APIGroup{apiGroup} - case AcceptV2Beta1: - var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList - err = json.Unmarshal(body, &aggregatedDiscovery) - if err != nil { - return nil, nil, nil, err - } - apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) - default: - return nil, nil, nil, fmt.Errorf("Unknown discovery response content-type: %s", responseContentType) } return apiGroupList, resourcesByGV, failedGVs, nil @@ -313,13 +314,8 @@ func (d *DiscoveryClient) downloadAPIs() ( failedGVs := map[schema.GroupVersion]error{} var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList // Switch on content-type server responded with: aggregated or unaggregated. - switch responseContentType { - case AcceptV1: - err = json.Unmarshal(body, apiGroupList) - if err != nil { - return nil, nil, nil, err - } - case AcceptV2Beta1: + switch { + case isV2Beta1ContentType(responseContentType): var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList err = json.Unmarshal(body, &aggregatedDiscovery) if err != nil { @@ -327,12 +323,38 @@ func (d *DiscoveryClient) downloadAPIs() ( } apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery) default: - return nil, nil, nil, fmt.Errorf("Unknown discovery response content-type: %s", responseContentType) + // Default is unaggregated discovery v1. + err = json.Unmarshal(body, apiGroupList) + if err != nil { + return nil, nil, nil, err + } } return apiGroupList, resourcesByGV, failedGVs, nil } +// isV2Beta1ContentType checks of the content-type string is both +// "application/json" and contains the v2beta1 content-type params. +// NOTE: This function is resilient to the ordering of the +// content-type parameters, as well as parameters added by +// intermediaries such as proxies or gateways. Examples: +// +// "application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" = true +// "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io" = true +// "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8" = true +// "application/json" = false +// "application/json; charset=UTF-8" = false +func isV2Beta1ContentType(contentType string) bool { + base, params, err := mime.ParseMediaType(contentType) + if err != nil { + return false + } + return runtime.ContentTypeJSON == base && + params["g"] == "apidiscovery.k8s.io" && + params["v"] == "v2beta1" && + params["as"] == "APIGroupDiscoveryList" +} + // ServerGroups returns the supported groups, with information like supported versions and the // preferred version. 
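isV2Beta1ContentType above leans entirely on mime.ParseMediaType, which is what buys the ordering- and extra-parameter-resilience the comment describes. A runnable check:

    package main

    import (
        "fmt"
        "mime"
    )

    func main() {
        // Reordered params plus a charset added by an intermediary:
        ct := "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8"
        base, params, err := mime.ParseMediaType(ct)
        if err != nil {
            panic(err)
        }
        fmt.Println(base)         // application/json
        fmt.Println(params["g"])  // apidiscovery.k8s.io
        fmt.Println(params["v"])  // v2beta1
        fmt.Println(params["as"]) // APIGroupDiscoveryList
    }

A plain string comparison against AcceptV2Beta1 would reject both variations, which is why the switch statements above now call the helper instead of comparing content-type strings directly.
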
func (d *DiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/openapi/client.go b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/client.go index 7b58762acfd5..6a43057187e9 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/openapi/client.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/client.go @@ -19,6 +19,7 @@ package openapi import ( "context" "encoding/json" + "strings" "k8s.io/client-go/rest" "k8s.io/kube-openapi/pkg/handler3" @@ -58,7 +59,11 @@ func (c *client) Paths() (map[string]GroupVersion, error) { // Create GroupVersions for each element of the result result := map[string]GroupVersion{} for k, v := range discoMap.Paths { - result[k] = newGroupVersion(c, v) + // If the server returned a URL rooted at /openapi/v3, preserve any additional client-side prefix. + // If the server returned a URL not rooted at /openapi/v3, treat it as an actual server-relative URL. + // See https://github.com/kubernetes/kubernetes/issues/117463 for details + useClientPrefix := strings.HasPrefix(v.ServerRelativeURL, "/openapi/v3") + result[k] = newGroupVersion(c, v, useClientPrefix) } return result, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/client-go/openapi/groupversion.go b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/groupversion.go index 32133a29b8a4..601dcbe3ccb2 100644 --- a/cluster-autoscaler/vendor/k8s.io/client-go/openapi/groupversion.go +++ b/cluster-autoscaler/vendor/k8s.io/client-go/openapi/groupversion.go @@ -18,6 +18,7 @@ package openapi import ( "context" + "net/url" "k8s.io/kube-openapi/pkg/handler3" ) @@ -29,18 +30,41 @@ type GroupVersion interface { } type groupversion struct { - client *client - item handler3.OpenAPIV3DiscoveryGroupVersion + client *client + item handler3.OpenAPIV3DiscoveryGroupVersion + useClientPrefix bool } -func newGroupVersion(client *client, item handler3.OpenAPIV3DiscoveryGroupVersion) *groupversion { - return &groupversion{client: client, item: item} +func newGroupVersion(client *client, item handler3.OpenAPIV3DiscoveryGroupVersion, useClientPrefix bool) *groupversion { + return &groupversion{client: client, item: item, useClientPrefix: useClientPrefix} } func (g *groupversion) Schema(contentType string) ([]byte, error) { - return g.client.restClient.Get(). - RequestURI(g.item.ServerRelativeURL). - SetHeader("Accept", contentType). - Do(context.TODO()). - Raw() + if !g.useClientPrefix { + return g.client.restClient.Get(). + RequestURI(g.item.ServerRelativeURL). + SetHeader("Accept", contentType). + Do(context.TODO()). + Raw() + } + + locator, err := url.Parse(g.item.ServerRelativeURL) + if err != nil { + return nil, err + } + + path := g.client.restClient.Get(). + AbsPath(locator.Path). + SetHeader("Accept", contentType) + + // Other than root endpoints(openapiv3/apis), resources have hash query parameter to support etags. 
+ // However, absPath does not support handling query parameters internally, + // so that hash query parameter is added manually + for k, value := range locator.Query() { + for _, v := range value { + path.Param(k, v) + } + } + + return path.Do(context.TODO()).Raw() } diff --git a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go index 84e902646537..37cb96f1be11 100644 --- a/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go +++ b/cluster-autoscaler/vendor/k8s.io/kube-openapi/pkg/handler/handler.go @@ -22,6 +22,7 @@ import ( "fmt" "net/http" "strconv" + "sync" "time" "github.com/NYTimes/gziphandler" @@ -98,16 +99,6 @@ func NewOpenAPIServiceLazy(swagger cached.Data[*spec.Swagger]) *OpenAPIService { return o } -func (o *OpenAPIService) getSwaggerBytes() (timedSpec, string, error) { - result := o.jsonCache.Get() - return result.Data, result.Etag, result.Err -} - -func (o *OpenAPIService) getSwaggerPbBytes() (timedSpec, string, error) { - result := o.protoCache.Get() - return result.Data, result.Etag, result.Err -} - func (o *OpenAPIService) UpdateSpec(swagger *spec.Swagger) error { o.UpdateSpecLazy(cached.NewResultOK(swagger, uuid.New().String())) return nil @@ -135,6 +126,9 @@ func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handl // RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec. func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) error { + // Mutex protects the cache chain + var mutex sync.Mutex + accepted := []struct { Type string SubType string @@ -163,7 +157,9 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl continue } // serve the first matching media type in the sorted clause list + mutex.Lock() result := accepts.GetDataAndEtag.Get() + mutex.Unlock() if result.Err != nil { klog.Errorf("Error in OpenAPI handler: %s", result.Err) // only return a 503 if we have no older cache data to serve diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go index d8f657b74230..5d5b51b1bce3 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -2037,7 +2037,8 @@ type SecretEnvSource struct { // HTTPHeader describes a custom header to be used in HTTP probes type HTTPHeader struct { - // The header field name + // The header field name. + // This will be canonicalized upon output, so case-variant names will be understood as the same header. 
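The canonicalization note above is standard net/http behavior: http.Header.Add folds case-variant names into one canonical key, which is also what the probe code later in this patch relies on when it switches to headers.Add. A minimal demonstration:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := make(http.Header)
	// Case-variant names collapse to one canonical key, which is what the
	// comment above means by "canonicalized upon output".
	h.Add("x-custom-header", "a")
	h.Add("X-CUSTOM-HEADER", "b")
	fmt.Println(h) // map[X-Custom-Header:[a b]]
}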
Name string // The header field value Value string diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index dae179775042..bf641bfa8fa6 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -1078,7 +1078,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha}, - NewVolumeManagerReconstruction: {Default: true, PreRelease: featuregate.Beta}, + NewVolumeManagerReconstruction: {Default: false, PreRelease: featuregate.Beta}, // disabled for https://github.com/kubernetes/kubernetes/issues/117745 NodeLogQuery: {Default: false, PreRelease: featuregate.Alpha}, @@ -1160,7 +1160,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS NodeInclusionPolicyInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta}, - SELinuxMountReadWriteOncePod: {Default: true, PreRelease: featuregate.Beta}, + SELinuxMountReadWriteOncePod: {Default: false, PreRelease: featuregate.Beta}, // disabled for https://github.com/kubernetes/kubernetes/issues/117745 InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go index 8cb57aa8190f..7499de4460f2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go @@ -544,15 +544,29 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi return nil, fmt.Errorf("pod %q container %q changed request for resource %q from %d to %d", string(podUID), contName, resource, devices.Len(), required) } } + + klog.V(3).InfoS("Need devices to allocate for pod", "deviceNumber", needed, "resourceName", resource, "podUID", string(podUID), "containerName", contName) + healthyDevices, hasRegistered := m.healthyDevices[resource] + + // Check if resource registered with devicemanager + if !hasRegistered { + return nil, fmt.Errorf("cannot allocate unregistered device %s", resource) + } + + // Check if registered resource has healthy devices + if healthyDevices.Len() == 0 { + return nil, fmt.Errorf("no healthy devices present; cannot allocate unhealthy devices %s", resource) + } + + // Check if all the previously allocated devices are healthy + if !healthyDevices.IsSuperset(devices) { + return nil, fmt.Errorf("previously allocated devices are no longer healthy; cannot allocate unhealthy devices %s", resource) + } + if needed == 0 { // No change, no work. return nil, nil } - klog.V(3).InfoS("Need devices to allocate for pod", "deviceNumber", needed, "resourceName", resource, "podUID", string(podUID), "containerName", contName) - // Check if resource registered with devicemanager - if _, ok := m.healthyDevices[resource]; !ok { - return nil, fmt.Errorf("can't allocate unregistered device %s", resource) - } // Declare the list of allocated devices. // This will be populated and returned below. 
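The three new guards above reduce to set-membership checks on the healthy-device set. A minimal sketch using a plain map-backed set instead of k8s.io/apimachinery's sets package (checkAllocatable is a hypothetical stand-in for the logic in devicesToAllocate):

package main

import "fmt"

type stringSet map[string]struct{}

func (s stringSet) isSuperset(other stringSet) bool {
	for k := range other {
		if _, ok := s[k]; !ok {
			return false
		}
	}
	return true
}

// checkAllocatable mirrors the order of the guards above: unregistered
// resource, then no healthy devices, then previously allocated devices
// that have since become unhealthy.
func checkAllocatable(healthy map[string]stringSet, resource string, allocated stringSet) error {
	devices, registered := healthy[resource]
	if !registered {
		return fmt.Errorf("cannot allocate unregistered device %s", resource)
	}
	if len(devices) == 0 {
		return fmt.Errorf("no healthy devices present; cannot allocate unhealthy devices %s", resource)
	}
	if !devices.isSuperset(allocated) {
		return fmt.Errorf("previously allocated devices are no longer healthy; cannot allocate unhealthy devices %s", resource)
	}
	return nil
}

func main() {
	healthy := map[string]stringSet{"example.com/gpu": {"dev0": {}}}
	// dev1 was allocated earlier but is no longer healthy, so this errors.
	fmt.Println(checkAllocatable(healthy, "example.com/gpu", stringSet{"dev1": {}}))
}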
diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go index 0605ab4d328d..1e6359f56874 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/helpers.go @@ -212,32 +212,36 @@ func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus) *kubecontainer.Runtim return &kubecontainer.RuntimeStatus{Conditions: conditions} } -func fieldProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) string { +func fieldProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) (string, error) { if scmp == nil { if fallbackToRuntimeDefault { - return v1.SeccompProfileRuntimeDefault + return v1.SeccompProfileRuntimeDefault, nil } - return "" + return "", nil } if scmp.Type == v1.SeccompProfileTypeRuntimeDefault { - return v1.SeccompProfileRuntimeDefault + return v1.SeccompProfileRuntimeDefault, nil } - if scmp.Type == v1.SeccompProfileTypeLocalhost && scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 { - fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile) - return v1.SeccompLocalhostProfileNamePrefix + fname + if scmp.Type == v1.SeccompProfileTypeLocalhost { + if scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 { + fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile) + return v1.SeccompLocalhostProfileNamePrefix + fname, nil + } else { + return "", fmt.Errorf("localhostProfile must be set if seccompProfile type is Localhost.") + } } if scmp.Type == v1.SeccompProfileTypeUnconfined { - return v1.SeccompProfileNameUnconfined + return v1.SeccompProfileNameUnconfined, nil } if fallbackToRuntimeDefault { - return v1.SeccompProfileRuntimeDefault + return v1.SeccompProfileRuntimeDefault, nil } - return "" + return "", nil } func (m *kubeGenericRuntimeManager) getSeccompProfilePath(annotations map[string]string, containerName string, - podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) string { + podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) (string, error) { // container fields are applied first if containerSecContext != nil && containerSecContext.SeccompProfile != nil { return fieldProfile(containerSecContext.SeccompProfile, m.seccompProfileRoot, fallbackToRuntimeDefault) @@ -249,42 +253,46 @@ func (m *kubeGenericRuntimeManager) getSeccompProfilePath(annotations map[string } if fallbackToRuntimeDefault { - return v1.SeccompProfileRuntimeDefault + return v1.SeccompProfileRuntimeDefault, nil } - return "" + return "", nil } -func fieldSeccompProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) *runtimeapi.SecurityProfile { +func fieldSeccompProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) (*runtimeapi.SecurityProfile, error) { if scmp == nil { if fallbackToRuntimeDefault { return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, - } + }, nil } return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_Unconfined, - } + }, nil } if scmp.Type == v1.SeccompProfileTypeRuntimeDefault { return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, - } + }, nil } - if scmp.Type == v1.SeccompProfileTypeLocalhost && 
scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 { - fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile) - return &runtimeapi.SecurityProfile{ - ProfileType: runtimeapi.SecurityProfile_Localhost, - LocalhostRef: fname, + if scmp.Type == v1.SeccompProfileTypeLocalhost { + if scmp.LocalhostProfile != nil && len(*scmp.LocalhostProfile) > 0 { + fname := filepath.Join(profileRootPath, *scmp.LocalhostProfile) + return &runtimeapi.SecurityProfile{ + ProfileType: runtimeapi.SecurityProfile_Localhost, + LocalhostRef: fname, + }, nil + } else { + return nil, fmt.Errorf("localhostProfile must be set if seccompProfile type is Localhost.") } } return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_Unconfined, - } + }, nil } func (m *kubeGenericRuntimeManager) getSeccompProfile(annotations map[string]string, containerName string, - podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) *runtimeapi.SecurityProfile { + podSecContext *v1.PodSecurityContext, containerSecContext *v1.SecurityContext, fallbackToRuntimeDefault bool) (*runtimeapi.SecurityProfile, error) { // container fields are applied first if containerSecContext != nil && containerSecContext.SeccompProfile != nil { return fieldSeccompProfile(containerSecContext.SeccompProfile, m.seccompProfileRoot, fallbackToRuntimeDefault) @@ -298,10 +306,10 @@ func (m *kubeGenericRuntimeManager) getSeccompProfile(annotations map[string]str if fallbackToRuntimeDefault { return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_RuntimeDefault, - } + }, nil } return &runtimeapi.SecurityProfile{ ProfileType: runtimeapi.SecurityProfile_Unconfined, - } + }, nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go index 5e6f05b4e187..d933a7104247 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/security_context.go @@ -37,9 +37,16 @@ func (m *kubeGenericRuntimeManager) determineEffectiveSecurityContext(pod *v1.Po // TODO: Deprecated, remove after we switch to Seccomp field // set SeccompProfilePath. - synthesized.SeccompProfilePath = m.getSeccompProfilePath(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault) + var err error + synthesized.SeccompProfilePath, err = m.getSeccompProfilePath(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault) + if err != nil { + return nil, err + } - synthesized.Seccomp = m.getSeccompProfile(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault) + synthesized.Seccomp, err = m.getSeccompProfile(pod.Annotations, container.Name, pod.Spec.SecurityContext, container.SecurityContext, m.seccompDefault) + if err != nil { + return nil, err + } // set ApparmorProfile. 
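The seccomp refactor above replaces a silent empty-string fallback with an explicit error when a Localhost profile carries no LocalhostProfile reference. A condensed sketch of that branch, with the "localhost/" CRI prefix inlined (localhostProfile is a hypothetical helper, not the kubelet's real function):

package main

import (
	"fmt"
	"path/filepath"
)

// localhostProfile resolves a Localhost seccomp profile name, failing
// loudly when the profile reference is missing instead of silently
// returning an empty profile.
func localhostProfile(profileRoot string, localhostProfile *string) (string, error) {
	if localhostProfile == nil || len(*localhostProfile) == 0 {
		return "", fmt.Errorf("localhostProfile must be set if seccompProfile type is Localhost")
	}
	return "localhost/" + filepath.Join(profileRoot, *localhostProfile), nil
}

func main() {
	name := "audit.json"
	p, err := localhostProfile("/var/lib/kubelet/seccomp", &name)
	fmt.Println(p, err)
	_, err = localhostProfile("/var/lib/kubelet/seccomp", nil)
	fmt.Println(err) // localhostProfile must be set if seccompProfile type is Localhost
}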
synthesized.ApparmorProfile = apparmor.GetProfileNameFromPodAnnotations(pod.Annotations, container.Name) diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go index 8d72e1ad55ef..e5805dbcf786 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go @@ -775,16 +775,23 @@ func (p *podWorkers) UpdatePod(options UpdatePodOptions) { } // if this pod is being synced for the first time, we need to make sure it is an active pod if options.Pod != nil && (options.Pod.Status.Phase == v1.PodFailed || options.Pod.Status.Phase == v1.PodSucceeded) { - // check to see if the pod is not running and the pod is terminal. - // If this succeeds then record in the podWorker that it is terminated. + // Check to see if the pod is not running and the pod is terminal; if this succeeds then record in the podWorker that it is terminated. + // This is needed because after a kubelet restart, we need to ensure terminal pods will NOT be considered active in Pod Admission. See http://issues.k8s.io/105523 + // However, `filterOutInactivePods` considers pods that are actively terminating as active. As a result, `IsPodKnownTerminated()` needs to return true and thus `terminatedAt` needs to be set. if statusCache, err := p.podCache.Get(uid); err == nil { if isPodStatusCacheTerminal(statusCache) { + // At this point we know: + // (1) The pod is terminal based on the config source. + // (2) The pod is terminal based on the runtime cache. + // This implies that this pod had already completed `SyncTerminatingPod` sometime in the past. The pod is likely being synced for the first time due to a kubelet restart. + // These pods need to complete SyncTerminatedPod to ensure that all resources are cleaned and that the status manager makes the final status updates for the pod. + // As a result, set finished: false, to ensure a Terminated event will be sent and `SyncTerminatedPod` will run. status = &podSyncStatus{ terminatedAt: now, terminatingAt: now, syncedAt: now, startedTerminating: true, - finished: true, + finished: false, fullname: kubecontainer.BuildPodFullName(name, ns), } } @@ -1086,6 +1093,10 @@ func (p *podWorkers) cleanupUnstartedPod(pod *v1.Pod, status *podSyncStatus) { // or can be started, and updates the cached pod state so that downstream components can observe what the // pod worker goroutine is currently attempting to do. If ok is false, there is no available event. If any // of the boolean values is false, ensure the appropriate cleanup happens before returning. +// +// This method should ensure that either status.pendingUpdate is cleared and merged into status.activeUpdate, +// or, when a pod cannot be started, status.pendingUpdate remains the same. Pods that have not been started +// should never have an activeUpdate because that is exposed to downstream components on started pods.
func (p *podWorkers) startPodSync(podUID types.UID) (ctx context.Context, update podWork, canStart, canEverStart, ok bool) { p.podLock.Lock() defer p.podLock.Unlock() @@ -1159,6 +1170,8 @@ func (p *podWorkers) startPodSync(podUID types.UID) (ctx context.Context, update klog.V(4).InfoS("Pod cannot start ever", "pod", klog.KObj(update.Options.Pod), "podUID", podUID, "updateType", update.WorkType) return ctx, update, canStart, canEverStart, true case !canStart: + // this is the only path we don't start the pod, so we need to put the change back in pendingUpdate + status.pendingUpdate = &update.Options status.working = false klog.V(4).InfoS("Pod cannot start yet", "pod", klog.KObj(update.Options.Pod), "podUID", podUID) return ctx, update, canStart, canEverStart, true @@ -1545,9 +1558,17 @@ func (p *podWorkers) SyncKnownPods(desiredPods []*v1.Pod) map[types.UID]PodWorke State: status.WorkType(), Orphan: orphan, } - if status.activeUpdate != nil && status.activeUpdate.Pod != nil { - sync.HasConfig = true - sync.Static = kubetypes.IsStaticPod(status.activeUpdate.Pod) + switch { + case status.activeUpdate != nil: + if status.activeUpdate.Pod != nil { + sync.HasConfig = true + sync.Static = kubetypes.IsStaticPod(status.activeUpdate.Pod) + } + case status.pendingUpdate != nil: + if status.pendingUpdate.Pod != nil { + sync.HasConfig = true + sync.Static = kubetypes.IsStaticPod(status.pendingUpdate.Pod) + } } workers[uid] = sync } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/request.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/request.go index 4285c0a4ccbe..fb7f818b2492 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/request.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/probe/http/request.go @@ -113,7 +113,7 @@ func formatURL(scheme string, host string, port int, path string) *url.URL { func v1HeaderToHTTPHeader(headerList []v1.HTTPHeader) http.Header { headers := make(http.Header) for _, header := range headerList { - headers[header.Name] = append(headers[header.Name], header.Value) + headers.Add(header.Name, header.Value) } return headers } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go index ab0b9eaaa14d..cc173eae5c10 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink.go @@ -40,4 +40,9 @@ type NetLinkHandle interface { // Only the addresses of the current family are returned. // IPv6 link-local and loopback addresses are excluded. GetLocalAddresses(dev string) (sets.Set[string], error) + // GetAllLocalAddressesExcept return all local addresses on the node, except from the passed dev. + // This is not the same as to take the diff between GetAllLocalAddresses and GetLocalAddresses + // since an address can be assigned to many interfaces. 
This problem is described in + https://github.com/kubernetes/kubernetes/issues/114815 + GetAllLocalAddressesExcept(dev string) (sets.Set[string], error) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go index f4d2368885d9..1c0f8c2b3432 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_linux.go @@ -24,6 +24,7 @@ import ( "net" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" utilproxy "k8s.io/kubernetes/pkg/proxy/util" netutils "k8s.io/utils/net" @@ -164,3 +165,30 @@ func (h *netlinkHandle) isValidForSet(ip net.IP) bool { } return true } + +// GetAllLocalAddressesExcept returns all local addresses on the node, +// except for the passed dev. This is not the same as taking the +// diff between GetAllLocalAddresses and GetLocalAddresses since an +// address can be assigned to many interfaces. This problem is described in +// https://github.com/kubernetes/kubernetes/issues/114815 +func (h *netlinkHandle) GetAllLocalAddressesExcept(dev string) (sets.Set[string], error) { + ifaces, err := net.Interfaces() + if err != nil { + return nil, err + } + var addr []net.Addr + for _, iface := range ifaces { + if iface.Name == dev { + continue + } + ifadr, err := iface.Addrs() + if err != nil { + // This may happen if the interface was deleted. Ignore + // but log the error. + klog.ErrorS(err, "Reading addresses", "interface", iface.Name) + continue + } + addr = append(addr, ifadr...) + } + return utilproxy.AddressSet(h.isValidForSet, addr), nil +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go index 31f3fb7406b2..1cb38d3fb8f0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/netlink_unsupported.go @@ -71,6 +71,11 @@ func (h *netlinkHandle) GetLocalAddresses(dev string) (sets.Set[string], error) return nil, fmt.Errorf("netlink is not supported in this platform") } +// GetAllLocalAddressesExcept is part of the interface.
+func (h *netlinkHandle) GetAllLocalAddressesExcept(dev string) (sets.Set[string], error) { + return nil, fmt.Errorf("netlink is not supported in this platform") +} + // Must match the one in proxier_test.go func (h *netlinkHandle) isValidForSet(ip net.IP) bool { return false diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go index cf52b2fcdcee..4b676101866f 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go @@ -1013,11 +1013,10 @@ func (proxier *Proxier) syncProxyRules() { klog.ErrorS(err, "Error listing addresses binded to dummy interface") } // nodeAddressSet All addresses *except* those on the dummy interface - nodeAddressSet, err := proxier.netlinkHandle.GetAllLocalAddresses() + nodeAddressSet, err := proxier.netlinkHandle.GetAllLocalAddressesExcept(defaultDummyDevice) if err != nil { klog.ErrorS(err, "Error listing node addresses") } - nodeAddressSet = nodeAddressSet.Difference(alreadyBoundAddrs) hasNodePort := false for _, svc := range proxier.svcPortMap { @@ -1193,9 +1192,13 @@ func (proxier *Proxier) syncProxyRules() { if proxier.ipvsScheduler == "mh" { serv.Flags |= utilipvs.FlagSourceHash } - if err := proxier.syncService(svcPortNameString, serv, true, alreadyBoundAddrs); err == nil { + // We must not add the address to the dummy device if it exists on another interface + shouldBind := !nodeAddressSet.Has(serv.Address.String()) + if err := proxier.syncService(svcPortNameString, serv, shouldBind, alreadyBoundAddrs); err == nil { activeIPVSServices.Insert(serv.String()) - activeBindAddrs.Insert(serv.Address.String()) + if shouldBind { + activeBindAddrs.Insert(serv.Address.String()) + } if err := proxier.syncEndpoint(svcPortName, svcInfo.ExternalPolicyLocal(), serv); err != nil { klog.ErrorS(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv) } @@ -1296,9 +1299,13 @@ func (proxier *Proxier) syncProxyRules() { if proxier.ipvsScheduler == "mh" { serv.Flags |= utilipvs.FlagSourceHash } - if err := proxier.syncService(svcPortNameString, serv, true, alreadyBoundAddrs); err == nil { + // We must not add the address to the dummy device if it exists on another interface + shouldBind := !nodeAddressSet.Has(serv.Address.String()) + if err := proxier.syncService(svcPortNameString, serv, shouldBind, alreadyBoundAddrs); err == nil { activeIPVSServices.Insert(serv.String()) - activeBindAddrs.Insert(serv.Address.String()) + if shouldBind { + activeBindAddrs.Insert(serv.Address.String()) + } if err := proxier.syncEndpoint(svcPortName, svcInfo.ExternalPolicyLocal(), serv); err != nil { klog.ErrorS(err, "Failed to sync endpoint for service", "servicePortName", svcPortName, "virtualServer", serv) } @@ -1726,6 +1733,9 @@ func (proxier *Proxier) writeIptablesRules() { proxier.filterRules.Write( "-A", string(kubeIPVSFilterChain), "-m", "set", "--match-set", proxier.ipsetList[kubeExternalIPSet].Name, "dst,dst", "-j", "RETURN") + proxier.filterRules.Write( + "-A", string(kubeIPVSFilterChain), + "-m", "set", "--match-set", proxier.ipsetList[kubeHealthCheckNodePortSet].Name, "dst", "-j", "RETURN") proxier.filterRules.Write( "-A", string(kubeIPVSFilterChain), "-m", "conntrack", "--ctstate", "NEW", diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go index 68e215dd2a5c..933ee0ff68ba 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/preemption/preemption.go @@ -355,6 +355,7 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1. // Otherwise we should delete the victim. if waitingPod := fh.GetWaitingPod(victim.UID); waitingPod != nil { waitingPod.Reject(pluginName, "preempted") + klog.V(2).InfoS("Preemptor pod rejected a waiting pod", "preemptor", klog.KObj(pod), "waitingPod", klog.KObj(victim), "node", c.Name()) } else { if feature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions) { victimPodApply := corev1apply.Pod(victim.Name, victim.Namespace).WithStatus(corev1apply.PodStatus()) @@ -377,7 +378,9 @@ func (ev *Evaluator) prepareCandidate(ctx context.Context, c Candidate, pod *v1. errCh.SendErrorWithCancel(err, cancel) return } + klog.V(2).InfoS("Preemptor Pod preempted victim Pod", "preemptor", klog.KObj(pod), "victim", klog.KObj(victim), "node", c.Name()) } + fh.EventRecorder().Eventf(victim, pod, v1.EventTypeNormal, "Preempted", "Preempting", "Preempted by a pod on node %v", c.Name()) } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go index d8684f5ae01c..8c5e9518ab3e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/framework.go @@ -358,6 +358,20 @@ func NewFramework(r Registry, profile *config.KubeSchedulerProfile, stopCh <-cha options.captureProfile(outputProfile) } + // Cache metric streams for prefilter and filter plugins. 
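The wrappers cached here follow a plain decorator pattern: embed the plugin interface, bump a counter, delegate. A toy sketch with stand-in interfaces rather than the scheduler's real types (counter, filter, and alwaysTrue are hypothetical):

package main

import "fmt"

// counter is a stand-in for the component-base CounterMetric interface.
type counter interface{ Inc() }

type simpleCounter struct{ n int }

func (c *simpleCounter) Inc() { c.n++ }

// filter is a toy stand-in for framework.FilterPlugin.
type filter interface {
	Name() string
	Filter(node string) bool
}

// instrumentedFilter decorates a filter, bumping a counter per call while
// delegating everything else to the wrapped plugin via embedding.
type instrumentedFilter struct {
	filter
	metric counter
}

func (p instrumentedFilter) Filter(node string) bool {
	p.metric.Inc()
	return p.filter.Filter(node)
}

type alwaysTrue struct{}

func (alwaysTrue) Name() string            { return "alwaysTrue" }
func (alwaysTrue) Filter(node string) bool { return true }

func main() {
	c := &simpleCounter{}
	var f filter = instrumentedFilter{filter: alwaysTrue{}, metric: c}
	f.Filter("node-a")
	f.Filter("node-b")
	fmt.Println(c.n) // 2
}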
+ for i, pl := range f.preFilterPlugins { + f.preFilterPlugins[i] = &instrumentedPreFilterPlugin{ + PreFilterPlugin: f.preFilterPlugins[i], + metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.PreFilter, f.profileName), + } + } + for i, pl := range f.filterPlugins { + f.filterPlugins[i] = &instrumentedFilterPlugin{ + FilterPlugin: f.filterPlugins[i], + metric: metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.Filter, f.profileName), + } + } + return f, nil } @@ -614,7 +628,6 @@ func (f *frameworkImpl) RunPreFilterPlugins(ctx context.Context, state *framewor skipPlugins.Insert(pl.Name()) continue } - metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.PreFilter, f.profileName).Inc() if !s.IsSuccess() { s.SetFailedPlugin(pl.Name()) if s.IsUnschedulable() { @@ -732,7 +745,6 @@ func (f *frameworkImpl) RunFilterPlugins( if state.SkipFilterPlugins.Has(pl.Name()) { continue } - metrics.PluginEvaluationTotal.WithLabelValues(pl.Name(), metrics.Filter, f.profileName).Inc() if status := f.runFilterPlugin(ctx, pl, state, pod, nodeInfo); !status.IsSuccess() { if !status.IsUnschedulable() { // Filter plugins are not supposed to return any status other than diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/instrumented_plugins.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/instrumented_plugins.go new file mode 100644 index 000000000000..152d6788a9e1 --- /dev/null +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/scheduler/framework/runtime/instrumented_plugins.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "context" + + v1 "k8s.io/api/core/v1" + compbasemetrics "k8s.io/component-base/metrics" + "k8s.io/kubernetes/pkg/scheduler/framework" +) + +type instrumentedFilterPlugin struct { + framework.FilterPlugin + + metric compbasemetrics.CounterMetric +} + +var _ framework.FilterPlugin = &instrumentedFilterPlugin{} + +func (p *instrumentedFilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { + p.metric.Inc() + return p.FilterPlugin.Filter(ctx, state, pod, nodeInfo) +} + +type instrumentedPreFilterPlugin struct { + framework.PreFilterPlugin + + metric compbasemetrics.CounterMetric +} + +var _ framework.PreFilterPlugin = &instrumentedPreFilterPlugin{} + +func (p *instrumentedPreFilterPlugin) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) { + result, status := p.PreFilterPlugin.PreFilter(ctx, state, pod) + if !status.IsSkip() { + p.metric.Inc() + } + return result, status +} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go index 7a1e5e58178b..ae7151149785 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/configmap/configmap.go @@ -252,7 +252,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterA setPerms := func(_ string) error { // This may be the first time writing and new files get created outside the timestamp subdirectory: // change the permissions on the whole volume and not only in the timestamp directory. - return volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + return volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) } err = writer.Write(payload, setPerms) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go index 8ffb3acf49cf..ef3c98258ac8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_attacher.go @@ -595,14 +595,13 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { driverName = data[volDataKey.driverName] volID = data[volDataKey.volHandle] } else { - klog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err)) - - // The volume might have been mounted by old CSI volume plugin. 
Fall back to the old behavior: read PV from API server - driverName, volID, err = getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath) - if err != nil { - klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err)) - return err + if errors.Is(err, os.ErrNotExist) { + klog.V(4).Info(log("attacher.UnmountDevice skipped because volume data file [%s] does not exist", dataDir)) + return nil } + + klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err)) + return err } if c.csiClient == nil { @@ -682,36 +681,6 @@ func makeDeviceMountPath(plugin *csiPlugin, spec *volume.Spec) (string, error) { return filepath.Join(plugin.host.GetPluginDir(plugin.GetPluginName()), driver, volSha, globalMountInGlobalPath), nil } -func getDriverAndVolNameFromDeviceMountPath(k8s kubernetes.Interface, deviceMountPath string) (string, string, error) { - // deviceMountPath structure: /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname}/globalmount - dir := filepath.Dir(deviceMountPath) - if file := filepath.Base(deviceMountPath); file != globalMountInGlobalPath { - return "", "", errors.New(log("getDriverAndVolNameFromDeviceMountPath failed, path did not end in %s", globalMountInGlobalPath)) - } - // dir is now /var/lib/kubelet/plugins/kubernetes.io/csi/pv/{pvname} - pvName := filepath.Base(dir) - - // Get PV and check for errors - pv, err := k8s.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) - if err != nil { - return "", "", err - } - if pv == nil || pv.Spec.CSI == nil { - return "", "", errors.New(log("getDriverAndVolNameFromDeviceMountPath could not find CSI Persistent Volume Source for pv: %s", pvName)) - } - - // Get VolumeHandle and PluginName from pv - csiSource := pv.Spec.CSI - if csiSource.Driver == "" { - return "", "", errors.New(log("getDriverAndVolNameFromDeviceMountPath failed, driver name empty")) - } - if csiSource.VolumeHandle == "" { - return "", "", errors.New(log("getDriverAndVolNameFromDeviceMountPath failed, VolumeHandle empty")) - } - - return csiSource.Driver, csiSource.VolumeHandle, nil -} - func verifyAttachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) { // when we received a deleted event during attachment, fail fast if attachment == nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go index 1974b0367531..468f882b8845 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go @@ -333,7 +333,7 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error // Driver doesn't support applying FSGroup. Kubelet must apply it instead. // fullPluginName helps to distinguish different driver from csi plugin - err := volume.SetVolumeOwnership(c, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(c.plugin, c.spec)) + err := volume.SetVolumeOwnership(c, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(c.plugin, c.spec)) if err != nil { // At this point mount operation is successful: // 1. 
Since volume can not be used by the pod because of invalid permissions, we must return error diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go index ee2bdc193b32..bb4d799ff3c8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_util.go @@ -79,7 +79,7 @@ func loadVolumeData(dir string, fileName string) (map[string]string, error) { file, err := os.Open(dataFileName) if err != nil { - return nil, errors.New(log("failed to open volume data file [%s]: %v", dataFileName, err)) + return nil, fmt.Errorf("%s: %w", log("failed to open volume data file [%s]", dataFileName), err) } defer file.Close() data := map[string]string{} diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go index b13e6ea6015c..54364009d018 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/downwardapi/downwardapi.go @@ -223,7 +223,7 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, mounterArgs volume.Mounte setPerms := func(_ string) error { // This may be the first time writing and new files get created outside the timestamp subdirectory: // change the permissions on the whole volume and not only in the timestamp directory. - return volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + return volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) } err = writer.Write(data, setPerms) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go index 9ad981c54bd4..e75bccd4927e 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/emptydir/empty_dir.go @@ -280,7 +280,7 @@ func (ed *emptyDir) SetUpAt(dir string, mounterArgs volume.MounterArgs) error { err = fmt.Errorf("unknown storage medium %q", ed.medium) } - volume.SetVolumeOwnership(ed, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(ed.plugin, nil)) + volume.SetVolumeOwnership(ed, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(ed.plugin, nil)) // If setting up the quota fails, just log a message but don't actually error out. 
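The loadVolumeData change above (errors.New replaced by fmt.Errorf with %w) is what lets callers such as the UnmountDevice path detect a missing vol_data.json via errors.Is. A minimal sketch (loadVolumeData here is a simplified stand-in):

package main

import (
	"errors"
	"fmt"
	"os"
)

func loadVolumeData(path string) (map[string]string, error) {
	f, err := os.Open(path)
	if err != nil {
		// %w preserves the underlying error so errors.Is can see it;
		// errors.New(fmt.Sprintf(...)) would have hidden it.
		return nil, fmt.Errorf("failed to open volume data file [%s]: %w", path, err)
	}
	defer f.Close()
	return map[string]string{}, nil
}

func main() {
	_, err := loadVolumeData("/nonexistent/vol_data.json")
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
}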
// We'll use the old du mechanism in this case, at least until we support diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go index bb054ea16618..02e15c4f85c5 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/fc/disk_manager.go @@ -91,7 +91,7 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou } if !b.readOnly { - volume.SetVolumeOwnership(&b, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(&b, volPath, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go index 8098cfdb66ee..3821af7e9235 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/flexvolume/mounter.go @@ -95,7 +95,7 @@ func (f *flexVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) if !f.readOnly { if f.plugin.capabilities.FSGroup { // fullPluginName helps to distinguish different driver from flex volume plugin - volume.SetVolumeOwnership(f, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(f.plugin, f.spec)) + volume.SetVolumeOwnership(f, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(f.plugin, f.spec)) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go index 7bbeade0ef08..8dd63cf623cd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/gcepd/gce_pd.go @@ -430,7 +430,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, mounterArgs volume.Mounte klog.V(4).Infof("mount of disk %s succeeded", dir) if !b.readOnly { - if err := volume.SetVolumeOwnership(b, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)); err != nil { + if err := volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)); err != nil { klog.Errorf("SetVolumeOwnership returns error %v", err) } } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go index fe890032e27d..995018d90072 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/git_repo/git_repo.go @@ -235,7 +235,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArg return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err) } - volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) volumeutil.SetReady(b.getMetaDir()) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go 
b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go index 6d60e44efaf0..6aa8652bd6b0 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/iscsi/disk_manager.go @@ -96,7 +96,7 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter } if !b.readOnly { - volume.SetVolumeOwnership(&b, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(&b, volPath, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go index ca0bc3040021..0c8fe0753967 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/local/local.go @@ -615,7 +615,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) if !m.readOnly { // Volume owner will be written only once on the first volume mount if len(refs) == 0 { - return volume.SetVolumeOwnership(m, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(m.plugin, nil)) + return volume.SetVolumeOwnership(m, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(m.plugin, nil)) } } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go index e0eaf94495d3..6b9243f52341 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/portworx/portworx.go @@ -335,7 +335,7 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterAr return err } if !b.readOnly { - volume.SetVolumeOwnership(b, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) } klog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir) return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go index c82b38653e37..deb7728168a8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/projected/projected.go @@ -233,7 +233,7 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterA setPerms := func(_ string) error { // This may be the first time writing and new files get created outside the timestamp subdirectory: // change the permissions on the whole volume and not only in the timestamp directory. 
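All of these call sites follow the new SetVolumeOwnership shape: the caller now passes the directory it just set up instead of letting the helper re-derive it from mounter.GetPath(). A toy sketch of that signature change (setOwnership and podMounter are hypothetical; printing stands in for the recursive ownership walk):

package main

import "fmt"

type mounter interface{ GetPath() string }

type podMounter struct{ volumePath string }

func (m podMounter) GetPath() string { return m.volumePath }

// setOwnership takes the directory explicitly, so callers can apply
// ownership to the exact path they are setting up rather than whatever
// GetPath happens to return at that moment.
func setOwnership(m mounter, dir string, fsGroup *int64) error {
	if fsGroup == nil {
		return nil
	}
	fmt.Printf("would chown -R :%d %s\n", *fsGroup, dir)
	return nil
}

func main() {
	gid := int64(2000)
	m := podMounter{volumePath: "/var/lib/kubelet/pods/uid/volumes/kubernetes.io~empty-dir/cache"}
	_ = setOwnership(m, m.GetPath(), &gid)
}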
- return volume.SetVolumeOwnership(s, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(s.plugin, nil)) + return volume.SetVolumeOwnership(s, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(s.plugin, nil)) } err = writer.Write(data, setPerms) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go index edff33540f4d..2131c7ecedd8 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/rbd/disk_manager.go @@ -96,7 +96,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. klog.V(3).Infof("rbd: successfully bind mount %s to %s with options %v", globalPDPath, volPath, mountOptions) if !b.ReadOnly { - volume.SetVolumeOwnership(&b, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(&b, volPath, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) } return nil diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go index f43f1bffa3b9..f1d2c9c59ffd 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/secret/secret.go @@ -247,7 +247,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs setPerms := func(_ string) error { // This may be the first time writing and new files get created outside the timestamp subdirectory: // change the permissions on the whole volume and not only in the timestamp directory. - return volume.SetVolumeOwnership(b, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) + return volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, nil /*fsGroupChangePolicy*/, volumeutil.FSGroupCompleteHook(b.plugin, nil)) } err = writer.Write(payload, setPerms) if err != nil { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go index 57c02815029a..ec7f6da4bfe9 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go @@ -40,22 +40,22 @@ const ( // SetVolumeOwnership modifies the given volume to be owned by // fsGroup, and sets SetGid so that newly created files are owned by // fsGroup. If fsGroup is nil nothing is done. -func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error { +func SetVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error { if fsGroup == nil { return nil } timer := time.AfterFunc(30*time.Second, func() { - klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", mounter.GetPath()) + klog.Warningf("Setting volume ownership for %s and fsGroup set. 
If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", dir) }) defer timer.Stop() - if skipPermissionChange(mounter, fsGroup, fsGroupChangePolicy) { - klog.V(3).InfoS("Skipping permission and ownership change for volume", "path", mounter.GetPath()) + if skipPermissionChange(mounter, dir, fsGroup, fsGroupChangePolicy) { + klog.V(3).InfoS("Skipping permission and ownership change for volume", "path", dir) return nil } - err := walkDeep(mounter.GetPath(), func(path string, info os.FileInfo, err error) error { + err := walkDeep(dir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } @@ -104,14 +104,12 @@ func changeFilePermission(filename string, fsGroup *int64, readonly bool, info o return nil } -func skipPermissionChange(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool { - dir := mounter.GetPath() - +func skipPermissionChange(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool { if fsGroupChangePolicy == nil || *fsGroupChangePolicy != v1.FSGroupChangeOnRootMismatch { klog.V(4).InfoS("Perform recursive ownership change for directory", "path", dir) return false } - return !requiresPermissionChange(mounter.GetPath(), fsGroup, mounter.GetAttributes().ReadOnly) + return !requiresPermissionChange(dir, fsGroup, mounter.GetAttributes().ReadOnly) } func requiresPermissionChange(rootDir string, fsGroup *int64, readonly bool) bool { diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go index 20c56d4b63e2..3b5a200a6160 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go @@ -24,6 +24,6 @@ import ( "k8s.io/kubernetes/pkg/volume/util/types" ) -func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error { +func SetVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error { return nil } diff --git a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go index 0660eed66bb3..9d5dd3a4a7c2 100644 --- a/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/cluster-autoscaler/vendor/k8s.io/kubernetes/pkg/volume/vsphere_volume/vsphere_volume.go @@ -277,7 +277,7 @@ func (b *vsphereVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArg os.Remove(dir) return err } - volume.SetVolumeOwnership(b, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) + volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil)) klog.V(3).Infof("vSphere volume %s mounted to %s", b.volPath, dir) return nil diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt index 5724f2472b13..64f5399a8908 100644 --- a/cluster-autoscaler/vendor/modules.txt +++ b/cluster-autoscaler/vendor/modules.txt @@ -482,8 +482,8 @@ github.com/mxk/go-flowrate/flowrate # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 
github.com/opencontainers/go-digest -# github.com/opencontainers/runc v1.1.4 -## explicit; go 1.16 +# github.com/opencontainers/runc v1.1.6 +## explicit; go 1.17 github.com/opencontainers/runc/libcontainer github.com/opencontainers/runc/libcontainer/apparmor github.com/opencontainers/runc/libcontainer/capabilities @@ -970,7 +970,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.27.1 => k8s.io/api v0.27.1 +# k8s.io/api v0.27.2 => k8s.io/api v0.27.2 ## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1026,7 +1026,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.27.1 => k8s.io/apimachinery v0.28.0-alpha.0 +# k8s.io/apimachinery v0.27.2 => k8s.io/apimachinery v0.27.2 ## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1087,7 +1087,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.27.1 => k8s.io/apiserver v0.27.1 +# k8s.io/apiserver v0.27.2 => k8s.io/apiserver v0.27.2 ## explicit; go 1.20 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -1229,7 +1229,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.27.1 => k8s.io/client-go v0.27.1 +# k8s.io/client-go v0.27.2 => k8s.io/client-go v0.27.2 ## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1555,7 +1555,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.27.1 => k8s.io/cloud-provider v0.27.1 +# k8s.io/cloud-provider v0.27.2 => k8s.io/cloud-provider v0.27.2 ## explicit; go 1.20 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -1577,7 +1577,7 @@ k8s.io/cloud-provider/volume/helpers # k8s.io/cloud-provider-aws v1.27.0 ## explicit; go 1.20 k8s.io/cloud-provider-aws/pkg/providers/v1 -# k8s.io/component-base v0.27.1 => k8s.io/component-base v0.27.1 +# k8s.io/component-base v0.27.2 => k8s.io/component-base v0.27.2 ## explicit; go 1.20 k8s.io/component-base/cli/flag k8s.io/component-base/codec @@ -1605,7 +1605,7 @@ k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/component-helpers v0.27.1 => k8s.io/component-helpers v0.27.1 +# k8s.io/component-helpers v0.27.2 => k8s.io/component-helpers v0.27.2 ## explicit; go 1.20 k8s.io/component-helpers/apimachinery/lease k8s.io/component-helpers/node/topology @@ -1615,7 +1615,7 @@ k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/ephemeral k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.27.1 => k8s.io/controller-manager v0.27.1 +# k8s.io/controller-manager v0.27.2 => k8s.io/controller-manager v0.27.2 ## explicit; go 1.20 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 @@ -1632,11 +1632,11 @@ k8s.io/controller-manager/pkg/leadermigration/options k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1 k8s.io/cri-api/pkg/errors -# 
-# k8s.io/csi-translation-lib v0.27.0 => k8s.io/csi-translation-lib v0.27.1
+# k8s.io/csi-translation-lib v0.27.0 => k8s.io/csi-translation-lib v0.27.2
 ## explicit; go 1.20
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
-# k8s.io/dynamic-resource-allocation v0.0.0 => k8s.io/dynamic-resource-allocation v0.27.1
+# k8s.io/dynamic-resource-allocation v0.0.0 => k8s.io/dynamic-resource-allocation v0.27.2
 ## explicit; go 1.20
 k8s.io/dynamic-resource-allocation/resourceclaim
 # k8s.io/klog/v2 v2.90.1
@@ -1647,13 +1647,13 @@ k8s.io/klog/v2/internal/clock
 k8s.io/klog/v2/internal/dbg
 k8s.io/klog/v2/internal/serialize
 k8s.io/klog/v2/internal/severity
-# k8s.io/kms v0.27.1 => k8s.io/kms v0.27.1
+# k8s.io/kms v0.27.2 => k8s.io/kms v0.27.2
 ## explicit; go 1.20
 k8s.io/kms/apis/v1beta1
 k8s.io/kms/apis/v2
 k8s.io/kms/pkg/service
 k8s.io/kms/pkg/util
-# k8s.io/kube-openapi v0.0.0-20230327201221-f5883ff37f0c
+# k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f
 ## explicit; go 1.19
 k8s.io/kube-openapi/pkg/builder
 k8s.io/kube-openapi/pkg/builder3
@@ -1675,19 +1675,19 @@ k8s.io/kube-openapi/pkg/validation/errors
 k8s.io/kube-openapi/pkg/validation/spec
 k8s.io/kube-openapi/pkg/validation/strfmt
 k8s.io/kube-openapi/pkg/validation/strfmt/bson
-# k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.27.1
+# k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.27.2
 ## explicit; go 1.20
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.27.1
+# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.27.2
 ## explicit; go 1.20
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1beta2
 k8s.io/kube-scheduler/config/v1beta3
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.27.1
+# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.27.2
 ## explicit; go 1.20
 k8s.io/kubectl/pkg/scale
-# k8s.io/kubelet v0.27.1 => k8s.io/kubelet v0.27.1
+# k8s.io/kubelet v0.27.2 => k8s.io/kubelet v0.27.2
 ## explicit; go 1.20
 k8s.io/kubelet/config/v1
 k8s.io/kubelet/config/v1alpha1
@@ -1704,7 +1704,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1
 k8s.io/kubelet/pkg/apis/podresources/v1
 k8s.io/kubelet/pkg/apis/podresources/v1alpha1
 k8s.io/kubelet/pkg/apis/stats/v1alpha1
-# k8s.io/kubernetes v1.27.1
+# k8s.io/kubernetes v1.27.2
 ## explicit; go 1.20
 k8s.io/kubernetes/cmd/kube-proxy/app
 k8s.io/kubernetes/cmd/kubelet/app
@@ -1976,7 +1976,7 @@ k8s.io/kubernetes/pkg/volume/vsphere_volume
 k8s.io/kubernetes/pkg/windows/service
 k8s.io/kubernetes/test/utils
 k8s.io/kubernetes/third_party/forked/golang/expansion
-# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.27.1
+# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.27.2
 ## explicit; go 1.20
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth
@@ -2018,7 +2018,7 @@ k8s.io/legacy-cloud-providers/gce/gcpcredential
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/mount-utils v0.26.0-alpha.0 => k8s.io/mount-utils v0.27.1
+# k8s.io/mount-utils v0.26.0-alpha.0 => k8s.io/mount-utils v0.27.2
 ## explicit; go 1.20
 k8s.io/mount-utils
 # k8s.io/utils v0.0.0-20230406110748-d93618cff8a2
@@ -2118,33 +2118,33 @@ sigs.k8s.io/yaml
 # github.com/aws/aws-sdk-go/service/eks => github.com/aws/aws-sdk-go/service/eks v1.38.49
 # github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0
 # github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0
-# k8s.io/api => k8s.io/api v0.27.1
-# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.1
-# k8s.io/apimachinery => k8s.io/apimachinery v0.28.0-alpha.0
-# k8s.io/apiserver => k8s.io/apiserver v0.27.1
-# k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.1
-# k8s.io/client-go => k8s.io/client-go v0.27.1
-# k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.1
-# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.1
-# k8s.io/code-generator => k8s.io/code-generator v0.27.1
-# k8s.io/component-base => k8s.io/component-base v0.27.1
-# k8s.io/component-helpers => k8s.io/component-helpers v0.27.1
-# k8s.io/controller-manager => k8s.io/controller-manager v0.27.1
+# k8s.io/api => k8s.io/api v0.27.2
+# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.2
+# k8s.io/apimachinery => k8s.io/apimachinery v0.27.2
+# k8s.io/apiserver => k8s.io/apiserver v0.27.2
+# k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.2
+# k8s.io/client-go => k8s.io/client-go v0.27.2
+# k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.2
+# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.2
+# k8s.io/code-generator => k8s.io/code-generator v0.27.2
+# k8s.io/component-base => k8s.io/component-base v0.27.2
+# k8s.io/component-helpers => k8s.io/component-helpers v0.27.2
+# k8s.io/controller-manager => k8s.io/controller-manager v0.27.2
 # k8s.io/cri-api => k8s.io/cri-api v0.28.0-alpha.0
-# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.1
-# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.1
-# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.1
-# k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.1
-# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.1
-# k8s.io/kubectl => k8s.io/kubectl v0.27.1
-# k8s.io/kubelet => k8s.io/kubelet v0.27.1
-# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.1
-# k8s.io/metrics => k8s.io/metrics v0.27.1
-# k8s.io/mount-utils => k8s.io/mount-utils v0.27.1
-# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.1
-# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.1
-# k8s.io/sample-controller => k8s.io/sample-controller v0.27.1
-# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.1
-# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.1
-# k8s.io/kms => k8s.io/kms v0.27.1
+# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.2
+# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.2
+# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.2
+# k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.2
+# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.2
+# k8s.io/kubectl => k8s.io/kubectl v0.27.2
+# k8s.io/kubelet => k8s.io/kubelet v0.27.2
+# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.2
+# k8s.io/metrics => k8s.io/metrics v0.27.2
+# k8s.io/mount-utils => k8s.io/mount-utils v0.27.2
+# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.2
+# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.2
+# k8s.io/sample-controller => k8s.io/sample-controller v0.27.2
+# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.2
+# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.2
+# k8s.io/kms => k8s.io/kms v0.27.2
 # k8s.io/noderesourcetopology-api => k8s.io/noderesourcetopology-api v0.27.0
diff --git a/cluster-autoscaler/version/version.go b/cluster-autoscaler/version/version.go
index fbbb44bacd80..5625be6222d7 100644
--- a/cluster-autoscaler/version/version.go
+++ b/cluster-autoscaler/version/version.go
@@ -17,4 +17,4 @@ limitations under the License.
 package version
 
 // ClusterAutoscalerVersion contains version of CA.
-const ClusterAutoscalerVersion = "1.27.1"
+const ClusterAutoscalerVersion = "1.27.2"
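
ClusterAutoscalerVersion is a plain exported string constant, so the bump above is picked up anywhere the binary reports its version. A minimal, hypothetical Go sketch of that usage pattern follows; the flag name and output format are illustrative, not the autoscaler's actual CLI surface.

package main

import (
	"flag"
	"fmt"
)

// Mirrors the shape of the bumped constant; the real one lives in
// cluster-autoscaler/version (this copy is illustrative only).
const ClusterAutoscalerVersion = "1.27.2"

func main() {
	// Hypothetical flag, shown only to illustrate how such a constant is
	// typically surfaced at startup.
	showVersion := flag.Bool("version", false, "print version and exit")
	flag.Parse()
	if *showVersion {
		fmt.Printf("Cluster Autoscaler %s\n", ClusterAutoscalerVersion)
		return
	}
	fmt.Println("starting Cluster Autoscaler", ClusterAutoscalerVersion)
}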