Update VPA dependencies to k8s 1.16.2 #2486

Merged: 7 commits, Oct 31, 2019
Note: this diff is too large to display in full; only the first 3000 changed files are loaded.
2 changes: 1 addition & 1 deletion vertical-pod-autoscaler/builder/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-FROM golang:1.11.1
+FROM golang:1.12.10
LABEL maintainer="Beata Skiba <[email protected]>"

ENV GOPATH /gopath/
3 changes: 2 additions & 1 deletion vertical-pod-autoscaler/e2e/v1/actuation.go
@@ -28,6 +28,7 @@ import (
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
+framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@@ -120,7 +121,7 @@ func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Depl

err := wait.PollImmediate(pollInterval, pollTimeout+pendingDuration, func() (bool, error) {
var err error
-currentPodList, err := framework.GetPodsForDeployment(c, deployment)
+currentPodList, err := framework_deployment.GetPodsForDeployment(c, deployment)
if err != nil {
return false, err
}
5 changes: 3 additions & 2 deletions vertical-pod-autoscaler/e2e/v1/admission_controller.go
@@ -23,6 +23,7 @@ import (
apiv1 "k8s.io/api/core/v1"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
"k8s.io/kubernetes/test/e2e/framework"
+framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@@ -497,10 +498,10 @@ func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment)
c, ns := f.ClientSet, f.Namespace.Name
deployment, err := c.AppsV1().Deployments(ns).Create(deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
-err = framework.WaitForDeploymentComplete(c, deployment)
+err = framework_deployment.WaitForDeploymentComplete(c, deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

-podList, err := framework.GetPodsForDeployment(c, deployment)
+podList, err := framework_deployment.GetPodsForDeployment(c, deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
return podList
}
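Note: this framework.* to framework_deployment.* rewrite recurs throughout the PR; in Kubernetes 1.16 the e2e deployment helpers moved out of the monolithic framework package into k8s.io/kubernetes/test/e2e/framework/deployment. A minimal sketch of a migrated call site, assuming the 1.16.2 signatures (the wrapper waitAndListPods is illustrative, not part of this PR):

package e2e

import (
    appsv1 "k8s.io/api/apps/v1"
    apiv1 "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment"
)

// waitAndListPods waits for the deployment to finish rolling out and returns
// its pods, using the relocated helpers (formerly framework.WaitForDeploymentComplete
// and framework.GetPodsForDeployment).
func waitAndListPods(c clientset.Interface, d *appsv1.Deployment) (*apiv1.PodList, error) {
    if err := framework_deployment.WaitForDeploymentComplete(c, d); err != nil {
        return nil, err
    }
    return framework_deployment.GetPodsForDeployment(c, d)
}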
191 changes: 42 additions & 149 deletions vertical-pod-autoscaler/e2e/v1/autoscaling_utils.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-// This is a fork of k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go
+// This is a cut down fork of k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go

package autoscaling

@@ -25,20 +25,20 @@ import (
"sync"
"time"

-autoscalingv1 "k8s.io/api/autoscaling/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
-api "k8s.io/kubernetes/pkg/apis/core"
-"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/e2e/framework"
+"k8s.io/kubernetes/test/e2e/framework/replicaset"
+e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
testutils "k8s.io/kubernetes/test/utils"

-ginkgo "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
+scaleclient "k8s.io/client-go/scale"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -78,7 +78,7 @@
)

/*
-ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory
+ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported)
typical use case:
rc.ConsumeCPU(600)
// ... check your assumption here
@@ -91,7 +91,7 @@ type ResourceConsumer struct {
kind schema.GroupVersionKind
nsName string
clientSet clientset.Interface
-internalClientset *internalclientset.Clientset
+scaleClient scaleclient.ScalesGetter
cpu chan int
mem chan int
customMetric chan int
@@ -106,41 +106,35 @@ type ResourceConsumer struct {
requestSizeCustomMetric int
}

-// GetResourceConsumerImage func
-func GetResourceConsumerImage() string {
-return resourceConsumerImage
-}
-
// NewDynamicResourceConsumer func
-func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuRequest, memRequest resource.Quantity, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
+func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuRequest, memRequest int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
-dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuRequest, memRequest, clientset, internalClientset)
-}
-
-// NewStaticResourceConsumer TODO this still defaults to replication controller
-func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuRequest, memRequest resource.Quantity, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
-return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
-initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuRequest, memRequest, clientset, internalClientset)
+dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuRequest, memRequest, clientset, scaleClient, nil, nil)
}

/*
-NewResourceConsumer creates new ResourceConsumer
+newResourceConsumer creates new ResourceConsumer
initCPUTotal argument is in millicores
initMemoryTotal argument is in megabytes
-memRequest argument is in megabytes, it specifies the original Pod resource request
-cpuRequest argument is in millicores, it specifies the original Pod resource request
+memRequest argument is in megabytes, memRequest is a maximum amount of memory that can be consumed by a single pod
+cpuRequest argument is in millicores, cpuRequest is a maximum amount of cpu that can be consumed by a single pod
*/
func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
-requestSizeInMegabytes int, requestSizeCustomMetric int, cpuRequest, memRequest resource.Quantity, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
-
-runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuRequest, memRequest)
+requestSizeInMegabytes int, requestSizeCustomMetric int, cpuRequest, memRequest int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer {
+if podAnnotations == nil {
+podAnnotations = make(map[string]string)
+}
+if serviceAnnotations == nil {
+serviceAnnotations = make(map[string]string)
+}
+runServiceAndWorkloadForResourceConsumer(clientset, nsName, name, kind, replicas, cpuRequest, memRequest, podAnnotations, serviceAnnotations)
rc := &ResourceConsumer{
name: name,
controllerName: name + "-ctrl",
kind: kind,
nsName: nsName,
clientSet: clientset,
-internalClientset: internalClientset,
+scaleClient: scaleClient,
cpu: make(chan int),
mem: make(chan int),
customMetric: make(chan int),
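For callers, the upshot of these changes is a new constructor shape: CPU and memory requests are plain int64 millicore/megabyte values rather than resource.Quantity, and a scaleclient.ScalesGetter takes the place of the removed internal clientset. A hedged sketch of a call site under the new signature (the framework fields f.ClientSet and f.ScalesGetter, and all of the sizes, are illustrative assumptions, not taken from this PR):

rc := NewDynamicResourceConsumer(
    "consumer",       // name
    f.Namespace.Name, // namespace
    KindDeployment,   // workload kind backing the consumer
    3,                // replicas
    300,              // initCPUTotal, millicores
    200,              // initMemoryTotal, megabytes
    0,                // initCustomMetric
    500,              // cpuRequest: now int64 millicores, was resource.Quantity
    200,              // memRequest: now int64 megabytes, was resource.Quantity
    f.ClientSet,      // clientset.Interface
    f.ScalesGetter,   // scaleclient.ScalesGetter, replaces *internalclientset.Clientset
)
defer rc.CleanUp()
rc.ConsumeCPU(600) // the "typical use case" from the doc comment above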
@@ -232,7 +226,7 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
delta := 0
for {
select {
-case delta := <-rc.customMetric:
+case delta = <-rc.customMetric:
framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
case <-time.After(sleepTime):
framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
@@ -250,7 +244,7 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
defer cancel()

err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
+proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
@@ -277,7 +271,7 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
defer cancel()

err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
+proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
@@ -304,7 +298,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
defer cancel()

err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
+proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
framework.ExpectNoError(err)
req := proxyRequest.Namespace(rc.nsName).
Context(ctx).
@@ -325,83 +319,6 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
framework.ExpectNoError(err)
}

-// GetReplicas func
-func (rc *ResourceConsumer) GetReplicas() int {
-switch rc.kind {
-case KindRC:
-replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
-framework.ExpectNoError(err)
-if replicationController == nil {
-framework.Failf(rcIsNil)
-}
-return int(replicationController.Status.ReadyReplicas)
-case KindDeployment:
-deployment, err := rc.clientSet.ExtensionsV1beta1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
-framework.ExpectNoError(err)
-if deployment == nil {
-framework.Failf(deploymentIsNil)
-}
-return int(deployment.Status.ReadyReplicas)
-case KindReplicaSet:
-rs, err := rc.clientSet.ExtensionsV1beta1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
-framework.ExpectNoError(err)
-if rs == nil {
-framework.Failf(rsIsNil)
-}
-return int(rs.Status.ReadyReplicas)
-default:
-framework.Failf(invalidKind)
-}
-return 0
-}
-
-// WaitForReplicas func
-func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
-interval := 20 * time.Second
-err := wait.PollImmediate(interval, duration, func() (bool, error) {
-replicas := rc.GetReplicas()
-framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
-return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
-})
-framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
-}
-
-// EnsureDesiredReplicas func
-func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) {
-interval := 10 * time.Second
-err := wait.PollImmediate(interval, duration, func() (bool, error) {
-replicas := rc.GetReplicas()
-framework.Logf("expecting there to be %d replicas (are: %d)", desiredReplicas, replicas)
-if replicas != desiredReplicas {
-return false, fmt.Errorf("number of replicas changed unexpectedly")
-}
-return false, nil // Expected number of replicas found. Continue polling until timeout.
-})
-// The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
-if err == wait.ErrWaitTimeout {
-framework.Logf("Number of replicas was stable over %v", duration)
-return
-}
-framework.ExpectNoErrorWithOffset(1, err)
-}
-
-// Pause stops background goroutines responsible for consuming resources.
-func (rc *ResourceConsumer) Pause() {
-ginkgo.By(fmt.Sprintf("HPA pausing RC %s", rc.name))
-rc.stopCPU <- 0
-rc.stopMem <- 0
-rc.stopCustomMetric <- 0
-rc.stopWaitGroup.Wait()
-}
-
-// Resume starts background goroutines responsible for consuming resources.
-func (rc *ResourceConsumer) Resume() {
-ginkgo.By(fmt.Sprintf("HPA resuming RC %s", rc.name))
-go rc.makeConsumeCPURequests()
-go rc.makeConsumeMemRequests()
-go rc.makeConsumeCustomMetric()
-}
-
// CleanUp func
func (rc *ResourceConsumer) CleanUp() {
ginkgo.By(fmt.Sprintf("Removing consuming RC %s", rc.name))
@@ -418,11 +335,12 @@ func (rc *ResourceConsumer) CleanUp() {
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil))
}

-func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuRequest, memRequest resource.Quantity) {
+func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuRequestMillis, memRequestMb int64, podAnnotations, serviceAnnotations map[string]string) {
ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
_, err := c.CoreV1().Services(ns).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
-Name: name,
+Name: name,
+Annotations: serviceAnnotations,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
@@ -438,15 +356,15 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli
framework.ExpectNoError(err)

rcConfig := testutils.RCConfig{
-Client: c,
-InternalClient: internalClient,
-Image: resourceConsumerImage,
-Name: name,
-Namespace: ns,
-Timeout: timeoutRC,
-Replicas: replicas,
-CpuRequest: cpuRequest.MilliValue(),
-MemRequest: memRequest.Value(),
+Client: c,
+Image: resourceConsumerImage,
+Name: name,
+Namespace: ns,
+Timeout: timeoutRC,
+Replicas: replicas,
+CpuRequest: cpuRequestMillis,
+MemRequest: memRequestMb * 1024 * 1024, // MemLimit is in bytes
+Annotations: podAnnotations,
}

switch kind {
@@ -457,14 +375,17 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli
dpConfig := testutils.DeploymentConfig{
RCConfig: rcConfig,
}
-framework.ExpectNoError(framework.RunDeployment(dpConfig))
+ginkgo.By(fmt.Sprintf("creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace))
+dpConfig.NodeDumpFunc = framework.DumpNodeDebugInfo
+dpConfig.ContainerDumpFunc = framework.LogFailedContainers
+framework.ExpectNoError(testutils.RunDeployment(dpConfig))
break
case KindReplicaSet:
rsConfig := testutils.ReplicaSetConfig{
RCConfig: rcConfig,
}
ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
framework.ExpectNoError(replicaset.RunReplicaSet(rsConfig))
break
default:
framework.Failf(invalidKind)
@@ -506,31 +427,3 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli
framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
}
-
-// CreateCPUHorizontalPodAutoscaler func
-func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) *autoscalingv1.HorizontalPodAutoscaler {
-hpa := &autoscalingv1.HorizontalPodAutoscaler{
-ObjectMeta: metav1.ObjectMeta{
-Name: rc.name,
-Namespace: rc.nsName,
-},
-Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
-ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
-APIVersion: rc.kind.GroupVersion().String(),
-Kind: rc.kind.Kind,
-Name: rc.name,
-},
-MinReplicas: &minReplicas,
-MaxReplicas: maxRepl,
-TargetCPUUtilizationPercentage: &cpu,
-},
-}
-hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
-framework.ExpectNoError(errHPA)
-return hpa
-}
-
-// DeleteHorizontalPodAutoscaler func
-func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
-rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
-}
5 changes: 3 additions & 2 deletions vertical-pod-autoscaler/e2e/v1/common.go
@@ -35,6 +35,7 @@ import (
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
"k8s.io/kubernetes/test/e2e/framework"
+framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment"
)

const (
@@ -105,15 +106,15 @@ func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas
d.Spec.Replicas = &replicas
d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(d)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when starting deployment creation")
-err = framework.WaitForDeploymentComplete(f.ClientSet, d)
+err = framework_deployment.WaitForDeploymentComplete(f.ClientSet, d)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for deployment creation to finish")
return d
}

// NewHamsterDeployment creates a simple hamster deployment for e2e test
// purposes.
func NewHamsterDeployment(f *framework.Framework) *appsv1.Deployment {
-d := framework.NewDeployment("hamster-deployment", defaultHamsterReplicas, hamsterLabels, "hamster", "k8s.gcr.io/ubuntu-slim:0.1", appsv1.RollingUpdateDeploymentStrategyType)
+d := framework_deployment.NewDeployment("hamster-deployment", defaultHamsterReplicas, hamsterLabels, "hamster", "k8s.gcr.io/ubuntu-slim:0.1", appsv1.RollingUpdateDeploymentStrategyType)
d.ObjectMeta.Namespace = f.Namespace.Name
d.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh"}
d.Spec.Template.Spec.Containers[0].Args = []string{"-c", "/usr/bin/yes >/dev/null"}
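The hamster deployment is the standard fixture that these e2e tests point VerticalPodAutoscaler objects at. A short sketch of typical usage inside a test body, assuming the signatures above (resource values and assertions are illustrative):

// Inside a ginkgo It(...) body, with the enclosing *framework.Framework f:
d := SetupHamsterDeployment(f, "100m", "100Mi", 2) // cpu, memory, replicas
podList, err := framework_deployment.GetPodsForDeployment(f.ClientSet, d)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(podList.Items).To(gomega.HaveLen(2))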