Commit

Merge pull request #3981 from vincepri/cr070
🌱 Update Controller Runtime to v0.7.0-alpha.8
k8s-ci-robot authored Dec 3, 2020
2 parents 8441b28 + b21d303 commit ca2cf1a
Showing 29 changed files with 150 additions and 121 deletions.
2 changes: 2 additions & 0 deletions .golangci.yml
@@ -24,6 +24,8 @@ issues:
exclude:
- Using the variable on range scope `(tc)|(rt)|(tt)|(test)|(testcase)|(testCase)` in function literal
- "G108: Profiling endpoint is automatically exposed on /debug/pprof"
- "fake.NewFakeClientWithScheme is deprecated: Please use NewClientBuilder instead."
- "fake.NewFakeClient is deprecated: Please use NewClientBuilder instead."
run:
timeout: 10m
skip-files:
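The two new lint exclusions exist because controller-runtime v0.7 deprecates fake.NewFakeClientWithScheme and fake.NewFakeClient in favor of a builder API; the test changes in the files below apply that migration mechanically. A minimal sketch of the pattern, assuming a hypothetical helper name and a caller that already has a populated *runtime.Scheme:

    package example // illustrative; the real changes live in the test files below

    import (
        "k8s.io/apimachinery/pkg/runtime"
        "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/client/fake"
    )

    // newFakeClient mirrors the mechanical rewrite applied throughout this commit.
    func newFakeClient(scheme *runtime.Scheme, objs ...client.Object) client.Client {
        // Before (deprecated in controller-runtime v0.7):
        //   fake.NewFakeClientWithScheme(scheme, objs...)
        // After, via the builder:
        return fake.NewClientBuilder().
            WithScheme(scheme).
            WithObjects(objs...).
            Build()
    }

The builder falls back to client-go's global scheme.Scheme when WithScheme is not called, which is why several tests below (for example the controllers/ and CoreDNS tests) can drop the explicit scheme argument and, in one case, the corresponding import.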
22 changes: 11 additions & 11 deletions bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go
@@ -65,27 +65,27 @@ func TestControlPlaneInitMutex_Lock(t *testing.T) {
{
name: "should successfully acquire lock if the config cannot be found",
client: &fakeClient{
Client: fake.NewFakeClientWithScheme(scheme),
Client: fake.NewClientBuilder().WithScheme(scheme).Build(),
getError: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)),
},
shouldAcquire: true,
},
{
name: "should not acquire lock if already exits",
client: &fakeClient{
Client: fake.NewFakeClientWithScheme(scheme, &corev1.ConfigMap{
Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: configMapName(clusterName),
Namespace: clusterNamespace,
},
}),
}).Build(),
},
shouldAcquire: false,
},
{
name: "should not acquire lock if cannot create config map",
client: &fakeClient{
Client: fake.NewFakeClientWithScheme(scheme),
Client: fake.NewClientBuilder().WithScheme(scheme).Build(),
getError: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, configMapName(clusterName)),
createError: errors.New("create error"),
},
@@ -94,7 +94,7 @@ func TestControlPlaneInitMutex_Lock(t *testing.T) {
{
name: "should not acquire lock if config map already exists while creating",
client: &fakeClient{
Client: fake.NewFakeClientWithScheme(scheme),
Client: fake.NewClientBuilder().WithScheme(scheme).Build(),
getError: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)),
createError: apierrors.NewAlreadyExists(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)),
},
@@ -151,30 +151,30 @@ func TestControlPlaneInitMutex_UnLock(t *testing.T) {
{
name: "should release lock by deleting config map",
client: &fakeClient{
Client: fake.NewFakeClientWithScheme(scheme),
Client: fake.NewClientBuilder().WithScheme(scheme).Build(),
},
shouldRelease: true,
},
{
name: "should not release lock if cannot delete config map",
client: &fakeClient{
Client: fake.NewFakeClientWithScheme(scheme, configMap),
Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(configMap).Build(),
deleteError: errors.New("delete error"),
},
shouldRelease: false,
},
{
name: "should release lock if config map does not exist",
client: &fakeClient{
Client: fake.NewFakeClientWithScheme(scheme),
Client: fake.NewClientBuilder().WithScheme(scheme).Build(),
getError: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)),
},
shouldRelease: true,
},
{
name: "should not release lock if error while getting config map",
client: &fakeClient{
Client: fake.NewFakeClientWithScheme(scheme),
Client: fake.NewClientBuilder().WithScheme(scheme).Build(),
getError: errors.New("get error"),
},
shouldRelease: false,
@@ -217,13 +217,13 @@ func TestInfoLines_Lock(t *testing.T) {
g.Expect(err).NotTo(HaveOccurred())

c := &fakeClient{
Client: fake.NewFakeClientWithScheme(scheme, &corev1.ConfigMap{
Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: configMapName(clusterName),
Namespace: clusterNamespace,
},
Data: map[string]string{semaphoreInformationKey: string(b)},
}),
}).Build(),
}

logtester := &logtests{
2 changes: 0 additions & 2 deletions bootstrap/kubeadm/main.go
@@ -36,7 +36,6 @@ import (
"sigs.k8s.io/cluster-api/cmd/version"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller"
// +kubebuilder:scaffold:imports
@@ -133,7 +132,6 @@ func main() {
RetryPeriod: &leaderElectionRetryPeriod,
Namespace: watchNamespace,
SyncPeriod: &syncPeriod,
NewClient: util.ManagerDelegatingClientFunc,
Port: webhookPort,
})
if err != nil {
8 changes: 4 additions & 4 deletions bootstrap/util/configowner_test.go
@@ -64,7 +64,7 @@ func TestGetConfigOwner(t *testing.T) {
},
}

c := fake.NewFakeClientWithScheme(scheme, myMachine)
c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(myMachine).Build()
obj := &bootstrapv1.KubeadmConfig{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
@@ -107,7 +107,7 @@ func TestGetConfigOwner(t *testing.T) {
},
}

c := fake.NewFakeClientWithScheme(scheme, myPool)
c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(myPool).Build()
obj := &bootstrapv1.KubeadmConfig{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
@@ -132,7 +132,7 @@ func TestGetConfigOwner(t *testing.T) {

t.Run("return an error when not found", func(t *testing.T) {
g := NewWithT(t)
c := fake.NewFakeClientWithScheme(scheme)
c := fake.NewClientBuilder().WithScheme(scheme).Build()
obj := &bootstrapv1.KubeadmConfig{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{
@@ -152,7 +152,7 @@ func TestGetConfigOwner(t *testing.T) {

t.Run("return nothing when there is no owner", func(t *testing.T) {
g := NewWithT(t)
c := fake.NewFakeClientWithScheme(scheme)
c := fake.NewClientBuilder().WithScheme(scheme).Build()
obj := &bootstrapv1.KubeadmConfig{
ObjectMeta: metav1.ObjectMeta{
OwnerReferences: []metav1.OwnerReference{},
3 changes: 1 addition & 2 deletions cmd/clusterctl/internal/test/fake_proxy.go
@@ -75,8 +75,7 @@ func (f *FakeProxy) NewClient() (client.Client, error) {
if f.cs != nil {
return f.cs, nil
}
f.cs = fake.NewFakeClientWithScheme(FakeScheme, f.objs...)

f.cs = fake.NewClientBuilder().WithScheme(FakeScheme).WithObjects(f.objs...).Build()
return f.cs, nil
}

4 changes: 2 additions & 2 deletions controllers/cluster_controller_phases_test.go
@@ -131,9 +131,9 @@ func TestClusterReconcilePhases(t *testing.T) {
var c client.Client
if tt.infraRef != nil {
infraConfig := &unstructured.Unstructured{Object: tt.infraRef}
c = fake.NewFakeClientWithScheme(scheme.Scheme, external.TestGenericInfrastructureCRD.DeepCopy(), tt.cluster, infraConfig)
c = fake.NewClientBuilder().WithObjects(external.TestGenericInfrastructureCRD.DeepCopy(), tt.cluster, infraConfig).Build()
} else {
c = fake.NewFakeClientWithScheme(scheme.Scheme, external.TestGenericInfrastructureCRD.DeepCopy(), tt.cluster)
c = fake.NewClientBuilder().WithObjects(external.TestGenericInfrastructureCRD.DeepCopy(), tt.cluster).Build()
}
r := &ClusterReconciler{
Client: c,
2 changes: 1 addition & 1 deletion controllers/machine_controller_noderef_test.go
@@ -68,7 +68,7 @@ func TestGetNodeReference(t *testing.T) {
},
}

client := fake.NewFakeClientWithScheme(scheme.Scheme, nodeList...)
client := fake.NewClientBuilder().WithObjects(nodeList...).Build()

testCases := []struct {
name string
6 changes: 3 additions & 3 deletions controllers/machinedeployment_controller_test.go
@@ -461,7 +461,7 @@ func TestMachineSetToDeployments(t *testing.T) {

g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed())
r := &MachineDeploymentReconciler{
Client: fake.NewFakeClientWithScheme(scheme.Scheme, machineDeplopymentList...),
Client: fake.NewClientBuilder().WithObjects(machineDeplopymentList...).Build(),
recorder: record.NewFakeRecorder(32),
}

@@ -527,7 +527,7 @@ func TestGetMachineDeploymentsForMachineSet(t *testing.T) {

g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed())
r := &MachineDeploymentReconciler{
Client: fake.NewFakeClientWithScheme(scheme.Scheme, append(machineDeploymentList, &ms1, &ms2)...),
Client: fake.NewClientBuilder().WithObjects(append(machineDeploymentList, &ms1, &ms2)...).Build(),
recorder: record.NewFakeRecorder(32),
}

@@ -687,7 +687,7 @@ func TestGetMachineSetsForDeployment(t *testing.T) {
g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed())

r := &MachineDeploymentReconciler{
Client: fake.NewFakeClientWithScheme(scheme.Scheme, machineSetList...),
Client: fake.NewClientBuilder().WithObjects(machineSetList...).Build(),
recorder: record.NewFakeRecorder(32),
}

2 changes: 1 addition & 1 deletion controllers/machinehealthcheck_targets_test.go
@@ -131,7 +131,7 @@ func TestGetTargetsFromMHC(t *testing.T) {
gs := NewGomegaWithT(t)

gs.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed())
k8sClient := fake.NewFakeClientWithScheme(scheme.Scheme, tc.toCreate...)
k8sClient := fake.NewClientBuilder().WithObjects(tc.toCreate...).Build()

// Create a test reconciler
reconciler := &MachineHealthCheckReconciler{
2 changes: 1 addition & 1 deletion controllers/machineset_controller_test.go
@@ -484,7 +484,7 @@ func TestMachineSetToMachines(t *testing.T) {
g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed())

r := &MachineSetReconciler{
Client: fake.NewFakeClientWithScheme(scheme.Scheme, append(machineSetList, &m, &m2, &m3)...),
Client: fake.NewClientBuilder().WithObjects(append(machineSetList, &m, &m2, &m3)...).Build(),
}

for _, tc := range testsCases {
27 changes: 4 additions & 23 deletions controllers/remote/cluster_cache.go
@@ -68,28 +68,6 @@ func NewClusterCacheTracker(log logr.Logger, manager ctrl.Manager) (*ClusterCach
}, nil
}

// NewTestClusterCacheTracker creates a new fake ClusterCacheTracker that can be used by unit tests with fake client.
func NewTestClusterCacheTracker(log logr.Logger, cl client.Client, scheme *runtime.Scheme, objKey client.ObjectKey, watchObjects ...string) *ClusterCacheTracker {
testCacheTracker := &ClusterCacheTracker{
log: log,
client: cl,
scheme: scheme,
clusterAccessors: make(map[client.ObjectKey]*clusterAccessor),
}

delegatingClient := client.NewDelegatingClient(client.NewDelegatingClientInput{
CacheReader: cl,
Client: cl,
})
testCacheTracker.clusterAccessors[objKey] = &clusterAccessor{

cache: nil,
delegatingClient: delegatingClient,
watches: sets.NewString(watchObjects...),
}
return testCacheTracker
}

// GetClient returns a cached client for the given cluster.
func (t *ClusterCacheTracker) GetClient(ctx context.Context, cluster client.ObjectKey) (client.Client, error) {
t.lock.Lock()
@@ -199,10 +177,13 @@ func (t *ClusterCacheTracker) newClusterAccessor(ctx context.Context, cluster cl
cfg: config,
})

delegatingClient := client.NewDelegatingClient(client.NewDelegatingClientInput{
delegatingClient, err := client.NewDelegatingClient(client.NewDelegatingClientInput{
CacheReader: cache,
Client: c,
})
if err != nil {
return nil, err
}

return &clusterAccessor{
cache: cache,
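One more controller-runtime v0.7 change surfaces in the hunk above: client.NewDelegatingClient now returns an error alongside the client, so callers such as newClusterAccessor must handle it. A short sketch of the same construction with explanatory comments (the wrapper function name is illustrative, not part of this diff):

    package example // illustrative

    import (
        "sigs.k8s.io/controller-runtime/pkg/cache"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // newDelegating wraps an informer cache and a live client into the
    // read-from-cache, write-to-API client that a clusterAccessor holds.
    func newDelegating(cacheReader cache.Cache, c client.Client) (client.Client, error) {
        return client.NewDelegatingClient(client.NewDelegatingClientInput{
            CacheReader: cacheReader, // Get/List are served from the informer cache
            Client:      c,           // writes and everything else go straight to the API server
        })
    }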
50 changes: 50 additions & 0 deletions controllers/remote/cluster_cache_fake.go
@@ -0,0 +1,50 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remote

import (
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// NewTestClusterCacheTracker creates a new fake ClusterCacheTracker that can be used by unit tests with fake client.
func NewTestClusterCacheTracker(log logr.Logger, cl client.Client, scheme *runtime.Scheme, objKey client.ObjectKey, watchObjects ...string) *ClusterCacheTracker {
testCacheTracker := &ClusterCacheTracker{
log: log,
client: cl,
scheme: scheme,
clusterAccessors: make(map[client.ObjectKey]*clusterAccessor),
}

delegatingClient, err := client.NewDelegatingClient(client.NewDelegatingClientInput{
CacheReader: cl,
Client: cl,
})
if err != nil {
panic(err)
}

testCacheTracker.clusterAccessors[objKey] = &clusterAccessor{

cache: nil,
delegatingClient: delegatingClient,
watches: sets.NewString(watchObjects...),
}
return testCacheTracker
}
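NewTestClusterCacheTracker is the test helper that previously lived in cluster_cache.go (removed above); relocating it keeps fake-client plumbing out of the production path while preserving the exported signature. A hedged sketch of how a unit test might consume it, assuming the import path and all object names below (none of them are part of this diff):

    package remote_test // illustrative

    import (
        "context"
        "testing"

        "k8s.io/apimachinery/pkg/runtime"
        clientgoscheme "k8s.io/client-go/kubernetes/scheme"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/client/fake"

        "sigs.k8s.io/cluster-api/controllers/remote"
    )

    func TestTrackerBackedByFakeClient(t *testing.T) {
        scheme := runtime.NewScheme()
        _ = clientgoscheme.AddToScheme(scheme)

        // Fake client standing in for the workload cluster's API server.
        cl := fake.NewClientBuilder().WithScheme(scheme).Build()
        clusterKey := client.ObjectKey{Namespace: "default", Name: "test-cluster"}

        // Tracker whose accessor for clusterKey is backed by cl and which
        // records an existing watch on "Node" (the watch name is illustrative).
        tracker := remote.NewTestClusterCacheTracker(ctrl.Log, cl, scheme, clusterKey, "Node")

        remoteClient, err := tracker.GetClient(context.Background(), clusterKey)
        if err != nil {
            t.Fatal(err)
        }
        _ = remoteClient // behaves like any client.Client over the fake objects
    }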
13 changes: 6 additions & 7 deletions controlplane/kubeadm/internal/workload_cluster_coredns_test.go
@@ -27,7 +27,6 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4"
kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4"
@@ -480,7 +479,7 @@ func TestUpdateCoreDNSCorefile(t *testing.T) {
t.Run("returns error if migrate failed to update corefile", func(t *testing.T) {
g := NewWithT(t)
objs := []client.Object{depl, cm}
fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme, objs...)
fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build()
fakeMigrator := &fakeMigrator{
migrateErr: errors.New("failed to migrate"),
}
@@ -512,7 +511,7 @@ func TestUpdateCoreDNSCorefile(t *testing.T) {
// Not including the deployment so as to fail early and verify that
// the intermediate config map update occurred
objs := []client.Object{cm}
fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme, objs...)
fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build()
fakeMigrator := &fakeMigrator{
migratedCorefile: "updated-core-file",
}
@@ -544,7 +543,7 @@ func TestUpdateCoreDNSCorefile(t *testing.T) {

g := NewWithT(t)
objs := []client.Object{depl, cm}
fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme, objs...)
fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build()
fakeMigrator := &fakeMigrator{
migratedCorefile: "updated-core-file",
}
@@ -747,7 +746,7 @@ func TestGetCoreDNSInfo(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme, tt.objs...)
fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()
w := &Workload{
Client: fakeClient,
}
@@ -857,7 +856,7 @@ scheduler: {}`,
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme, tt.objs...)
fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()
w := &Workload{
Client: fakeClient,
}
@@ -961,7 +960,7 @@ func TestUpdateCoreDNSDeployment(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme, tt.objs...)
fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()

w := &Workload{
Client: fakeClient,
(Diffs for the remaining changed files are not shown.)