From 5f0b24770d430dc3206746ba1857c4ef51102ab5 Mon Sep 17 00:00:00 2001 From: Ahmad Nurus S Date: Tue, 22 Jun 2021 19:55:19 +0700 Subject: [PATCH] Refactor cloud services into Reconcile/Delete pattern --- cloud/defaults.go | 22 + cloud/doc.go | 18 + cloud/gcperrors/errors.go | 10 + cloud/interfaces.go | 98 +++++ cloud/scope/clients.go | 43 +- cloud/scope/cluster.go | 274 ++++++++++--- cloud/scope/machine.go | 221 ++++++++-- cloud/services/compute/firewalls.go | 118 ------ cloud/services/compute/firewalls/doc.go | 18 + cloud/services/compute/firewalls/reconcile.go | 67 ++++ cloud/services/compute/firewalls/service.go | 55 +++ cloud/services/compute/instancegroup.go | 147 ------- cloud/services/compute/instances.go | 261 ------------ cloud/services/compute/instances/doc.go | 18 + cloud/services/compute/instances/reconcile.go | 208 ++++++++++ .../compute/instances/reconcile_test.go | 228 +++++++++++ cloud/services/compute/instances/service.go | 65 +++ cloud/services/compute/loadbalancers.go | 309 -------------- cloud/services/compute/loadbalancers/doc.go | 18 + .../compute/loadbalancers/reconcile.go | 379 ++++++++++++++++++ .../services/compute/loadbalancers/service.go | 103 +++++ cloud/services/compute/network.go | 172 -------- cloud/services/compute/networks/doc.go | 18 + cloud/services/compute/networks/reconcile.go | 149 +++++++ cloud/services/compute/networks/service.go | 63 +++ cloud/services/compute/regions.go | 47 --- cloud/services/compute/service.go | 81 ---- cloud/wait/wait.go | 81 ---- controllers/gcpcluster_controller.go | 191 ++++----- controllers/gcpcluster_controller_test.go | 2 - controllers/gcpmachine_controller.go | 349 +++++----------- controllers/gcpmachine_controller_test.go | 2 - .../gcpmachine_controller_unit_test.go | 9 +- go.mod | 9 +- go.sum | 9 +- main.go | 2 - 36 files changed, 2156 insertions(+), 1708 deletions(-) create mode 100644 cloud/defaults.go create mode 100644 cloud/doc.go create mode 100644 cloud/interfaces.go delete mode 100644 cloud/services/compute/firewalls.go create mode 100644 cloud/services/compute/firewalls/doc.go create mode 100644 cloud/services/compute/firewalls/reconcile.go create mode 100644 cloud/services/compute/firewalls/service.go delete mode 100644 cloud/services/compute/instancegroup.go delete mode 100644 cloud/services/compute/instances.go create mode 100644 cloud/services/compute/instances/doc.go create mode 100644 cloud/services/compute/instances/reconcile.go create mode 100644 cloud/services/compute/instances/reconcile_test.go create mode 100644 cloud/services/compute/instances/service.go delete mode 100644 cloud/services/compute/loadbalancers.go create mode 100644 cloud/services/compute/loadbalancers/doc.go create mode 100644 cloud/services/compute/loadbalancers/reconcile.go create mode 100644 cloud/services/compute/loadbalancers/service.go delete mode 100644 cloud/services/compute/network.go create mode 100644 cloud/services/compute/networks/doc.go create mode 100644 cloud/services/compute/networks/reconcile.go create mode 100644 cloud/services/compute/networks/service.go delete mode 100644 cloud/services/compute/regions.go delete mode 100644 cloud/services/compute/service.go delete mode 100644 cloud/wait/wait.go diff --git a/cloud/defaults.go b/cloud/defaults.go new file mode 100644 index 000000000..1b1eea703 --- /dev/null +++ b/cloud/defaults.go @@ -0,0 +1,22 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +const ( + // ProviderIDPrefix is the gce provider id prefix. + ProviderIDPrefix = "gce://" +) diff --git a/cloud/doc.go b/cloud/doc.go new file mode 100644 index 000000000..fe44ed3b2 --- /dev/null +++ b/cloud/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cloud implement cloud resources lifecycle. +package cloud diff --git a/cloud/gcperrors/errors.go b/cloud/gcperrors/errors.go index e4b7c3b67..77a480051 100644 --- a/cloud/gcperrors/errors.go +++ b/cloud/gcperrors/errors.go @@ -33,3 +33,13 @@ func IsNotFound(err error) bool { return ok && ae.Code == http.StatusNotFound } + +// IgnoreNotFound ignore Google API not found error and return nil. +// Otherwise return the actual error. +func IgnoreNotFound(err error) error { + if IsNotFound(err) { + return nil + } + + return err +} diff --git a/cloud/interfaces.go b/cloud/interfaces.go new file mode 100644 index 000000000..30d37a918 --- /dev/null +++ b/cloud/interfaces.go @@ -0,0 +1,98 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "context" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + + corev1 "k8s.io/api/core/v1" + infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + capierrors "sigs.k8s.io/cluster-api/errors" +) + +// Cloud alias for cloud.Cloud interface. +type Cloud = cloud.Cloud + +// Reconciler is a generic interface used by components offering a type of service. +type Reconciler interface { + Reconcile(ctx context.Context) error + Delete(ctx context.Context) error +} + +// Client is an interface which can get cloud client. +type Client interface { + Cloud() Cloud +} + +// ClusterGetter is an interface which can get cluster informations. 
+type ClusterGetter interface { + Client + Project() string + Region() string + Name() string + Namespace() string + NetworkName() string + Network() *infrav1.Network + AdditionalLabels() infrav1.Labels + FailureDomains() clusterv1.FailureDomains + ControlPlaneEndpoint() clusterv1.APIEndpoint +} + +// ClusterSetter is an interface which can set cluster informations. +type ClusterSetter interface { + SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) +} + +// Cluster is an interface which can get and set cluster informations. +type Cluster interface { + ClusterGetter + ClusterSetter +} + +// MachineGetter is an interface which can get machine informations. +type MachineGetter interface { + Client + Name() string + Namespace() string + Zone() string + Role() string + IsControlPlane() bool + ControlPlaneGroupName() string + GetInstanceID() *string + GetProviderID() string + GetBootstrapData() (string, error) + GetInstanceStatus() *infrav1.InstanceStatus +} + +// MachineSetter is an interface which can set machine informations. +type MachineSetter interface { + SetProviderID() + SetInstanceStatus(v infrav1.InstanceStatus) + SetFailureMessage(v error) + SetFailureReason(v capierrors.MachineStatusError) + SetAnnotation(key, value string) + SetAddresses(addressList []corev1.NodeAddress) +} + +// Machine is an interface which can get and set machine informations. +type Machine interface { + MachineGetter + MachineSetter +} diff --git a/cloud/scope/clients.go b/cloud/scope/clients.go index 231dc711f..01b0b6b10 100644 --- a/cloud/scope/clients.go +++ b/cloud/scope/clients.go @@ -17,10 +17,47 @@ limitations under the License. package scope import ( + "context" + "time" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + computebeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" + + "k8s.io/client-go/util/flowcontrol" ) -// GCPClients contains all the gcp clients used by the scopes. -type GCPClients struct { - Compute *compute.Service +// GCPServices contains all the gcp services used by the scopes. +type GCPServices struct { + Compute *compute.Service + ComputeBeta *computebeta.Service +} + +// GCPRateLimiter implements cloud.RateLimiter. +type GCPRateLimiter struct{} + +// Accept blocks until the operation can be performed. +func (rl *GCPRateLimiter) Accept(ctx context.Context, key *cloud.RateLimitKey) error { + if key.Operation == "Get" && key.Service == "Operations" { + // Wait a minimum amount of time regardless of rate limiter. 
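The cloud.Reconciler interface introduced above is the seam of this refactor: each compute service in the patch (firewalls, networks, loadbalancers, instances) is reduced to Reconcile(ctx) and Delete(ctx). As a minimal sketch of how a controller can drive such services generically — the helper below is illustrative only and not part of this patch; the real wiring lives in controllers/gcpcluster_controller.go, which is not reproduced here:

package example // illustrative sketch, not part of this patch

import (
	"context"

	"sigs.k8s.io/cluster-api-provider-gcp/cloud"
)

// runReconcilers applies each service in order during normal reconciliation
// and tears them down in reverse order when the owning object is being deleted.
func runReconcilers(ctx context.Context, deleting bool, reconcilers ...cloud.Reconciler) error {
	if deleting {
		for i := len(reconcilers) - 1; i >= 0; i-- {
			if err := reconcilers[i].Delete(ctx); err != nil {
				return err
			}
		}
		return nil
	}

	for _, r := range reconcilers {
		if err := r.Reconcile(ctx); err != nil {
			return err
		}
	}

	return nil
}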
+ rl := &cloud.MinimumRateLimiter{ + // Convert flowcontrol.RateLimiter into cloud.RateLimiter + RateLimiter: &cloud.AcceptRateLimiter{ + Acceptor: flowcontrol.NewTokenBucketRateLimiter(5, 5), // 5 + }, + Minimum: time.Second, + } + + return rl.Accept(ctx, key) + } + return nil +} + +func newCloud(project string, service GCPServices) cloud.Cloud { + return cloud.NewGCE(&cloud.Service{ + GA: service.Compute, + Beta: service.ComputeBeta, + ProjectRouter: &cloud.SingleProjectRouter{ID: project}, + RateLimiter: &GCPRateLimiter{}, + }) } diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index 663283154..608b1e362 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -19,22 +19,26 @@ package scope import ( "context" "fmt" + "strconv" + "time" - "github.com/go-logr/logr" "github.com/pkg/errors" + computebeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" - "k8s.io/klog/v2/klogr" - infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" + + "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-gcp/cloud" ) // ClusterScopeParams defines the input parameters used to create a new Scope. type ClusterScopeParams struct { - GCPClients + GCPServices Client client.Client - Logger logr.Logger Cluster *clusterv1.Cluster GCPCluster *infrav1.GCPCluster } @@ -49,17 +53,22 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { return nil, errors.New("failed to generate new scope from nil GCPCluster") } - if params.Logger == nil { - params.Logger = klogr.New() - } - computeSvc, err := compute.NewService(context.TODO()) if err != nil { return nil, errors.Errorf("failed to create gcp compute client: %v", err) } - if params.GCPClients.Compute == nil { - params.GCPClients.Compute = computeSvc + computeBetaSvc, err := computebeta.NewService(context.TODO()) + if err != nil { + return nil, errors.Errorf("failed to create gcp compute beta client: %v", err) + } + + if params.GCPServices.Compute == nil { + params.GCPServices.Compute = computeSvc + } + + if params.GCPServices.ComputeBeta == nil { + params.GCPServices.ComputeBeta = computeBetaSvc } helper, err := patch.NewHelper(params.GCPCluster, params.Client) @@ -68,24 +77,29 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { } return &ClusterScope{ - Logger: params.Logger, client: params.Client, - GCPClients: params.GCPClients, Cluster: params.Cluster, GCPCluster: params.GCPCluster, + GCPServices: params.GCPServices, patchHelper: helper, }, nil } // ClusterScope defines the basic context for an actuator to operate upon. type ClusterScope struct { - logr.Logger client client.Client patchHelper *patch.Helper - GCPClients Cluster *clusterv1.Cluster GCPCluster *infrav1.GCPCluster + GCPServices +} + +// ANCHOR: ClusterGetter + +// Cloud returns initialized cloud. +func (s *ClusterScope) Cloud() cloud.Cloud { + return newCloud(s.Project(), s.GCPServices) } // Project returns the current project name. @@ -93,18 +107,24 @@ func (s *ClusterScope) Project() string { return s.GCPCluster.Spec.Project } -// NetworkName returns the cluster network unique identifier. -func (s *ClusterScope) NetworkName() string { - if s.GCPCluster.Spec.Network.Name != nil { - return *s.GCPCluster.Spec.Network.Name - } +// Region returns the cluster region. 
+func (s *ClusterScope) Region() string { + return s.GCPCluster.Spec.Region +} - return "default" +// Name returns the cluster name. +func (s *ClusterScope) Name() string { + return s.Cluster.Name } -// NetworkSelfLink returns the full self link to the network. -func (s *ClusterScope) NetworkSelfLink() string { - return *s.GCPCluster.Status.Network.SelfLink +// Namespace returns the cluster namespace. +func (s *ClusterScope) Namespace() string { + return s.Cluster.Namespace +} + +// NetworkName returns the cluster network unique identifier. +func (s *ClusterScope) NetworkName() string { + return pointer.StringDeref(s.GCPCluster.Spec.Network.Name, "default") } // Network returns the cluster network object. @@ -112,58 +132,200 @@ func (s *ClusterScope) Network() *infrav1.Network { return &s.GCPCluster.Status.Network } -// Subnets returns the cluster subnets. -func (s *ClusterScope) Subnets() infrav1.Subnets { - return s.GCPCluster.Spec.Network.Subnets +// AdditionalLabels returns the cluster additional labels. +func (s *ClusterScope) AdditionalLabels() infrav1.Labels { + return s.GCPCluster.Spec.AdditionalLabels } -// Name returns the cluster name. -func (s *ClusterScope) Name() string { - return s.Cluster.Name +// ControlPlaneEndpoint returns the cluster control-plane endpoint. +func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { + endpoint := s.GCPCluster.Spec.ControlPlaneEndpoint + endpoint.Port = pointer.Int32Deref(s.Cluster.Spec.ClusterNetwork.APIServerPort, 443) + return endpoint } -// Namespace returns the cluster namespace. -func (s *ClusterScope) Namespace() string { - return s.Cluster.Namespace +// FailureDomains returns the cluster failure domains. +func (s *ClusterScope) FailureDomains() clusterv1.FailureDomains { + return s.GCPCluster.Status.FailureDomains } -// Region returns the cluster region. -func (s *ClusterScope) Region() string { - return s.GCPCluster.Spec.Region +// ANCHOR_END: ClusterGetter + +// ANCHOR: ClusterSetter + +// SetReady sets cluster ready status. +func (s *ClusterScope) SetReady() { + s.GCPCluster.Status.Ready = true +} + +// SetFailureDomains sets cluster failure domains. +func (s *ClusterScope) SetFailureDomains(fd clusterv1.FailureDomains) { + s.GCPCluster.Status.FailureDomains = fd +} + +// SetControlPlaneEndpoint sets cluster control-plane endpoint. +func (s *ClusterScope) SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) { + s.GCPCluster.Spec.ControlPlaneEndpoint = endpoint +} + +// ANCHOR_END: ClusterSetter + +// ANCHOR: ClusterNetworkSpec + +// NetworkSpec returns google compute network spec. +func (s *ClusterScope) NetworkSpec() *compute.Network { + createSubnet := pointer.BoolDeref(s.GCPCluster.Spec.Network.AutoCreateSubnetworks, true) + network := &compute.Network{ + Name: s.NetworkName(), + Description: infrav1.ClusterTagKey(s.Name()), + AutoCreateSubnetworks: createSubnet, + } + + return network } -// LoadBalancerFrontendPort returns the loadbalancer frontend if specified -// in the cluster resource's network configuration. -func (s *ClusterScope) LoadBalancerFrontendPort() int64 { - if s.Cluster.Spec.ClusterNetwork.APIServerPort != nil { - return int64(*s.Cluster.Spec.ClusterNetwork.APIServerPort) +// NatRouterSpec returns google compute nat router spec. 
+func (s *ClusterScope) NatRouterSpec() *compute.Router { + networkSpec := s.NetworkSpec() + return &compute.Router{ + Name: fmt.Sprintf("%s-%s", networkSpec.Name, "router"), + Nats: []*compute.RouterNat{ + { + Name: fmt.Sprintf("%s-%s", networkSpec.Name, "nat"), + NatIpAllocateOption: "AUTO_ONLY", + SourceSubnetworkIpRangesToNat: "ALL_SUBNETWORKS_ALL_IP_RANGES", + }, + }, } +} - return 443 +// ANCHOR_END: ClusterNetworkSpec + +// ANCHOR: ClusterFirewallSpec + +// FirewallRulesSpec returns google compute firewall spec. +func (s *ClusterScope) FirewallRulesSpec() []*compute.Firewall { + network := s.Network() + firewallRules := []*compute.Firewall{ + { + Name: fmt.Sprintf("allow-%s-healthchecks", s.Name()), + Network: *network.SelfLink, + Allowed: []*compute.FirewallAllowed{ + { + IPProtocol: "TCP", + Ports: []string{ + strconv.FormatInt(6443, 10), + }, + }, + }, + Direction: "INGRESS", + SourceRanges: []string{ + "35.191.0.0/16", + "130.211.0.0/22", + }, + TargetTags: []string{ + fmt.Sprintf("%s-control-plane", s.Name()), + }, + }, + { + Name: fmt.Sprintf("allow-%s-cluster", s.Name()), + Network: *network.SelfLink, + Allowed: []*compute.FirewallAllowed{ + { + IPProtocol: "all", + }, + }, + Direction: "INGRESS", + SourceTags: []string{ + fmt.Sprintf("%s-control-plane", s.Name()), + fmt.Sprintf("%s-node", s.Name()), + }, + TargetTags: []string{ + fmt.Sprintf("%s-control-plane", s.Name()), + fmt.Sprintf("%s-node", s.Name()), + }, + }, + } + + return firewallRules } -// LoadBalancerBackendPort returns the loadbalancer backend if specified. -func (s *ClusterScope) LoadBalancerBackendPort() int64 { - if s.GCPCluster.Spec.Network.LoadBalancerBackendPort != nil { - return int64(*s.GCPCluster.Spec.Network.LoadBalancerBackendPort) +// ANCHOR_END: ClusterFirewallSpec + +// ANCHOR: ClusterControlPlaneSpec + +// AddressSpec returns google compute address spec. +func (s *ClusterScope) AddressSpec() *compute.Address { + return &compute.Address{ + Name: fmt.Sprintf("%s-%s", s.Name(), infrav1.APIServerRoleTagValue), + AddressType: "EXTERNAL", + IpVersion: "IPV4", } +} - return 6443 +// BackendServiceSpec returns google compute backend-service spec. +func (s *ClusterScope) BackendServiceSpec() *compute.BackendService { + return &compute.BackendService{ + Name: fmt.Sprintf("%s-%s", s.Name(), infrav1.APIServerRoleTagValue), + LoadBalancingScheme: "EXTERNAL", + PortName: "apiserver", + Protocol: "TCP", + TimeoutSec: int64((10 * time.Minute).Seconds()), + } } -// ControlPlaneConfigMapName returns the name of the ConfigMap used to -// coordinate the bootstrapping of control plane nodes. -func (s *ClusterScope) ControlPlaneConfigMapName() string { - return fmt.Sprintf("%s-controlplane", s.Cluster.UID) +// ForwardingRuleSpec returns google compute forwarding-rule spec. +func (s *ClusterScope) ForwardingRuleSpec() *compute.ForwardingRule { + port := pointer.Int32Deref(s.Cluster.Spec.ClusterNetwork.APIServerPort, 443) + portRange := fmt.Sprintf("%d-%d", port, port) + return &compute.ForwardingRule{ + Name: fmt.Sprintf("%s-%s", s.Name(), infrav1.APIServerRoleTagValue), + IPProtocol: "TCP", + LoadBalancingScheme: "EXTERNAL", + PortRange: portRange, + } } -// ListOptionsLabelSelector returns a ListOptions with a label selector for clusterName. -func (s *ClusterScope) ListOptionsLabelSelector() client.ListOption { - return client.MatchingLabels(map[string]string{ - clusterv1.ClusterLabelName: s.Cluster.Name, - }) +// HealthCheckSpec returns google compute health-check spec. 
+func (s *ClusterScope) HealthCheckSpec() *compute.HealthCheck { + return &compute.HealthCheck{ + Name: fmt.Sprintf("%s-%s", s.Name(), infrav1.APIServerRoleTagValue), + Type: "SSL", + SslHealthCheck: &compute.SSLHealthCheck{ + Port: 6443, + PortSpecification: "USE_FIXED_PORT", + }, + CheckIntervalSec: 10, + TimeoutSec: 5, + HealthyThreshold: 5, + UnhealthyThreshold: 3, + } +} + +// InstanceGroupSpec returns google compute instance-group spec. +func (s *ClusterScope) InstanceGroupSpec(zone string) *compute.InstanceGroup { + port := pointer.Int32Deref(s.GCPCluster.Spec.Network.LoadBalancerBackendPort, 6443) + return &compute.InstanceGroup{ + Name: fmt.Sprintf("%s-%s-%s", s.Name(), infrav1.APIServerRoleTagValue, zone), + NamedPorts: []*compute.NamedPort{ + { + Name: "apiserver", + Port: int64(port), + }, + }, + } } +// TargetTCPProxySpec returns google compute target-tcp-proxy spec. +func (s *ClusterScope) TargetTCPProxySpec() *computebeta.TargetTcpProxy { + return &computebeta.TargetTcpProxy{ + Name: fmt.Sprintf("%s-%s", s.Name(), infrav1.APIServerRoleTagValue), + ProxyHeader: "NONE", + } +} + +// ANCHOR_END: ClusterControlPlaneSpec + // PatchObject persists the cluster configuration and status. func (s *ClusterScope) PatchObject() error { return s.patchHelper.Patch(context.TODO(), s.GCPCluster) diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go index e84bfe03a..33a9bb332 100644 --- a/cloud/scope/machine.go +++ b/cloud/scope/machine.go @@ -19,16 +19,20 @@ package scope import ( "context" + "fmt" + "path" + "strings" - "github.com/go-logr/logr" "github.com/pkg/errors" + "golang.org/x/mod/semver" + "google.golang.org/api/compute/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog/v2/klogr" "k8s.io/utils/pointer" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-gcp/cloud" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/controllers/noderefutil" @@ -40,13 +44,10 @@ import ( // MachineScopeParams defines the input parameters used to create a new MachineScope. type MachineScopeParams struct { - GCPClients - Client client.Client - Logger logr.Logger - Cluster *clusterv1.Cluster - Machine *clusterv1.Machine - GCPCluster *infrav1.GCPCluster - GCPMachine *infrav1.GCPMachine + Client client.Client + ClusterGetter cloud.ClusterGetter + Machine *clusterv1.Machine + GCPMachine *infrav1.GCPMachine } // NewMachineScope creates a new MachineScope from the supplied parameters. 
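The ClusterScope above pairs the Cluster/GCPCluster objects with spec helpers that describe the desired GCP resources, so services never assemble compute structs themselves. A rough usage sketch, assuming a controller-runtime client and both API objects have already been fetched (the function name and flow are illustrative, not part of this patch):

package example // illustrative sketch, not part of this patch

import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4"
	"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
)

// newScopeAndSpecs builds a ClusterScope once and lets each service derive its
// desired state from the spec helpers defined above.
func newScopeAndSpecs(c client.Client, cluster *clusterv1.Cluster, gcpCluster *infrav1.GCPCluster) error {
	clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
		Client:     c,
		Cluster:    cluster,
		GCPCluster: gcpCluster,
	})
	if err != nil {
		return err
	}

	_ = clusterScope.NetworkSpec()     // desired compute.Network
	_ = clusterScope.AddressSpec()     // desired API server compute.Address
	_ = clusterScope.HealthCheckSpec() // desired API server compute.HealthCheck

	return clusterScope.PatchObject()
}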
@@ -58,51 +59,38 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { if params.Machine == nil { return nil, errors.New("machine is required when creating a MachineScope") } - if params.Cluster == nil { - return nil, errors.New("cluster is required when creating a MachineScope") - } - if params.GCPCluster == nil { - return nil, errors.New("gcp cluster is required when creating a MachineScope") - } if params.GCPMachine == nil { return nil, errors.New("gcp machine is required when creating a MachineScope") } - if params.Logger == nil { - params.Logger = klogr.New() - } - helper, err := patch.NewHelper(params.GCPMachine, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } return &MachineScope{ - client: params.Client, - Cluster: params.Cluster, - Machine: params.Machine, - GCPCluster: params.GCPCluster, - GCPMachine: params.GCPMachine, - Logger: params.Logger, - patchHelper: helper, + client: params.Client, + Machine: params.Machine, + GCPMachine: params.GCPMachine, + ClusterGetter: params.ClusterGetter, + patchHelper: helper, }, nil } // MachineScope defines a scope defined around a machine and its cluster. type MachineScope struct { - logr.Logger - client client.Client - patchHelper *patch.Helper - - Cluster *clusterv1.Cluster - Machine *clusterv1.Machine - GCPCluster *infrav1.GCPCluster - GCPMachine *infrav1.GCPMachine + client client.Client + patchHelper *patch.Helper + ClusterGetter cloud.ClusterGetter + Machine *clusterv1.Machine + GCPMachine *infrav1.GCPMachine } -// Region returns the GCPMachine region. -func (m *MachineScope) Region() string { - return m.GCPCluster.Spec.Region +// ANCHOR: MachineGetter + +// Cloud returns initialized cloud. +func (m *MachineScope) Cloud() cloud.Cloud { + return m.ClusterGetter.Cloud() } // Zone returns the FailureDomain for the GCPMachine. @@ -124,6 +112,11 @@ func (m *MachineScope) Namespace() string { return m.GCPMachine.Namespace } +// ControlPlaneGroupName returns the control-plane instance group name. +func (m *MachineScope) ControlPlaneGroupName() string { + return fmt.Sprintf("%s-%s-%s", m.ClusterGetter.Name(), infrav1.APIServerRoleTagValue, m.Zone()) +} + // IsControlPlane returns true if the machine is a control plane. func (m *MachineScope) IsControlPlane() bool { return util.IsControlPlaneMachine(m.Machine) @@ -157,9 +150,14 @@ func (m *MachineScope) GetProviderID() string { return "" } +// ANCHOR_END: MachineGetter + +// ANCHOR: MachineSetter + // SetProviderID sets the GCPMachine providerID in spec. -func (m *MachineScope) SetProviderID(v string) { - m.GCPMachine.Spec.ProviderID = pointer.StringPtr(v) +func (m *MachineScope) SetProviderID() { + providerID := cloud.ProviderIDPrefix + path.Join(m.ClusterGetter.Project(), m.Zone(), m.Name()) + m.GCPMachine.Spec.ProviderID = pointer.StringPtr(providerID) } // GetInstanceStatus returns the GCPMachine instance status. @@ -200,6 +198,151 @@ func (m *MachineScope) SetAddresses(addressList []corev1.NodeAddress) { m.GCPMachine.Status.Addresses = addressList } +// ANCHOR_END: MachineSetter + +// ANCHOR: MachineInstanceSpec + +// InstanceImageSpec returns compute instance image attched-disk spec. 
+func (m *MachineScope) InstanceImageSpec() *compute.AttachedDisk { + image := "capi-ubuntu-1804-k8s-" + strings.ReplaceAll(semver.MajorMinor(*m.Machine.Spec.Version), ".", "-") + sourceImage := path.Join("projects", m.ClusterGetter.Project(), "global", "images", "family", image) + if m.GCPMachine.Spec.Image != nil { + sourceImage = *m.GCPMachine.Spec.Image + } else if m.GCPMachine.Spec.ImageFamily != nil { + sourceImage = *m.GCPMachine.Spec.ImageFamily + } + + diskType := infrav1.PdStandardDiskType + if t := m.GCPMachine.Spec.RootDeviceType; t != nil { + diskType = *t + } + + return &compute.AttachedDisk{ + AutoDelete: true, + Boot: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskSizeGb: m.GCPMachine.Spec.RootDeviceSize, + DiskType: path.Join("zones", m.Zone(), "diskTypes", string(diskType)), + SourceImage: sourceImage, + }, + } +} + +// InstanceAdditionalDiskSpec returns compute instance additional attched-disk spec. +func (m *MachineScope) InstanceAdditionalDiskSpec() []*compute.AttachedDisk { + additionalDisks := make([]*compute.AttachedDisk, 0, len(m.GCPMachine.Spec.AdditionalDisks)) + for _, disk := range m.GCPMachine.Spec.AdditionalDisks { + additionalDisk := &compute.AttachedDisk{ + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskSizeGb: pointer.Int64PtrDerefOr(disk.Size, 30), + DiskType: path.Join("zones", m.Zone(), "diskTypes", string(*disk.DeviceType)), + }, + } + if additionalDisk.InitializeParams.DiskType == string(infrav1.LocalSsdDiskType) { + additionalDisk.Type = "SCRATCH" // Default is PERSISTENT. + // Override the Disk size + additionalDisk.InitializeParams.DiskSizeGb = 375 + // For local SSDs set interface to NVME (instead of default SCSI) which is faster. + // Most OS images would work with both NVME and SCSI disks but some may work + // considerably faster with NVME. + // https://cloud.google.com/compute/docs/disks/local-ssd#choose_an_interface + additionalDisk.Interface = "NVME" + } + additionalDisks = append(additionalDisks, additionalDisk) + } + + return additionalDisks +} + +// InstanceNetworkInterfaceSpec returns compute network interface spec. +func (m *MachineScope) InstanceNetworkInterfaceSpec() *compute.NetworkInterface { + networkInterface := &compute.NetworkInterface{ + Network: path.Join("projects", m.ClusterGetter.Project(), "global", "networks", m.ClusterGetter.NetworkName()), + } + + if m.GCPMachine.Spec.PublicIP != nil && *m.GCPMachine.Spec.PublicIP { + networkInterface.AccessConfigs = []*compute.AccessConfig{ + { + Type: "ONE_TO_ONE_NAT", + Name: "External NAT", + }, + } + } + + if m.GCPMachine.Spec.Subnet != nil { + networkInterface.Subnetwork = path.Join("regions", m.ClusterGetter.Region(), "subnetworks", *m.GCPMachine.Spec.Subnet) + } + + return networkInterface +} + +// InstanceServiceAccountsSpec returns service-account spec. +func (m *MachineScope) InstanceServiceAccountsSpec() *compute.ServiceAccount { + serviceAccount := &compute.ServiceAccount{ + Email: "default", + Scopes: []string{ + compute.CloudPlatformScope, + }, + } + + if m.GCPMachine.Spec.ServiceAccount != nil { + serviceAccount.Email = m.GCPMachine.Spec.ServiceAccount.Email + serviceAccount.Scopes = m.GCPMachine.Spec.ServiceAccount.Scopes + } + + return serviceAccount +} + +// InstanceAdditionalMetadataSpec returns additional metadata spec. 
+func (m *MachineScope) InstanceAdditionalMetadataSpec() *compute.Metadata { + metadata := new(compute.Metadata) + for _, additionalMetadata := range m.GCPMachine.Spec.AdditionalMetadata { + metadata.Items = append(metadata.Items, &compute.MetadataItems{ + Key: additionalMetadata.Key, + Value: additionalMetadata.Value, + }) + } + + return metadata +} + +// InstanceSpec returns instance spec. +func (m *MachineScope) InstanceSpec() *compute.Instance { + instance := &compute.Instance{ + Name: m.Name(), + Zone: m.Zone(), + MachineType: path.Join("zones", m.Zone(), "machineTypes", m.GCPMachine.Spec.InstanceType), + CanIpForward: true, + Tags: &compute.Tags{ + Items: append( + m.GCPMachine.Spec.AdditionalNetworkTags, + fmt.Sprintf("%s-%s", m.ClusterGetter.Name(), m.Role()), + m.ClusterGetter.Name(), + ), + }, + Labels: infrav1.Build(infrav1.BuildParams{ + ClusterName: m.ClusterGetter.Name(), + Lifecycle: infrav1.ResourceLifecycleOwned, + Role: pointer.StringPtr(m.Role()), + // TODO(vincepri): Check what needs to be added for the cloud provider label. + Additional: m.ClusterGetter.AdditionalLabels().AddLabels(m.GCPMachine.Spec.AdditionalLabels), + }), + Scheduling: &compute.Scheduling{ + Preemptible: m.GCPMachine.Spec.Preemptible, + }, + } + + instance.Disks = append(instance.Disks, m.InstanceImageSpec()) + instance.Disks = append(instance.Disks, m.InstanceAdditionalDiskSpec()...) + instance.Metadata = m.InstanceAdditionalMetadataSpec() + instance.ServiceAccounts = append(instance.ServiceAccounts, m.InstanceServiceAccountsSpec()) + instance.NetworkInterfaces = append(instance.NetworkInterfaces, m.InstanceNetworkInterfaceSpec()) + return instance +} + +// ANCHOR_END: MachineInstanceSpec + // GetBootstrapData returns the bootstrap data from the secret in the Machine's bootstrap.dataSecretName. func (m *MachineScope) GetBootstrapData() (string, error) { if m.Machine.Spec.Bootstrap.DataSecretName == nil { diff --git a/cloud/services/compute/firewalls.go b/cloud/services/compute/firewalls.go deleted file mode 100644 index c51b6a914..000000000 --- a/cloud/services/compute/firewalls.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package compute - -import ( - "fmt" - "strconv" - - "github.com/pkg/errors" - "google.golang.org/api/compute/v1" - - infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/gcperrors" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/wait" -) - -// ReconcileFirewalls reconciles the firewalls and apply changes if needed. -func (s *Service) ReconcileFirewalls() error { - for _, firewallSpec := range s.getFirewallSpecs() { - // Get or create the firewall rules. 
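Taken together, the MachineScope helpers above assemble the full compute.Instance from the GCPMachine spec, and SetProviderID now derives the node's provider ID from the cluster project, failure-domain zone and machine name. A small sketch of the resulting ID format (project, zone and machine names are sample values):

package example // illustrative sketch, not part of this patch

import (
	"fmt"
	"path"

	"sigs.k8s.io/cluster-api-provider-gcp/cloud"
)

// exampleProviderID mirrors the concatenation done by MachineScope.SetProviderID.
func exampleProviderID() {
	providerID := cloud.ProviderIDPrefix + path.Join("my-proj", "us-central1-c", "my-machine")
	fmt.Println(providerID) // gce://my-proj/us-central1-c/my-machine
}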
- firewall, err := s.firewalls.Get(s.scope.Project(), firewallSpec.Name).Do() - if gcperrors.IsNotFound(err) { - op, err := s.firewalls.Insert(s.scope.Project(), firewallSpec).Do() - if err != nil { - return errors.Wrapf(err, "failed to create firewall rule") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to create firewall rule") - } - firewall, err = s.firewalls.Get(s.scope.Project(), firewallSpec.Name).Do() - if err != nil { - return errors.Wrapf(err, "failed to describe firewall rule") - } - } else if err != nil { - return errors.Wrapf(err, "failed to describe firewall rule") - } - - // Store in the Cluster Status. - if s.scope.Network().FirewallRules == nil { - s.scope.Network().FirewallRules = make(map[string]string) - } - s.scope.Network().FirewallRules[firewall.Name] = firewall.SelfLink - } - - return nil -} - -// DeleteFirewalls deletes all Firewall Rules. -func (s *Service) DeleteFirewalls() error { - for name := range s.scope.Network().FirewallRules { - op, err := s.firewalls.Delete(s.scope.Project(), name).Do() - if opErr := s.checkOrWaitForDeleteOp(op, err); opErr != nil { - return errors.Wrapf(opErr, "failed to delete firewalls") - } - delete(s.scope.Network().FirewallRules, name) - } - - return nil -} - -func (s *Service) getFirewallSpecs() []*compute.Firewall { - return []*compute.Firewall{ - { - Name: fmt.Sprintf("allow-%s-%s-healthchecks", s.scope.Name(), infrav1.APIServerRoleTagValue), - Network: s.scope.NetworkSelfLink(), - Allowed: []*compute.FirewallAllowed{ - { - IPProtocol: "TCP", - Ports: []string{ - strconv.FormatInt(s.scope.LoadBalancerBackendPort(), 10), - }, - }, - }, - Direction: "INGRESS", - SourceRanges: []string{ - // Allow Google's internal IP ranges to perform health checks against our registered API servers. - // For more information, https://cloud.google.com/load-balancing/docs/health-checks#fw-rule. - "35.191.0.0/16", - "130.211.0.0/22", - }, - TargetTags: []string{ - fmt.Sprintf("%s-control-plane", s.scope.Name()), - }, - }, - { - Name: fmt.Sprintf("allow-%s-%s-cluster", s.scope.Name(), infrav1.APIServerRoleTagValue), - Network: s.scope.NetworkSelfLink(), - Allowed: []*compute.FirewallAllowed{ - { - IPProtocol: "all", - }, - }, - Direction: "INGRESS", - SourceTags: []string{ - fmt.Sprintf("%s-control-plane", s.scope.Name()), - fmt.Sprintf("%s-node", s.scope.Name()), - }, - TargetTags: []string{ - fmt.Sprintf("%s-control-plane", s.scope.Name()), - fmt.Sprintf("%s-node", s.scope.Name()), - }, - }, - } -} diff --git a/cloud/services/compute/firewalls/doc.go b/cloud/services/compute/firewalls/doc.go new file mode 100644 index 000000000..ab958e050 --- /dev/null +++ b/cloud/services/compute/firewalls/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package firewalls implements reconciler for cluster firwall components. 
+package firewalls diff --git a/cloud/services/compute/firewalls/reconcile.go b/cloud/services/compute/firewalls/reconcile.go new file mode 100644 index 000000000..141e74891 --- /dev/null +++ b/cloud/services/compute/firewalls/reconcile.go @@ -0,0 +1,67 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package firewalls + +import ( + "context" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + + "sigs.k8s.io/controller-runtime/pkg/log" + + "sigs.k8s.io/cluster-api-provider-gcp/cloud/gcperrors" +) + +// Reconcile reconcile cluster firewall compoenents. +func (s *Service) Reconcile(ctx context.Context) error { + log := log.FromContext(ctx) + log.Info("Reconciling firewall resources") + for _, spec := range s.scope.FirewallRulesSpec() { + log.V(2).Info("Looking firewall", "name", spec.Name) + firewallKey := meta.GlobalKey(spec.Name) + if _, err := s.firewalls.Get(ctx, firewallKey); err != nil { + if !gcperrors.IsNotFound(err) { + return err + } + + log.V(2).Info("Creating firewall", "name", spec.Name) + if err := s.firewalls.Insert(ctx, firewallKey, spec); err != nil { + return err + } + } + } + + return nil +} + +// Delete delete cluster firewall compoenents. +func (s *Service) Delete(ctx context.Context) error { + log := log.FromContext(ctx) + log.Info("Deleting network resources") + for _, spec := range s.scope.FirewallRulesSpec() { + log.V(2).Info("Deleting firewall", "name", spec.Name) + firewallKey := meta.GlobalKey(spec.Name) + if err := s.firewalls.Delete(ctx, firewallKey); err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error deleting firewall", "name", spec.Name) + return err + } + } + } + + return nil +} diff --git a/cloud/services/compute/firewalls/service.go b/cloud/services/compute/firewalls/service.go new file mode 100644 index 000000000..5c25ce477 --- /dev/null +++ b/cloud/services/compute/firewalls/service.go @@ -0,0 +1,55 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package firewalls + +import ( + "context" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "google.golang.org/api/compute/v1" + + "sigs.k8s.io/cluster-api-provider-gcp/cloud" +) + +type firewallsInterface interface { + Get(ctx context.Context, key *meta.Key) (*compute.Firewall, error) + Insert(ctx context.Context, key *meta.Key, obj *compute.Firewall) error + Update(ctx context.Context, key *meta.Key, obj *compute.Firewall) error + Delete(ctx context.Context, key *meta.Key) error +} + +// Scope is an interfaces that hold used methods. +type Scope interface { + cloud.ClusterGetter + FirewallRulesSpec() []*compute.Firewall +} + +// Service implements firewalls reconciler. +type Service struct { + scope Scope + firewalls firewallsInterface +} + +var _ cloud.Reconciler = &Service{} + +// New returns Service from given scope. +func New(scope Scope) *Service { + return &Service{ + scope: scope, + firewalls: scope.Cloud().Firewalls(), + } +} diff --git a/cloud/services/compute/instancegroup.go b/cloud/services/compute/instancegroup.go deleted file mode 100644 index fffa88421..000000000 --- a/cloud/services/compute/instancegroup.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package compute - -import ( - "fmt" - "path" - - "github.com/pkg/errors" - "google.golang.org/api/compute/v1" - - infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/gcperrors" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/wait" -) - -// ReconcileInstanceGroups reconciles the instances groups and apply changes if needed. -func (s *Service) ReconcileInstanceGroups() error { - // Get each available zone. - zones, err := s.GetZones() - if err != nil { - return err - } - - // Reconcile API Server instance groups and record them. - for _, zone := range zones { - name := fmt.Sprintf("%s-%s-%s", s.scope.Name(), infrav1.APIServerRoleTagValue, zone) - group, err := s.instancegroups.Get(s.scope.Project(), zone, name).Do() - switch { - case gcperrors.IsNotFound(err): - continue - case err != nil: - return errors.Wrapf(err, "failed to describe instance group %q", name) - default: - if s.scope.Network().APIServerInstanceGroups == nil { - s.scope.Network().APIServerInstanceGroups = make(map[string]string) - } - s.scope.Network().APIServerInstanceGroups[zone] = group.SelfLink - } - } - - return nil -} - -// DeleteInstanceGroups deletes a instance group. -func (s *Service) DeleteInstanceGroups() error { - for zone, groupSelfLink := range s.scope.Network().APIServerInstanceGroups { - name := path.Base(groupSelfLink) - op, err := s.instancegroups.Delete(s.scope.Project(), zone, name).Do() - if opErr := s.checkOrWaitForDeleteOp(op, err); opErr != nil { - return errors.Wrapf(opErr, "failed to delete instance group") - } - } - - return nil -} - -// GetOrCreateInstanceGroup retrieve an instance group or create it. 
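With the Scope interface and constructor above, the firewalls service plugs straight into a cluster scope: *scope.ClusterScope already provides the cloud.ClusterGetter methods plus FirewallRulesSpec(), so it satisfies firewalls.Scope. A short usage sketch (the surrounding controller function is assumed, not part of this patch):

package example // illustrative sketch, not part of this patch

import (
	"context"

	"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope"
	"sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/firewalls"
)

// reconcileFirewalls shows the intended call site for the new firewalls service.
func reconcileFirewalls(ctx context.Context, clusterScope *scope.ClusterScope) error {
	return firewalls.New(clusterScope).Reconcile(ctx)
}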
-func (s *Service) GetOrCreateInstanceGroup(zone, name string) (*compute.InstanceGroup, error) { - group, err := s.instancegroups.Get(s.scope.Project(), zone, name).Do() - if gcperrors.IsNotFound(err) { - spec := &compute.InstanceGroup{ - Name: name, - Network: s.scope.NetworkSelfLink(), - NamedPorts: []*compute.NamedPort{ - { - Name: "apiserver", - Port: s.scope.LoadBalancerBackendPort(), - }, - }, - } - op, err := s.instancegroups.Insert(s.scope.Project(), zone, spec).Do() - if err != nil { - return nil, errors.Wrapf(err, "failed to create instance group") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return nil, errors.Wrapf(err, "failed to create instance group") - } - group, err = s.instancegroups.Get(s.scope.Project(), zone, name).Do() - if err != nil { - return nil, errors.Wrapf(err, "failed to describe instance group") - } - } else if err != nil { - return nil, errors.Wrapf(err, "failed to describe instance group") - } - - return group, nil -} - -// GetInstanceGroupMembers retrieves the instances for a group. -func (s *Service) GetInstanceGroupMembers(zone, name string) ([]*compute.InstanceWithNamedPorts, error) { - members, err := s.instancegroups. - ListInstances(s.scope.Project(), zone, name, &compute.InstanceGroupsListInstancesRequest{}). - Do() - if err != nil { - return nil, errors.Wrapf(err, "could not list instances in group %q", name) - } - - return members.Items, nil -} - -// EnsureInstanceGroupMember ensure the instance are part of a group. -func (s *Service) EnsureInstanceGroupMember(zone, name string, i *compute.Instance) error { - members, err := s.GetInstanceGroupMembers(zone, name) - if err != nil { - return err - } - - // If the instance is already registered, return early. - for _, registered := range members { - if registered.Instance == i.SelfLink { - return nil - } - } - - // Register the instance with the group - req := &compute.InstanceGroupsAddInstancesRequest{ - Instances: []*compute.InstanceReference{ - { - Instance: i.SelfLink, - }, - }, - } - op, err := s.instancegroups.AddInstances(s.scope.Project(), zone, name, req).Do() - if err != nil { - return errors.Wrapf(err, "failed to add instance to group") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to add instance to group") - } - - return nil -} diff --git a/cloud/services/compute/instances.go b/cloud/services/compute/instances.go deleted file mode 100644 index 1754fc87b..000000000 --- a/cloud/services/compute/instances.go +++ /dev/null @@ -1,261 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package compute - -import ( - "fmt" - - "github.com/blang/semver/v4" - "github.com/pkg/errors" - "google.golang.org/api/compute/v1" - "k8s.io/utils/pointer" - "sigs.k8s.io/cluster-api/util/record" - - infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/gcperrors" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/wait" -) - -const ( - defaultDiskSizeGB = 30 -) - -// InstanceIfExists returns the existing instance or nothing if it doesn't exist. -func (s *Service) InstanceIfExists(scope *scope.MachineScope) (*compute.Instance, error) { - log := s.scope.Logger.WithValues("instance-name", scope.Name()) - log.V(2).Info("Looking for instance by name") - - res, err := s.instances.Get(s.scope.Project(), scope.Zone(), scope.Name()).Do() - switch { - case gcperrors.IsNotFound(err): - return nil, nil - case err != nil: - return nil, errors.Wrapf(err, "failed to describe instance: %q", scope.Name()) - } - - return res, nil -} - -func diskTypePtrDerefOrDefault(ptr *infrav1.DiskType) infrav1.DiskType { - if ptr != nil { - return *ptr - } - return infrav1.PdStandardDiskType -} - -func diskTypeURL(zone string, dt *infrav1.DiskType) string { - return fmt.Sprintf("zones/%s/diskTypes/%s", zone, diskTypePtrDerefOrDefault(dt)) -} - -// CreateInstance runs a GCE instance. -func (s *Service) CreateInstance(scope *scope.MachineScope) (*compute.Instance, error) { - log := s.scope.Logger.WithValues("machine-role", scope.Role()) - log.V(2).Info("Creating an instance") - - bootstrapData, err := scope.GetBootstrapData() - if err != nil { - return nil, errors.Wrap(err, "failed to retrieve bootstrap data") - } - - sourceImage, err := s.rootDiskImage(scope) - if err != nil { - return nil, err - } - - input := &compute.Instance{ - Name: scope.Name(), - Zone: scope.Zone(), - MachineType: fmt.Sprintf("zones/%s/machineTypes/%s", scope.Zone(), scope.GCPMachine.Spec.InstanceType), - CanIpForward: true, - NetworkInterfaces: []*compute.NetworkInterface{{ - Network: s.scope.NetworkSelfLink(), - }}, - Tags: &compute.Tags{ - Items: append( - scope.GCPMachine.Spec.AdditionalNetworkTags, - fmt.Sprintf("%s-%s", scope.Cluster.Name, scope.Role()), - scope.Cluster.Name, - ), - }, - Disks: []*compute.AttachedDisk{ - { - AutoDelete: true, - Boot: true, - InitializeParams: &compute.AttachedDiskInitializeParams{ - DiskSizeGb: defaultDiskSizeGB, - DiskType: diskTypeURL(scope.Zone(), scope.GCPMachine.Spec.RootDeviceType), - SourceImage: sourceImage, - }, - }, - }, - Metadata: &compute.Metadata{ - Items: []*compute.MetadataItems{ - { - Key: "user-data", - Value: pointer.StringPtr(bootstrapData), - }, - }, - }, - ServiceAccounts: []*compute.ServiceAccount{ - { - Email: "default", - Scopes: []string{ - compute.CloudPlatformScope, - }, - }, - }, - Scheduling: &compute.Scheduling{ - Preemptible: scope.GCPMachine.Spec.Preemptible, - }, - } - - for _, m := range scope.GCPMachine.Spec.AdditionalMetadata { - input.Metadata.Items = append(input.Metadata.Items, &compute.MetadataItems{ - Key: m.Key, - Value: m.Value, - }) - } - - if scope.GCPMachine.Spec.ServiceAccount != nil { - serviceAccount := scope.GCPMachine.Spec.ServiceAccount - input.ServiceAccounts = []*compute.ServiceAccount{ - { - Email: serviceAccount.Email, - Scopes: serviceAccount.Scopes, - }, - } - } - - input.Labels = infrav1.Build(infrav1.BuildParams{ - ClusterName: s.scope.Name(), - Lifecycle: infrav1.ResourceLifecycleOwned, - Role: pointer.StringPtr(scope.Role()), - // 
TODO(vincepri): Check what needs to be added for the cloud provider label. - Additional: s.scope. - GCPCluster.Spec. - AdditionalLabels. - AddLabels(scope.GCPMachine.Spec.AdditionalLabels), - }) - - if scope.GCPMachine.Spec.PublicIP != nil && *scope.GCPMachine.Spec.PublicIP { - input.NetworkInterfaces[0].AccessConfigs = []*compute.AccessConfig{ - { - Type: "ONE_TO_ONE_NAT", - Name: "External NAT", - }, - } - } - - if scope.GCPMachine.Spec.RootDeviceSize > 0 { - input.Disks[0].InitializeParams.DiskSizeGb = scope.GCPMachine.Spec.RootDeviceSize - } - for _, d := range scope.GCPMachine.Spec.AdditionalDisks { - ad := &compute.AttachedDisk{ - AutoDelete: true, - InitializeParams: &compute.AttachedDiskInitializeParams{ - DiskSizeGb: pointer.Int64PtrDerefOr(d.Size, defaultDiskSizeGB), - DiskType: diskTypeURL(scope.Zone(), d.DeviceType), - }, - } - - if ad.InitializeParams.DiskType == string(infrav1.LocalSsdDiskType) { - ad.Type = "SCRATCH" // Default is PERSISTENT. - - // Override the Disk size - ad.InitializeParams.DiskSizeGb = 375 - - // For local SSDs set interface to NVME (instead of default SCSI) which is faster. - // Most OS images would work with both NVME and SCSI disks but some may work - // considerably faster with NVME. - // https://cloud.google.com/compute/docs/disks/local-ssd#choose_an_interface - ad.Interface = "NVME" - } - - input.Disks = append(input.Disks, ad) - } - - if scope.GCPMachine.Spec.Subnet != nil { - input.NetworkInterfaces[0].Subnetwork = fmt.Sprintf("regions/%s/subnetworks/%s", - scope.Region(), *scope.GCPMachine.Spec.Subnet) - } - - if s.scope.Network().APIServerAddress == nil { - return nil, errors.New("failed to run controlplane, APIServer address not available") - } - - log.Info("Running instance") - out, err := s.runInstance(input) - if err != nil { - record.Warnf(scope.Machine, "FailedCreate", "Failed to create instance: %v", err) - - return nil, err - } - - record.Eventf(scope.Machine, "SuccessfulCreate", "Created new %s instance with name %q", scope.Role(), out.Name) - - return out, nil -} - -func (s *Service) runInstance(input *compute.Instance) (*compute.Instance, error) { - op, err := s.instances.Insert(s.scope.Project(), input.Zone, input).Do() - if err != nil { - return nil, errors.Wrap(err, "failed to create gcp instance") - } - - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return nil, errors.Wrap(err, "failed to create gcp instance") - } - - return s.instances.Get(s.scope.Project(), input.Zone, input.Name).Do() -} - -// TerminateInstanceAndWait terminates the instance and wait for the termination. -func (s *Service) TerminateInstanceAndWait(scope *scope.MachineScope) error { - op, err := s.instances.Delete(s.scope.Project(), scope.Zone(), scope.Name()).Do() - if opErr := s.checkOrWaitForDeleteOp(op, err); opErr != nil { - return errors.Wrapf(opErr, "failed to terminate instance") - } - - return nil -} - -// rootDiskImage computes the GCE disk image to use as the boot disk. 
-func (s *Service) rootDiskImage(scope *scope.MachineScope) (string, error) { - if scope.GCPMachine.Spec.Image != nil { - return *scope.GCPMachine.Spec.Image, nil - } else if scope.GCPMachine.Spec.ImageFamily != nil { - return *scope.GCPMachine.Spec.ImageFamily, nil - } - - if scope.Machine.Spec.Version == nil { - return "", errors.Errorf("missing required Spec.Version on Machine %q in namespace %q", - scope.Name(), scope.Namespace()) - } - - version, err := semver.ParseTolerant(*scope.Machine.Spec.Version) - if err != nil { - return "", errors.Wrapf(err, "error parsing Spec.Version on Machine %q in namespace %q, expected valid SemVer string", - scope.Name(), scope.Namespace()) - } - - image := fmt.Sprintf( - "projects/%s/global/images/family/capi-ubuntu-1804-k8s-v%d-%d", - s.scope.Project(), version.Major, version.Minor) - - return image, nil -} diff --git a/cloud/services/compute/instances/doc.go b/cloud/services/compute/instances/doc.go new file mode 100644 index 000000000..0ddae4da4 --- /dev/null +++ b/cloud/services/compute/instances/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package instances implements reconciler for machine instance components. +package instances diff --git a/cloud/services/compute/instances/reconcile.go b/cloud/services/compute/instances/reconcile.go new file mode 100644 index 000000000..02c792b3b --- /dev/null +++ b/cloud/services/compute/instances/reconcile.go @@ -0,0 +1,208 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package instances + +import ( + "context" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "github.com/pkg/errors" + "google.golang.org/api/compute/v1" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/log" + + infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/gcperrors" +) + +// Reconcile reconcile machine instance. 
+func (s *Service) Reconcile(ctx context.Context) error { + log := log.FromContext(ctx) + log.Info("Reconciling instance resources") + instance, err := s.createOrGetInstance(ctx) + if err != nil { + return err + } + + addresses := make([]corev1.NodeAddress, 0, len(instance.NetworkInterfaces)) + for _, iface := range instance.NetworkInterfaces { + addresses = append(addresses, corev1.NodeAddress{ + Type: corev1.NodeInternalIP, + Address: iface.NetworkIP, + }) + + for _, ac := range iface.AccessConfigs { + addresses = append(addresses, corev1.NodeAddress{ + Type: corev1.NodeExternalIP, + Address: ac.NatIP, + }) + } + } + + s.scope.SetProviderID() + s.scope.SetAddresses(addresses) + s.scope.SetInstanceStatus(infrav1.InstanceStatus(instance.Status)) + + if s.scope.IsControlPlane() { + if err := s.registerControlPlaneInstance(ctx, instance); err != nil { + return err + } + } + + return nil +} + +// Delete delete machine instance. +func (s *Service) Delete(ctx context.Context) error { + log := log.FromContext(ctx) + log.Info("Deleting instance resources") + instanceSpec := s.scope.InstanceSpec() + instanceName := instanceSpec.Name + instanceKey := meta.ZonalKey(instanceName, s.scope.Zone()) + log.V(2).Info("Looking for instance before deleting", "name", instanceName, "zone", s.scope.Zone()) + instance, err := s.instances.Get(ctx, instanceKey) + if err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error looking for instnace before deleting", "name", instanceName) + return err + } + + return nil + } + + if s.scope.IsControlPlane() { + if err := s.deregisterControlPlaneInstance(ctx, instance); err != nil { + return err + } + } + + log.V(2).Info("Deleting instance", "name", instanceName, "zone", s.scope.Zone()) + return gcperrors.IgnoreNotFound(s.instances.Delete(ctx, instanceKey)) +} + +func (s *Service) createOrGetInstance(ctx context.Context) (*compute.Instance, error) { + log := log.FromContext(ctx) + log.V(2).Info("Getting bootstrap data for machine") + bootstrapData, err := s.scope.GetBootstrapData() + if err != nil { + log.Error(err, "Error getting bootstrap data for machine") + return nil, errors.Wrap(err, "failed to retrieve bootstrap data") + } + + instanceSpec := s.scope.InstanceSpec() + instanceName := instanceSpec.Name + instanceKey := meta.ZonalKey(instanceName, s.scope.Zone()) + instanceSpec.Metadata.Items = append(instanceSpec.Metadata.Items, &compute.MetadataItems{ + Key: "user-data", + Value: pointer.StringPtr(bootstrapData), + }) + + log.V(2).Info("Looking for instance", "name", instanceName, "zone", s.scope.Zone()) + instance, err := s.instances.Get(ctx, instanceKey) + if err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error looking for instnace", "name", instanceName, "zone", s.scope.Zone()) + return nil, err + } + + log.V(2).Info("Creating an instance", "name", instanceName, "zone", s.scope.Zone()) + if err := s.instances.Insert(ctx, instanceKey, instanceSpec); err != nil { + log.Error(err, "Error creating an instnace", "name", instanceName, "zone", s.scope.Zone()) + return nil, err + } + + instance, err = s.instances.Get(ctx, instanceKey) + if err != nil { + return nil, err + } + } + + return instance, nil +} + +func (s *Service) registerControlPlaneInstance(ctx context.Context, instance *compute.Instance) error { + log := log.FromContext(ctx) + instancegroupName := s.scope.ControlPlaneGroupName() + log.V(2).Info("Ensuring instance already registered in the instancegroup", "name", instance.Name, "instancegroup", instancegroupName) + 
instancegroupKey := meta.ZonalKey(instancegroupName, s.scope.Zone()) + instanceList, err := s.instancegroups.ListInstances(ctx, instancegroupKey, &compute.InstanceGroupsListInstancesRequest{ + InstanceState: "RUNNING", + }, filter.None) + if err != nil { + log.Error(err, "Error retrieving list of instances in the instancegroup", "instancegroup", instancegroupName) + return err + } + + instanceSets := sets.NewString() + defer instanceSets.Delete() + for _, i := range instanceList { + instanceSets.Insert(i.Instance) + } + + if !instanceSets.Has(instance.SelfLink) && instance.Status == string(infrav1.InstanceStatusRunning) { + log.V(2).Info("Registering instance in the instancegroup", "name", instance.Name, "instancegroup", instancegroupName) + if err := s.instancegroups.AddInstances(ctx, instancegroupKey, &compute.InstanceGroupsAddInstancesRequest{ + Instances: []*compute.InstanceReference{ + { + Instance: instance.SelfLink, + }, + }, + }); err != nil { + return err + } + } + + return nil +} + +func (s *Service) deregisterControlPlaneInstance(ctx context.Context, instance *compute.Instance) error { + log := log.FromContext(ctx) + instancegroupName := s.scope.ControlPlaneGroupName() + log.V(2).Info("Ensuring instance already registered in the instancegroup", "name", instance.Name, "instancegroup", instancegroupName) + instancegroupKey := meta.ZonalKey(instancegroupName, s.scope.Zone()) + instanceList, err := s.instancegroups.ListInstances(ctx, instancegroupKey, &compute.InstanceGroupsListInstancesRequest{ + InstanceState: "RUNNING", + }, filter.None) + if err != nil { + return err + } + + instanceSets := sets.NewString() + defer instanceSets.Delete() + for _, i := range instanceList { + instanceSets.Insert(i.Instance) + } + + if len(instanceSets.List()) > 0 && instanceSets.Has(instance.SelfLink) { + log.V(2).Info("Deregistering instance in the instancegroup", "name", instance.Name, "instancegroup", instancegroupName) + if err := s.instancegroups.RemoveInstances(ctx, instancegroupKey, &compute.InstanceGroupsRemoveInstancesRequest{ + Instances: []*compute.InstanceReference{ + { + Instance: instance.SelfLink, + }, + }, + }); err != nil { + return err + } + } + + return nil +} diff --git a/cloud/services/compute/instances/reconcile_test.go b/cloud/services/compute/instances/reconcile_test.go new file mode 100644 index 000000000..e923e9bbc --- /dev/null +++ b/cloud/services/compute/instances/reconcile_test.go @@ -0,0 +1,228 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package instances + +import ( + "context" + "net/http" + "reflect" + "testing" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" + + infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func init() { + _ = clusterv1.AddToScheme(scheme.Scheme) + _ = infrav1.AddToScheme(scheme.Scheme) +} + +var fakeBootstrapSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster-bootstrap", + Namespace: "default", + }, + Data: map[string][]byte{ + "value": []byte("Zm9vCg=="), + }, +} + +var fakeCluster = &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + Spec: clusterv1.ClusterSpec{}, +} + +var fakeMachine = &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-machine", + Namespace: "default", + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: pointer.String("my-cluster-bootstrap"), + }, + FailureDomain: pointer.String("us-central1-c"), + Version: pointer.String("v1.19.11"), + }, +} + +var fakeGCPCluster = &infrav1.GCPCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + Spec: infrav1.GCPClusterSpec{ + Project: "my-proj", + Region: "us-central1", + }, +} + +var fakeGCPMachine = &infrav1.GCPMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-machine", + Namespace: "default", + }, + Spec: infrav1.GCPMachineSpec{}, +} + +func TestService_createOrGetInstance(t *testing.T) { + fakec := fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithObjects(fakeBootstrapSecret). 
+ Build() + + clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ + Client: fakec, + Cluster: fakeCluster, + GCPCluster: fakeGCPCluster, + }) + if err != nil { + t.Fatal(err) + } + + machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ + Client: fakec, + Machine: fakeMachine, + GCPMachine: fakeGCPMachine, + ClusterGetter: clusterScope, + }) + if err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + scope Scope + mockInstance *cloud.MockInstances + want *compute.Instance + wantErr bool + }{ + { + name: "instance already exist (should return existing instance)", + scope: machineScope, + mockInstance: &cloud.MockInstances{ + ProjectRouter: &cloud.SingleProjectRouter{ID: "proj-id"}, + Objects: map[meta.Key]*cloud.MockInstancesObj{ + {Name: "my-machine", Zone: "us-central1-c"}: {Obj: &compute.Instance{ + Name: "my-machine", + }}, + }, + }, + want: &compute.Instance{ + Name: "my-machine", + }, + }, + { + name: "error getting instance with non 404 error code (should return an error)", + scope: machineScope, + mockInstance: &cloud.MockInstances{ + ProjectRouter: &cloud.SingleProjectRouter{ID: "proj-id"}, + Objects: map[meta.Key]*cloud.MockInstancesObj{}, + GetHook: func(ctx context.Context, key *meta.Key, m *cloud.MockInstances) (bool, *compute.Instance, error) { + return true, &compute.Instance{}, &googleapi.Error{Code: http.StatusBadRequest} + }, + }, + wantErr: true, + }, + { + name: "instance does not exist (should create instance)", + scope: machineScope, + mockInstance: &cloud.MockInstances{ + ProjectRouter: &cloud.SingleProjectRouter{ID: "proj-id"}, + Objects: map[meta.Key]*cloud.MockInstancesObj{}, + }, + want: &compute.Instance{ + Name: "my-machine", + CanIpForward: true, + Disks: []*compute.AttachedDisk{ + { + AutoDelete: true, + Boot: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskType: "zones/us-central1-c/diskTypes/pd-standard", + SourceImage: "projects/my-proj/global/images/family/capi-ubuntu-1804-k8s-v1-19", + }, + }, + }, + Labels: map[string]string{ + "capg-role": "node", + "capg-cluster-my-cluster": "owned", + }, + MachineType: "zones/us-central1-c/machineTypes", + Metadata: &compute.Metadata{ + Items: []*compute.MetadataItems{ + { + Key: "user-data", + Value: pointer.String("Zm9vCg=="), + }, + }, + }, + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "projects/my-proj/global/networks/default", + }, + }, + SelfLink: "https://www.googleapis.com/compute/v1/projects/proj-id/zones/us-central1-c/instances/my-machine", + Scheduling: &compute.Scheduling{}, + ServiceAccounts: []*compute.ServiceAccount{ + { + Email: "default", + Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"}, + }, + }, + Tags: &compute.Tags{ + Items: []string{ + "my-cluster-node", + "my-cluster", + }, + }, + Zone: "us-central1-c", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.TODO() + s := New(tt.scope) + s.instances = tt.mockInstance + got, err := s.createOrGetInstance(ctx) + if (err != nil) != tt.wantErr { + t.Errorf("Service.createOrGetInstance() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Service.createOrGetInstance() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/cloud/services/compute/instances/service.go b/cloud/services/compute/instances/service.go new file mode 100644 index 000000000..52a1fde34 --- /dev/null +++ b/cloud/services/compute/instances/service.go @@ -0,0 +1,65 @@ +/* 
+Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package instances + +import ( + "context" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + + "google.golang.org/api/compute/v1" + "sigs.k8s.io/cluster-api-provider-gcp/cloud" +) + +type instancesInterface interface { + Get(ctx context.Context, key *meta.Key) (*compute.Instance, error) + Insert(ctx context.Context, key *meta.Key, obj *compute.Instance) error + Delete(ctx context.Context, key *meta.Key) error +} + +type instancegroupsInterface interface { + AddInstances(ctx context.Context, key *meta.Key, req *compute.InstanceGroupsAddInstancesRequest) error + ListInstances(ctx context.Context, key *meta.Key, req *compute.InstanceGroupsListInstancesRequest, fl *filter.F) ([]*compute.InstanceWithNamedPorts, error) + RemoveInstances(ctx context.Context, key *meta.Key, req *compute.InstanceGroupsRemoveInstancesRequest) error +} + +// Scope is an interfaces that hold used methods. +type Scope interface { + cloud.Machine + InstanceSpec() *compute.Instance + InstanceImageSpec() *compute.AttachedDisk + InstanceAdditionalDiskSpec() []*compute.AttachedDisk +} + +// Service implements instances reconciler. +type Service struct { + scope Scope + instances instancesInterface + instancegroups instancegroupsInterface +} + +var _ cloud.Reconciler = &Service{} + +// New returns Service from given scope. +func New(scope Scope) *Service { + return &Service{ + scope: scope, + instances: scope.Cloud().Instances(), + instancegroups: scope.Cloud().InstanceGroups(), + } +} diff --git a/cloud/services/compute/loadbalancers.go b/cloud/services/compute/loadbalancers.go deleted file mode 100644 index d4015d8b0..000000000 --- a/cloud/services/compute/loadbalancers.go +++ /dev/null @@ -1,309 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package compute - -import ( - "fmt" - "path" - "time" - - "github.com/pkg/errors" - "google.golang.org/api/compute/v1" - "k8s.io/utils/pointer" - - infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/gcperrors" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/wait" -) - -const ( - // APIServerLoadBalancerProtocol defines the LB protocol. - APIServerLoadBalancerProtocol = "TCP" - // APIServerLoadBalancerHealthCheckProtocol defines the LB health check protocol. 
- APIServerLoadBalancerHealthCheckProtocol = "SSL" - // APIServerLoadBalancerProxyHeader defines the LB proxy header. - APIServerLoadBalancerProxyHeader = "NONE" - // APIServerLoadBalancerScheme defines the LB scheme. - APIServerLoadBalancerScheme = "EXTERNAL" - // APIServerLoadBalancerIPVersion defines the LB IP type. - APIServerLoadBalancerIPVersion = "IPV4" - // APIServerLoadBalancerBackendPortName defines the LB backend port name. - APIServerLoadBalancerBackendPortName = "apiserver" -) - -// ReconcileLoadbalancers reconciles the api server load balancer. -func (s *Service) ReconcileLoadbalancers() error { - // Reconcile Health Check. - healthCheckSpec := s.getAPIServerHealthCheckSpec() - healthCheck, err := s.healthchecks.Get(s.scope.Project(), healthCheckSpec.Name).Do() - if gcperrors.IsNotFound(err) { - op, err := s.healthchecks.Insert(s.scope.Project(), healthCheckSpec).Do() - if err != nil { - return errors.Wrapf(err, "failed to create health check") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to create health check") - } - healthCheck, err = s.healthchecks.Get(s.scope.Project(), healthCheckSpec.Name).Do() - if err != nil { - return errors.Wrapf(err, "failed to describe health check") - } - } else if err != nil { - return errors.Wrapf(err, "failed to describe health check") - } - - s.scope.Network().APIServerHealthCheck = pointer.StringPtr(healthCheck.SelfLink) - - // Reconcile Backend Service. - backendServiceSpec := s.getAPIServerBackendServiceSpec() - backendService, err := s.backendservices.Get(s.scope.Project(), backendServiceSpec.Name).Do() - if gcperrors.IsNotFound(err) { - op, err := s.backendservices.Insert(s.scope.Project(), backendServiceSpec).Do() - if err != nil { - return errors.Wrapf(err, "failed to create backend service") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to create backend service") - } - backendService, err = s.backendservices.Get(s.scope.Project(), backendServiceSpec.Name).Do() - if err != nil { - return errors.Wrapf(err, "failed to describe backend service") - } - } else if err != nil { - return errors.Wrapf(err, "failed to describe backend service") - } - - s.scope.Network().APIServerBackendService = pointer.StringPtr(backendService.SelfLink) - - // Reconcile Target Proxy. - targetProxySpec := s.getAPIServerTargetProxySpec() - targetProxy, err := s.targetproxies.Get(s.scope.Project(), targetProxySpec.Name).Do() - if gcperrors.IsNotFound(err) { - op, err := s.targetproxies.Insert(s.scope.Project(), targetProxySpec).Do() - if err != nil { - return errors.Wrapf(err, "failed to create target proxy") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to create target proxy") - } - targetProxy, err = s.targetproxies.Get(s.scope.Project(), targetProxySpec.Name).Do() - if err != nil { - return errors.Wrapf(err, "failed to describe target proxy") - } - } else if err != nil { - return errors.Wrapf(err, "failed to describe target proxy") - } - - s.scope.Network().APIServerTargetProxy = pointer.StringPtr(targetProxy.SelfLink) - - // Reconcile Global IP Address. 
- addressSpec := s.getAPIServerIPAddressSpec() - address, err := s.addresses.Get(s.scope.Project(), addressSpec.Name).Do() - if gcperrors.IsNotFound(err) { - op, err := s.addresses.Insert(s.scope.Project(), addressSpec).Do() - if err != nil { - return errors.Wrapf(err, "failed to create global addresses") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to create global addresses") - } - address, err = s.addresses.Get(s.scope.Project(), addressSpec.Name).Do() - if err != nil { - return errors.Wrapf(err, "failed to describe global addresses") - } - } else if err != nil { - return errors.Wrapf(err, "failed to describe addresses") - } - - s.scope.Network().APIServerAddress = pointer.StringPtr(address.Address) - - // Reconcile Forwarding Rules. - forwardingRuleSpec := s.getAPIServerForwardingRuleSpec() - forwardingRule, err := s.forwardingrules.Get(s.scope.Project(), forwardingRuleSpec.Name).Do() - if gcperrors.IsNotFound(err) { - op, err := s.forwardingrules.Insert(s.scope.Project(), forwardingRuleSpec).Do() - if err != nil { - return errors.Wrapf(err, "failed to create forwarding rules") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to create forwarding rules") - } - forwardingRule, err = s.forwardingrules.Get(s.scope.Project(), forwardingRuleSpec.Name).Do() - if err != nil { - return errors.Wrapf(err, "failed to describe forwarding rules") - } - } else if err != nil { - return errors.Wrapf(err, "failed to describe forwarding rules") - } - - s.scope.Network().APIServerForwardingRule = pointer.StringPtr(forwardingRule.SelfLink) - - return nil -} - -// UpdateBackendServices updates the backend services for a instance group. -func (s *Service) UpdateBackendServices() error { - // Refresh the instance groups available. - if err := s.ReconcileInstanceGroups(); err != nil { - return err - } - - // Retrieve the spec and the current backend service. - backendServiceSpec := s.getAPIServerBackendServiceSpec() - backendService, err := s.backendservices.Get(s.scope.Project(), backendServiceSpec.Name).Do() - if err != nil { - return err - } - - // Update backend service if the list of backends has changed in the spec. - // This might happen if new instance groups for the control plane api server - // are created in additional zones. - if len(backendService.Backends) != len(backendServiceSpec.Backends) { - backendService.Backends = backendServiceSpec.Backends - op, err := s.backendservices.Update(s.scope.Project(), backendService.Name, backendService).Do() - if err != nil { - return errors.Wrapf(err, "failed to update backend service") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to update backend service") - } - } - - return nil -} - -// DeleteLoadbalancers deletes LoadBalancers. -func (s *Service) DeleteLoadbalancers() error { - // Delete Forwarding Rules. - if s.scope.Network().APIServerForwardingRule != nil { - name := path.Base(*s.scope.Network().APIServerForwardingRule) - op, err := s.forwardingrules.Delete(s.scope.Project(), name).Do() - if opErr := s.checkOrWaitForDeleteOp(op, err); opErr != nil { - return errors.Wrapf(opErr, "failed to delete forwarding rules") - } - - s.scope.Network().APIServerForwardingRule = nil - } - - // Delete Global IP. 
- if s.scope.Network().APIServerAddress != nil { - name := s.getAPIServerIPAddressSpec().Name - op, err := s.addresses.Delete(s.scope.Project(), name).Do() - if opErr := s.checkOrWaitForDeleteOp(op, err); opErr != nil { - return errors.Wrapf(opErr, "failed to delete globalAddress resource") - } - s.scope.Network().APIServerAddress = nil - } - - // Delete Target Proxy. - if s.scope.Network().APIServerTargetProxy != nil { - name := path.Base(*s.scope.Network().APIServerTargetProxy) - op, err := s.targetproxies.Delete(s.scope.Project(), name).Do() - if opErr := s.checkOrWaitForDeleteOp(op, err); opErr != nil { - return errors.Wrapf(opErr, "failed to delete target proxy") - } - s.scope.Network().APIServerTargetProxy = nil - } - - // Delete Backend Service. - if s.scope.Network().APIServerBackendService != nil { - name := path.Base(*s.scope.Network().APIServerBackendService) - op, err := s.backendservices.Delete(s.scope.Project(), name).Do() - if opErr := s.checkOrWaitForDeleteOp(op, err); opErr != nil { - return errors.Wrapf(opErr, "failed to delete backend service") - } - s.scope.Network().APIServerBackendService = nil - } - - // Delete Health Check. - if s.scope.Network().APIServerHealthCheck != nil { - name := path.Base(*s.scope.Network().APIServerHealthCheck) - op, err := s.healthchecks.Delete(s.scope.Project(), name).Do() - if opErr := s.checkOrWaitForDeleteOp(op, err); opErr != nil { - return errors.Wrapf(opErr, "failed to delete health check") - } - s.scope.Network().APIServerHealthCheck = nil - } - - return nil -} - -func (s *Service) getAPIServerHealthCheckSpec() *compute.HealthCheck { - return &compute.HealthCheck{ - Name: fmt.Sprintf("%s-%s", s.scope.Name(), infrav1.APIServerRoleTagValue), - Type: APIServerLoadBalancerHealthCheckProtocol, - SslHealthCheck: &compute.SSLHealthCheck{ - Port: s.scope.LoadBalancerBackendPort(), - PortSpecification: "USE_FIXED_PORT", - }, - CheckIntervalSec: 10, - TimeoutSec: 5, - HealthyThreshold: 5, - UnhealthyThreshold: 3, - } -} - -func (s *Service) getAPIServerBackendServiceSpec() *compute.BackendService { - res := &compute.BackendService{ - Name: fmt.Sprintf("%s-%s", s.scope.Name(), infrav1.APIServerRoleTagValue), - LoadBalancingScheme: APIServerLoadBalancerScheme, - PortName: APIServerLoadBalancerBackendPortName, - Protocol: APIServerLoadBalancerProtocol, - TimeoutSec: int64((10 * time.Minute).Seconds()), - HealthChecks: []string{ - *s.scope.Network().APIServerHealthCheck, - }, - } - - for _, groupSelfLink := range s.scope.Network().APIServerInstanceGroups { - res.Backends = append(res.Backends, &compute.Backend{ - BalancingMode: "UTILIZATION", - Group: groupSelfLink, - }) - } - - return res -} - -func (s *Service) getAPIServerTargetProxySpec() *compute.TargetTcpProxy { - return &compute.TargetTcpProxy{ - Name: fmt.Sprintf("%s-%s", s.scope.Name(), infrav1.APIServerRoleTagValue), - ProxyHeader: APIServerLoadBalancerProxyHeader, - Service: *s.scope.Network().APIServerBackendService, - } -} - -func (s *Service) getAPIServerIPAddressSpec() *compute.Address { - return &compute.Address{ - Name: fmt.Sprintf("%s-%s", s.scope.Name(), infrav1.APIServerRoleTagValue), - AddressType: APIServerLoadBalancerScheme, - IpVersion: APIServerLoadBalancerIPVersion, - } -} - -func (s *Service) getAPIServerForwardingRuleSpec() *compute.ForwardingRule { - frontendPortRange := fmt.Sprintf("%d-%d", s.scope.LoadBalancerFrontendPort(), s.scope.LoadBalancerFrontendPort()) - - return &compute.ForwardingRule{ - Name: fmt.Sprintf("%s-%s", s.scope.Name(), 
infrav1.APIServerRoleTagValue),
- IPAddress: *s.scope.Network().APIServerAddress,
- IPProtocol: APIServerLoadBalancerProtocol,
- LoadBalancingScheme: APIServerLoadBalancerScheme,
- PortRange: frontendPortRange,
- Target: *s.scope.Network().APIServerTargetProxy,
- }
-}
diff --git a/cloud/services/compute/loadbalancers/doc.go b/cloud/services/compute/loadbalancers/doc.go
new file mode 100644
index 000000000..79050d9d0
--- /dev/null
+++ b/cloud/services/compute/loadbalancers/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package loadbalancers implements reconciler for cluster control-plane loadbalancer components.
+package loadbalancers
diff --git a/cloud/services/compute/loadbalancers/reconcile.go b/cloud/services/compute/loadbalancers/reconcile.go
new file mode 100644
index 000000000..d8003b02e
--- /dev/null
+++ b/cloud/services/compute/loadbalancers/reconcile.go
@@ -0,0 +1,379 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loadbalancers
+
+import (
+ "context"
+
+ "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
+ computebeta "google.golang.org/api/compute/v0.beta"
+ "google.golang.org/api/compute/v1"
+
+ "k8s.io/utils/pointer"
+ "sigs.k8s.io/cluster-api-provider-gcp/cloud/gcperrors"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// Reconcile reconciles the cluster control-plane loadbalancer components.
+func (s *Service) Reconcile(ctx context.Context) error {
+ log := log.FromContext(ctx)
+ log.Info("Reconciling loadbalancer resources")
+ instancegroups, err := s.createOrGetInstanceGroups(ctx)
+ if err != nil {
+ return err
+ }
+
+ healthcheck, err := s.createOrGetHealthCheck(ctx)
+ if err != nil {
+ return err
+ }
+
+ backendsvc, err := s.createOrGetBackendService(ctx, instancegroups, healthcheck)
+ if err != nil {
+ return err
+ }
+
+ target, err := s.createOrGetTargetTCPProxy(ctx, backendsvc)
+ if err != nil {
+ return err
+ }
+
+ addr, err := s.createOrGetAddress(ctx)
+ if err != nil {
+ return err
+ }
+
+ return s.createForwardingRule(ctx, target, addr)
+}
+
+// Delete deletes the cluster control-plane loadbalancer components.
+func (s *Service) Delete(ctx context.Context) error { + log := log.FromContext(ctx) + log.Info("Deleting loadbalancer resources") + if err := s.deleteForwardingRule(ctx); err != nil { + return err + } + + if err := s.deleteAddress(ctx); err != nil { + return err + } + + if err := s.deleteTargetTCPProxy(ctx); err != nil { + return err + } + + if err := s.deleteBackendService(ctx); err != nil { + return err + } + + if err := s.deleteHealthCheck(ctx); err != nil { + return err + } + + return s.deleteInstanceGroups(ctx) +} + +func (s *Service) createOrGetInstanceGroups(ctx context.Context) ([]*compute.InstanceGroup, error) { + log := log.FromContext(ctx) + fd := s.scope.FailureDomains() + zones := make([]string, 0, len(fd)) + for zone := range fd { + zones = append(zones, zone) + } + + groups := make([]*compute.InstanceGroup, 0, len(zones)) + groupsMap := s.scope.Network().APIServerInstanceGroups + if groupsMap == nil { + groupsMap = make(map[string]string) + } + + for _, zone := range zones { + instancegroupSpec := s.scope.InstanceGroupSpec(zone) + log.V(2).Info("Looking for instancegroup in zone", "zone", zone, "name", instancegroupSpec.Name) + instancegroup, err := s.instancegroups.Get(ctx, meta.ZonalKey(instancegroupSpec.Name, zone)) + if err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error looking for instancegroup in zone", "zone", zone) + return groups, err + } + + log.V(2).Info("Creating instancegroup in zone", "zone", zone, "name", instancegroupSpec.Name) + if err := s.instancegroups.Insert(ctx, meta.ZonalKey(instancegroupSpec.Name, zone), instancegroupSpec); err != nil { + log.Error(err, "Error creating instancegroup", "name", instancegroupSpec.Name) + return groups, err + } + + instancegroup, err = s.instancegroups.Get(ctx, meta.ZonalKey(instancegroupSpec.Name, zone)) + if err != nil { + return groups, err + } + } + + groups = append(groups, instancegroup) + groupsMap[zone] = instancegroup.SelfLink + } + + s.scope.Network().APIServerInstanceGroups = groupsMap + return groups, nil +} + +func (s *Service) createOrGetHealthCheck(ctx context.Context) (*compute.HealthCheck, error) { + log := log.FromContext(ctx) + healthcheckSpec := s.scope.HealthCheckSpec() + log.V(2).Info("Looking for healthcheck", "name", healthcheckSpec.Name) + healthcheck, err := s.healthchecks.Get(ctx, meta.GlobalKey(healthcheckSpec.Name)) + if err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error looking for healthcheck", "name", healthcheckSpec.Name) + return nil, err + } + + log.V(2).Info("Creating a healthcheck", "name", healthcheckSpec.Name) + if err := s.healthchecks.Insert(ctx, meta.GlobalKey(healthcheckSpec.Name), healthcheckSpec); err != nil { + log.Error(err, "Error creating a healthcheck", "name", healthcheckSpec.Name) + return nil, err + } + + healthcheck, err = s.healthchecks.Get(ctx, meta.GlobalKey(healthcheckSpec.Name)) + if err != nil { + return nil, err + } + } + + s.scope.Network().APIServerHealthCheck = pointer.String(healthcheck.SelfLink) + return healthcheck, nil +} + +func (s *Service) createOrGetBackendService(ctx context.Context, instancegroups []*compute.InstanceGroup, healthcheck *compute.HealthCheck) (*compute.BackendService, error) { + log := log.FromContext(ctx) + backends := make([]*compute.Backend, 0, len(instancegroups)) + for _, group := range instancegroups { + backends = append(backends, &compute.Backend{ + BalancingMode: "UTILIZATION", + Group: group.SelfLink, + }) + } + + backendsvcSpec := s.scope.BackendServiceSpec() + backendsvcSpec.Backends 
= backends + backendsvcSpec.HealthChecks = []string{healthcheck.SelfLink} + backendsvc, err := s.backendservices.Get(ctx, meta.GlobalKey(backendsvcSpec.Name)) + if err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error looking for backendservice", "name", backendsvcSpec.Name) + return nil, err + } + + log.V(2).Info("Creating a backendservice", "name", backendsvcSpec.Name) + if err := s.backendservices.Insert(ctx, meta.GlobalKey(backendsvcSpec.Name), backendsvcSpec); err != nil { + log.Error(err, "Error creating a backendservice", "name", backendsvcSpec.Name) + return nil, err + } + + backendsvc, err = s.backendservices.Get(ctx, meta.GlobalKey(backendsvcSpec.Name)) + if err != nil { + return nil, err + } + } + + if len(backendsvc.Backends) != len(backendsvcSpec.Backends) { + log.V(2).Info("Updating a backendservice", "name", backendsvcSpec.Name) + backendsvc.Backends = backendsvcSpec.Backends + if err := s.backendservices.Update(ctx, meta.GlobalKey(backendsvcSpec.Name), backendsvc); err != nil { + log.Error(err, "Error updating a backendservice", "name", backendsvcSpec.Name) + return nil, err + } + } + + s.scope.Network().APIServerBackendService = pointer.String(backendsvc.SelfLink) + return backendsvc, nil +} + +func (s *Service) createOrGetTargetTCPProxy(ctx context.Context, service *compute.BackendService) (*computebeta.TargetTcpProxy, error) { + log := log.FromContext(ctx) + targetSpec := s.scope.TargetTCPProxySpec() + targetSpec.Service = service.SelfLink + target, err := s.targettcpproxies.Get(ctx, meta.GlobalKey(targetSpec.Name)) + if err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error looking for targettcpproxy", "name", targetSpec.Name) + return nil, err + } + + log.V(2).Info("Creating a targettcpproxy", "name", targetSpec.Name) + if err := s.targettcpproxies.Insert(ctx, meta.GlobalKey(targetSpec.Name), targetSpec); err != nil { + log.Error(err, "Error creating a targettcpproxy", "name", targetSpec.Name) + return nil, err + } + + target, err = s.targettcpproxies.Get(ctx, meta.GlobalKey(targetSpec.Name)) + if err != nil { + return nil, err + } + } + + s.scope.Network().APIServerTargetProxy = pointer.String(target.SelfLink) + return target, nil +} + +func (s *Service) createOrGetAddress(ctx context.Context) (*compute.Address, error) { + log := log.FromContext(ctx) + addrSpec := s.scope.AddressSpec() + log.V(2).Info("Looking for address", "name", addrSpec.Name) + addr, err := s.addresses.Get(ctx, meta.GlobalKey(addrSpec.Name)) + if err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error looking for address", "name", addrSpec.Name) + return nil, err + } + + log.V(2).Info("Creating an address", "name", addrSpec.Name) + if err := s.addresses.Insert(ctx, meta.GlobalKey(addrSpec.Name), addrSpec); err != nil { + log.Error(err, "Error creating an address", "name", addrSpec.Name) + return nil, err + } + + addr, err = s.addresses.Get(ctx, meta.GlobalKey(addrSpec.Name)) + if err != nil { + return nil, err + } + } + + s.scope.Network().APIServerAddress = pointer.String(addr.SelfLink) + endpoint := s.scope.ControlPlaneEndpoint() + endpoint.Host = addr.Address + s.scope.SetControlPlaneEndpoint(endpoint) + return addr, nil +} + +func (s *Service) createForwardingRule(ctx context.Context, target *computebeta.TargetTcpProxy, addr *compute.Address) error { + log := log.FromContext(ctx) + spec := s.scope.ForwardingRuleSpec() + key := meta.GlobalKey(spec.Name) + spec.IPAddress = addr.SelfLink + spec.Target = target.SelfLink + log.V(2).Info("Looking for 
forwardingrule", "name", spec.Name) + forwarding, err := s.forwardingrules.Get(ctx, key) + if err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error looking for forwardingrule", "name", spec.Name) + return err + } + + log.V(2).Info("Creating a forwardingrule", "name", spec.Name) + if err := s.forwardingrules.Insert(ctx, key, spec); err != nil { + log.Error(err, "Error creating a forwardingrule", "name", spec.Name) + return err + } + + forwarding, err = s.forwardingrules.Get(ctx, key) + if err != nil { + return err + } + } + + s.scope.Network().APIServerForwardingRule = pointer.String(forwarding.SelfLink) + return nil +} + +func (s *Service) deleteForwardingRule(ctx context.Context) error { + log := log.FromContext(ctx) + spec := s.scope.ForwardingRuleSpec() + key := meta.GlobalKey(spec.Name) + log.V(2).Info("Deleting a forwardingrule", "name", spec.Name) + if err := s.forwardingrules.Delete(ctx, key); err != nil && !gcperrors.IsNotFound(err) { + log.Error(err, "Error updating a forwardingrule", "name", spec.Name) + return err + } + + s.scope.Network().APIServerForwardingRule = nil + return nil +} + +func (s *Service) deleteAddress(ctx context.Context) error { + log := log.FromContext(ctx) + spec := s.scope.AddressSpec() + key := meta.GlobalKey(spec.Name) + log.V(2).Info("Deleting a address", "name", spec.Name) + if err := s.addresses.Delete(ctx, key); err != nil && !gcperrors.IsNotFound(err) { + return err + } + + s.scope.Network().APIServerAddress = nil + return nil +} + +func (s *Service) deleteTargetTCPProxy(ctx context.Context) error { + log := log.FromContext(ctx) + spec := s.scope.TargetTCPProxySpec() + key := meta.GlobalKey(spec.Name) + log.V(2).Info("Deleting a targettcpproxy", "name", spec.Name) + if err := s.targettcpproxies.Delete(ctx, key); err != nil && !gcperrors.IsNotFound(err) { + log.Error(err, "Error deleting a targettcpproxy", "name", spec.Name) + return err + } + + s.scope.Network().APIServerTargetProxy = nil + return nil +} + +func (s *Service) deleteBackendService(ctx context.Context) error { + log := log.FromContext(ctx) + spec := s.scope.BackendServiceSpec() + key := meta.GlobalKey(spec.Name) + log.V(2).Info("Deleting a backendservice", "name", spec.Name) + if err := s.backendservices.Delete(ctx, key); err != nil && !gcperrors.IsNotFound(err) { + log.Error(err, "Error deleting a backendservice", "name", spec.Name) + return err + } + + s.scope.Network().APIServerBackendService = nil + return nil +} + +func (s *Service) deleteHealthCheck(ctx context.Context) error { + log := log.FromContext(ctx) + spec := s.scope.HealthCheckSpec() + key := meta.GlobalKey(spec.Name) + log.V(2).Info("Deleting a healthcheck", "name", spec.Name) + if err := s.healthchecks.Delete(ctx, key); err != nil && !gcperrors.IsNotFound(err) { + log.Error(err, "Error deleting a healthcheck", "name", spec.Name) + return err + } + + s.scope.Network().APIServerHealthCheck = nil + return nil +} + +func (s *Service) deleteInstanceGroups(ctx context.Context) error { + log := log.FromContext(ctx) + for zone := range s.scope.Network().APIServerInstanceGroups { + spec := s.scope.InstanceGroupSpec(zone) + key := meta.ZonalKey(spec.Name, zone) + log.V(2).Info("Deleting a instancegroup", "name", spec.Name) + if err := s.instancegroups.Delete(ctx, key); err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error deleting a instancegroup", "name", spec.Name) + return err + } + + delete(s.scope.Network().APIServerInstanceGroups, zone) + } + } + + return nil +} diff --git 
a/cloud/services/compute/loadbalancers/service.go b/cloud/services/compute/loadbalancers/service.go new file mode 100644 index 000000000..30d5203d6 --- /dev/null +++ b/cloud/services/compute/loadbalancers/service.go @@ -0,0 +1,103 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package loadbalancers + +import ( + "context" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + computebeta "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" + + "sigs.k8s.io/cluster-api-provider-gcp/cloud" +) + +type addressesInterface interface { + Get(ctx context.Context, key *meta.Key) (*compute.Address, error) + Insert(ctx context.Context, key *meta.Key, obj *compute.Address) error + Delete(ctx context.Context, key *meta.Key) error +} + +type backendservicesInterface interface { + Get(ctx context.Context, key *meta.Key) (*compute.BackendService, error) + Insert(ctx context.Context, key *meta.Key, obj *compute.BackendService) error + Update(context.Context, *meta.Key, *compute.BackendService) error + Delete(ctx context.Context, key *meta.Key) error +} + +type forwardingrulesInterface interface { + Get(ctx context.Context, key *meta.Key) (*compute.ForwardingRule, error) + Insert(ctx context.Context, key *meta.Key, obj *compute.ForwardingRule) error + Delete(ctx context.Context, key *meta.Key) error +} + +type healthchecksInterface interface { + Get(ctx context.Context, key *meta.Key) (*compute.HealthCheck, error) + Insert(ctx context.Context, key *meta.Key, obj *compute.HealthCheck) error + Delete(ctx context.Context, key *meta.Key) error +} + +type instancegroupsInterface interface { + Get(ctx context.Context, key *meta.Key) (*compute.InstanceGroup, error) + List(ctx context.Context, zone string, fl *filter.F) ([]*compute.InstanceGroup, error) + Insert(ctx context.Context, key *meta.Key, obj *compute.InstanceGroup) error + Delete(ctx context.Context, key *meta.Key) error +} + +type targettcpproxiesInterface interface { + Get(ctx context.Context, key *meta.Key) (*computebeta.TargetTcpProxy, error) + Insert(ctx context.Context, key *meta.Key, obj *computebeta.TargetTcpProxy) error + Delete(ctx context.Context, key *meta.Key) error +} + +// Scope is an interfaces that hold used methods. +type Scope interface { + cloud.Cluster + AddressSpec() *compute.Address + BackendServiceSpec() *compute.BackendService + ForwardingRuleSpec() *compute.ForwardingRule + HealthCheckSpec() *compute.HealthCheck + InstanceGroupSpec(zone string) *compute.InstanceGroup + TargetTCPProxySpec() *computebeta.TargetTcpProxy +} + +// Service implements loadbalancers reconciler. 
+type Service struct { + scope Scope + addresses addressesInterface + backendservices backendservicesInterface + forwardingrules forwardingrulesInterface + healthchecks healthchecksInterface + instancegroups instancegroupsInterface + targettcpproxies targettcpproxiesInterface +} + +var _ cloud.Reconciler = &Service{} + +// New returns Service from given scope. +func New(scope Scope) *Service { + return &Service{ + scope: scope, + addresses: scope.Cloud().GlobalAddresses(), + backendservices: scope.Cloud().BackendServices(), + forwardingrules: scope.Cloud().GlobalForwardingRules(), + healthchecks: scope.Cloud().HealthChecks(), + instancegroups: scope.Cloud().InstanceGroups(), + targettcpproxies: scope.Cloud().BetaTargetTcpProxies(), // This is temporary to use beta API. + } +} diff --git a/cloud/services/compute/network.go b/cloud/services/compute/network.go deleted file mode 100644 index d6f610f6d..000000000 --- a/cloud/services/compute/network.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package compute - -import ( - "fmt" - - "github.com/pkg/errors" - "google.golang.org/api/compute/v1" - "k8s.io/utils/pointer" - - infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/gcperrors" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/wait" -) - -// ReconcileNetwork reconciles the network and apply changes if needed. 
-func (s *Service) ReconcileNetwork() error { - // Create Network - spec := s.getNetworkSpec() - network, err := s.networks.Get(s.scope.Project(), spec.Name).Do() - autoCreateCloudNat := false - if gcperrors.IsNotFound(err) { - autoCreateCloudNat = true - op, err := s.networks.Insert(s.scope.Project(), spec).Do() - if err != nil { - return errors.Wrapf(err, "failed to create network") - } - - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to create network") - } - - network, err = s.networks.Get(s.scope.Project(), spec.Name).Do() - if err != nil { - return errors.Wrapf(err, "failed to describe network") - } - } else if err != nil { - return errors.Wrapf(err, "failed to describe network") - } - - if autoCreateCloudNat { - if err := s.createCloudNat(network); err != nil { - return errors.Wrapf(err, "failed to create cloudnat gateway") - } - } - - s.scope.GCPCluster.Spec.Network.Name = pointer.StringPtr(network.Name) - s.scope.GCPCluster.Spec.Network.AutoCreateSubnetworks = pointer.BoolPtr(network.AutoCreateSubnetworks) - s.scope.GCPCluster.Status.Network.SelfLink = pointer.StringPtr(network.SelfLink) - - return nil -} - -func (s *Service) getNetworkSpec() *compute.Network { - res := &compute.Network{ - Name: s.scope.NetworkName(), - Description: infrav1.ClusterTagKey(s.scope.Name()), - AutoCreateSubnetworks: true, - } - - if s.scope.GCPCluster.Spec.Network.AutoCreateSubnetworks != nil { - res.AutoCreateSubnetworks = *s.scope.GCPCluster.Spec.Network.AutoCreateSubnetworks - } - - return res -} - -// DeleteNetwork deletes a network. -func (s *Service) DeleteNetwork() error { - network, err := s.networks.Get(s.scope.Project(), s.scope.NetworkName()).Do() - if gcperrors.IsNotFound(err) { - return nil - } - - // Return early if the description doesn't match our ownership tag. - if network.Description != infrav1.ClusterTagKey(s.scope.Name()) { - return nil - } - - // Delete Router. - router, err := s.routers.Get(s.scope.Project(), s.scope.Region(), getRouterName(s.scope.NetworkName())).Do() - if err == nil { - op, err := s.routers.Delete(s.scope.Project(), s.scope.Region(), router.Name).Do() - if opErr := s.checkOrWaitForDeleteOp(op, err); opErr != nil { - return errors.Wrapf(opErr, "failed to delete router") - } - } else if !gcperrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get router to delete") - } - - // Delete Network. 
- op, err := s.networks.Delete(s.scope.Project(), network.Name).Do() - if opErr := s.checkOrWaitForDeleteOp(op, err); opErr != nil { - return errors.Wrapf(opErr, "failed to delete network") - } - - s.scope.GCPCluster.Spec.Network.Name = nil - - return nil -} - -func (s *Service) createCloudNat(network *compute.Network) error { - router, err := s.routers.Get(s.scope.Project(), s.scope.Region(), getRouterName(s.scope.NetworkName())).Do() - if gcperrors.IsNotFound(err) { - router = s.getRouterSpec(network) - op, err := s.routers.Insert(s.scope.Project(), s.scope.Region(), router).Do() - if err != nil { - return errors.Wrapf(err, "failed to create router") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to wait for create router operation") - } - router, err = s.routers.Get(s.scope.Project(), s.scope.Region(), router.Name).Do() - if err != nil { - return errors.Wrapf(err, "failed to get router after create") - } - } else if err != nil { - return errors.Wrapf(err, "failed to get routers") - } - - if len(router.Nats) == 0 { - router.Nats = []*compute.RouterNat{s.getRouterNatSpec()} - op, err := s.routers.Patch(s.scope.Project(), s.scope.Region(), router.Name, router).Do() - if err != nil { - return errors.Wrapf(err, "failed to patch router to create nat") - } - if err := wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op); err != nil { - return errors.Wrapf(err, "failed to wait for patch router operation") - } - } - - s.scope.GCPCluster.Status.Network.Router = pointer.StringPtr(router.SelfLink) - return nil -} - -func (s *Service) getRouterSpec(network *compute.Network) *compute.Router { - return &compute.Router{ - Name: getRouterName(network.Name), - Network: network.SelfLink, - Nats: []*compute.RouterNat{s.getRouterNatSpec()}, - } -} - -func (s *Service) getRouterNatSpec() *compute.RouterNat { - return &compute.RouterNat{ - Name: getRouterNatName(s.scope.NetworkName()), - NatIpAllocateOption: "AUTO_ONLY", - SourceSubnetworkIpRangesToNat: "ALL_SUBNETWORKS_ALL_IP_RANGES", - } -} - -func getRouterName(network string) string { - return fmt.Sprintf("%s-%s", network, "router") -} -func getRouterNatName(network string) string { - return fmt.Sprintf("%s-%s", network, "nat") -} diff --git a/cloud/services/compute/networks/doc.go b/cloud/services/compute/networks/doc.go new file mode 100644 index 000000000..48178022c --- /dev/null +++ b/cloud/services/compute/networks/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package networks implements reconciler for cluster networking components. +package networks diff --git a/cloud/services/compute/networks/reconcile.go b/cloud/services/compute/networks/reconcile.go new file mode 100644 index 000000000..1764d5234 --- /dev/null +++ b/cloud/services/compute/networks/reconcile.go @@ -0,0 +1,149 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package networks
+
+import (
+ "context"
+
+ "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
+ "google.golang.org/api/compute/v1"
+
+ "k8s.io/utils/pointer"
+ infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4"
+ "sigs.k8s.io/cluster-api-provider-gcp/cloud/gcperrors"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// Reconcile reconciles the cluster network components.
+func (s *Service) Reconcile(ctx context.Context) error {
+ log := log.FromContext(ctx)
+ log.Info("Reconciling network resources")
+ network, err := s.createOrGetNetwork(ctx)
+ if err != nil {
+ return err
+ }
+
+ if network.Description == infrav1.ClusterTagKey(s.scope.Name()) {
+ router, err := s.createOrGetRouter(ctx, network)
+ if err != nil {
+ return err
+ }
+
+ s.scope.Network().Router = pointer.String(router.SelfLink)
+ }
+
+ s.scope.Network().SelfLink = pointer.String(network.SelfLink)
+ return nil
+}
+
+// Delete deletes the cluster network components.
+func (s *Service) Delete(ctx context.Context) error {
+ log := log.FromContext(ctx)
+ log.Info("Deleting network resources")
+ networkKey := meta.GlobalKey(s.scope.NetworkName())
+ log.V(2).Info("Looking for network before deleting", "name", networkKey)
+ network, err := s.networks.Get(ctx, networkKey)
+ if err != nil {
+ return gcperrors.IgnoreNotFound(err)
+ }
+
+ if network.Description != infrav1.ClusterTagKey(s.scope.Name()) {
+ return nil
+ }
+
+ log.V(2).Info("Found network created by capg", "name", s.scope.NetworkName())
+
+ routerSpec := s.scope.NatRouterSpec()
+ routerKey := meta.RegionalKey(routerSpec.Name, s.scope.Region())
+ log.V(2).Info("Looking for cloudnat router before deleting", "name", routerSpec.Name)
+ router, err := s.routers.Get(ctx, routerKey)
+ if err != nil && !gcperrors.IsNotFound(err) {
+ return err
+ }
+
+ if router != nil && router.Description == infrav1.ClusterTagKey(s.scope.Name()) {
+ if err := s.routers.Delete(ctx, routerKey); err != nil && !gcperrors.IsNotFound(err) {
+ return err
+ }
+ }
+
+ if err := s.networks.Delete(ctx, networkKey); err != nil {
+ log.Error(err, "Error deleting a network", "name", s.scope.NetworkName())
+ return err
+ }
+
+ s.scope.Network().Router = nil
+ s.scope.Network().SelfLink = nil
+ return nil
+}
+
+// createOrGetNetwork creates a network if it does not exist; otherwise it returns the existing network.
+func (s *Service) createOrGetNetwork(ctx context.Context) (*compute.Network, error) { + log := log.FromContext(ctx) + log.V(2).Info("Looking for network", "name", s.scope.NetworkName()) + networkKey := meta.GlobalKey(s.scope.NetworkName()) + network, err := s.networks.Get(ctx, networkKey) + if err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error looking for network", "name", s.scope.NetworkName()) + return nil, err + } + + log.V(2).Info("Creating a network", "name", s.scope.NetworkName()) + if err := s.networks.Insert(ctx, networkKey, s.scope.NetworkSpec()); err != nil { + log.Error(err, "Error creating a network", "name", s.scope.NetworkName()) + return nil, err + } + + network, err = s.networks.Get(ctx, networkKey) + if err != nil { + return nil, err + } + } + + return network, nil +} + +// createOrGetRouter creates a cloudnat router if not exist otherwise return the existing. +func (s *Service) createOrGetRouter(ctx context.Context, network *compute.Network) (*compute.Router, error) { + log := log.FromContext(ctx) + spec := s.scope.NatRouterSpec() + log.V(2).Info("Looking for cloudnat router", "name", spec.Name) + routerKey := meta.RegionalKey(spec.Name, s.scope.Region()) + router, err := s.routers.Get(ctx, routerKey) + if err != nil { + if !gcperrors.IsNotFound(err) { + log.Error(err, "Error looking for cloudnat router", "name", spec.Name) + return nil, err + } + + spec.Network = network.SelfLink + spec.Description = infrav1.ClusterTagKey(s.scope.Name()) + log.V(2).Info("Creating a cloudnat router", "name", spec.Name) + if err := s.routers.Insert(ctx, routerKey, spec); err != nil { + log.Error(err, "Error creating a cloudnat router", "name", spec.Name) + return nil, err + } + + router, err = s.routers.Get(ctx, routerKey) + if err != nil { + return nil, err + } + } + + return router, nil +} diff --git a/cloud/services/compute/networks/service.go b/cloud/services/compute/networks/service.go new file mode 100644 index 000000000..eeace4d7b --- /dev/null +++ b/cloud/services/compute/networks/service.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package networks + +import ( + "context" + + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" + "google.golang.org/api/compute/v1" + + "sigs.k8s.io/cluster-api-provider-gcp/cloud" +) + +type networksInterface interface { + Get(ctx context.Context, key *meta.Key) (*compute.Network, error) + Insert(ctx context.Context, key *meta.Key, obj *compute.Network) error + Delete(ctx context.Context, key *meta.Key) error +} + +type routersInterface interface { + Get(ctx context.Context, key *meta.Key) (*compute.Router, error) + Insert(ctx context.Context, key *meta.Key, obj *compute.Router) error + Delete(ctx context.Context, key *meta.Key) error +} + +// Scope is an interfaces that hold used methods. +type Scope interface { + cloud.Cluster + NetworkSpec() *compute.Network + NatRouterSpec() *compute.Router +} + +// Service implements networks reconciler. 
+type Service struct { + scope Scope + networks networksInterface + routers routersInterface +} + +var _ cloud.Reconciler = &Service{} + +// New returns Service from given scope. +func New(scope Scope) *Service { + return &Service{ + scope: scope, + networks: scope.Cloud().Networks(), + routers: scope.Cloud().Routers(), + } +} diff --git a/cloud/services/compute/regions.go b/cloud/services/compute/regions.go deleted file mode 100644 index 5e535cb06..000000000 --- a/cloud/services/compute/regions.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package compute implements gcp compute services. -package compute - -import ( - "fmt" - - "github.com/pkg/errors" -) - -// GetZones retireves GCP regions. -func (s *Service) GetZones() ([]string, error) { - region, err := s.scope.Compute.Regions.Get(s.scope.Project(), s.scope.Region()).Do() - if err != nil { - return nil, errors.Wrapf(err, "failed to describe region %q", s.scope.Region()) - } - - zones, err := s.scope.Compute.Zones. - List(s.scope.Project()). - Filter(fmt.Sprintf("region = %q", region.SelfLink)). - Do() - if err != nil { - return nil, errors.Wrapf(err, "failed to describe zones in region %q", s.scope.Region()) - } - - res := make([]string, 0, len(zones.Items)) - for _, x := range zones.Items { - res = append(res, x.Name) - } - - return res, nil -} diff --git a/cloud/services/compute/service.go b/cloud/services/compute/service.go deleted file mode 100644 index 1e32ad625..000000000 --- a/cloud/services/compute/service.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package compute - -import ( - "google.golang.org/api/compute/v1" - - "sigs.k8s.io/cluster-api-provider-gcp/cloud/gcperrors" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/wait" -) - -// Service holds a collection of interfaces. -// The interfaces are broken down like this to group functions together. -// One alternative is to have a large list of functions from the gcp client. -type Service struct { - scope *scope.ClusterScope - - // Helper clients for GCP. 
- instances *compute.InstancesService - instancegroups *compute.InstanceGroupsService - networks *compute.NetworksService - subnetworks *compute.SubnetworksService - healthchecks *compute.HealthChecksService - backendservices *compute.BackendServicesService - targetproxies *compute.TargetTcpProxiesService - addresses *compute.GlobalAddressesService - forwardingrules *compute.GlobalForwardingRulesService - firewalls *compute.FirewallsService - routers *compute.RoutersService -} - -// NewService returns a new service given the gcp api client. -func NewService(scope *scope.ClusterScope) *Service { - return &Service{ - scope: scope, - instances: scope.Compute.Instances, - instancegroups: scope.Compute.InstanceGroups, - networks: scope.Compute.Networks, - subnetworks: scope.Compute.Subnetworks, - healthchecks: scope.Compute.HealthChecks, - backendservices: scope.Compute.BackendServices, - targetproxies: scope.Compute.TargetTcpProxies, - addresses: scope.Compute.GlobalAddresses, - forwardingrules: scope.Compute.GlobalForwardingRules, - firewalls: scope.Compute.Firewalls, - routers: scope.Compute.Routers, - } -} - -// If err == IsNotFound, then return nil -// If err != nil, then return err -// Otherwise should wait for operation to finish. -func (s *Service) checkOrWaitForDeleteOp(op *compute.Operation, err error) error { - if err != nil { - if gcperrors.IsNotFound(err) { - return nil - } - return err - } - - if op == nil { - return nil - } - - return wait.ForComputeOperation(s.scope.Compute, s.scope.Project(), op) -} diff --git a/cloud/wait/wait.go b/cloud/wait/wait.go deleted file mode 100644 index c25ce8ca0..000000000 --- a/cloud/wait/wait.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package wait implements cloud wait operations. -package wait - -import ( - "bytes" - "context" - "fmt" - "path" - "time" - - "github.com/pkg/errors" - "google.golang.org/api/compute/v1" - "k8s.io/klog/v2" -) - -const ( - gceTimeout = time.Minute * 10 - gceWaitSleep = time.Second * 5 -) - -// ForComputeOperation wait when a compute operation is in progress. -func ForComputeOperation(client *compute.Service, project string, op *compute.Operation) error { - start := time.Now() - ctx, cf := context.WithTimeout(context.Background(), gceTimeout) - defer cf() - - var err error - for { - if err = checkComputeOperation(op, err); err != nil || op.Status == "DONE" { - return err - } - klog.V(1).Infof("Wait for %v %q: %v (%d%%): %v", op.OperationType, op.Name, op.Status, op.Progress, op.StatusMessage) - select { - case <-ctx.Done(): - return fmt.Errorf("gce operation %v %q timed out after %v", op.OperationType, op.Name, time.Since(start)) - case <-time.After(gceWaitSleep): - } - op, err = getComputeOperation(client, project, op) - } -} - -// getComputeOperation returns an updated operation. 
-func getComputeOperation(client *compute.Service, project string, op *compute.Operation) (*compute.Operation, error) { - switch { - case op.Zone != "": - return client.ZoneOperations.Get(project, path.Base(op.Zone), op.Name).Do() - case op.Region != "": - return client.RegionOperations.Get(project, path.Base(op.Region), op.Name).Do() - default: - return client.GlobalOperations.Get(project, op.Name).Do() - } -} - -func checkComputeOperation(op *compute.Operation, err error) error { - if err != nil || op.Error == nil || len(op.Error.Errors) == 0 { - return err - } - var errs bytes.Buffer - for _, v := range op.Error.Errors { - errs.WriteString(v.Message) - errs.WriteByte('\n') - } - - return errors.New(errs.String()) -} diff --git a/controllers/gcpcluster_controller.go b/controllers/gcpcluster_controller.go index d9ad11c6c..659652086 100644 --- a/controllers/gcpcluster_controller.go +++ b/controllers/gcpcluster_controller.go @@ -20,7 +20,8 @@ import ( "context" "time" - "github.com/go-logr/logr" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -28,40 +29,44 @@ import ( "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" + "sigs.k8s.io/cluster-api/util/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" + "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/firewalls" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/loadbalancers" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/networks" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" ) // GCPClusterReconciler reconciles a GCPCluster object. type GCPClusterReconciler struct { client.Client - Log logr.Logger ReconcileTimeout time.Duration WatchFilterValue string } +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=gcpclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=gcpclusters/status,verbs=get;update;patch + func (r *GCPClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { - log := r.Log.WithValues("controller", "GCPCluster") + log := log.FromContext(ctx).WithValues("controller", "GCPCluster") c, err := ctrl.NewControllerManagedBy(mgr). WithOptions(options). For(&infrav1.GCPCluster{}). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). - WithEventFilter(predicates.ResourceIsNotExternallyManaged(ctrl.LoggerFrom(ctx))). - Watches( - &source.Kind{Type: &infrav1.GCPMachine{}}, - handler.EnqueueRequestsFromMapFunc(r.GCPMachineToGCPCluster), - ). 
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log, r.WatchFilterValue)). + WithEventFilter(predicates.ResourceIsNotExternallyManaged(log)). Build(r) if err != nil { return errors.Wrap(err, "error creating controller") @@ -96,29 +101,27 @@ func (r *GCPClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma return nil } -// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=gcpclusters,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=gcpclusters/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch - func (r *GCPClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(r.ReconcileTimeout)) defer cancel() - log := r.Log.WithValues("namespace", req.Namespace, "gcpCluster", req.Name) - // Fetch the GCPCluster instance + log := log.FromContext(ctx) gcpCluster := &infrav1.GCPCluster{} err := r.Get(ctx, req.NamespacedName, gcpCluster) if err != nil { if apierrors.IsNotFound(err) { + log.Info("GCPCluster resource not found or already deleted") return ctrl.Result{}, nil } + log.Error(err, "Unable to fetch GCPCluster resource") return ctrl.Result{}, err } // Fetch the Cluster. cluster, err := util.GetOwnerCluster(ctx, r.Client, gcpCluster.ObjectMeta) if err != nil { + log.Error(err, "Failed to get owner cluster") return ctrl.Result{}, err } if cluster == nil { @@ -131,12 +134,8 @@ func (r *GCPClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) - - // Create the scope. clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ Client: r.Client, - Logger: log, Cluster: cluster, GCPCluster: gcpCluster, }) @@ -153,145 +152,97 @@ func (r *GCPClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Handle deleted clusters if !gcpCluster.DeletionTimestamp.IsZero() { - return r.reconcileDelete(clusterScope) + return r.reconcileDelete(ctx, clusterScope) } // Handle non-deleted clusters - return r.reconcile(clusterScope) + return r.reconcile(ctx, clusterScope) } -func (r *GCPClusterReconciler) reconcile(clusterScope *scope.ClusterScope) (ctrl.Result, error) { - clusterScope.Info("Reconciling GCPCluster") +func (r *GCPClusterReconciler) reconcile(ctx context.Context, clusterScope *scope.ClusterScope) (ctrl.Result, error) { + log := log.FromContext(ctx) + log.Info("Reconciling GCPCluster") - gcpCluster := clusterScope.GCPCluster - - // If the GCPCluster doesn't have our finalizer, add it. 
- controllerutil.AddFinalizer(gcpCluster, infrav1.ClusterFinalizer) - // Register the finalizer immediately to avoid orphaning AWS resources on delete + controllerutil.AddFinalizer(clusterScope.GCPCluster, infrav1.ClusterFinalizer) if err := clusterScope.PatchObject(); err != nil { return ctrl.Result{}, err } - computeSvc := compute.NewService(clusterScope) - - if err := computeSvc.ReconcileNetwork(); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile network for GCPCluster %s/%s", gcpCluster.Namespace, gcpCluster.Name) - } - - if err := computeSvc.ReconcileFirewalls(); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile firewalls for GCPCluster %s/%s", gcpCluster.Namespace, gcpCluster.Name) - } - - if err := computeSvc.ReconcileInstanceGroups(); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile instance groups for GCPCluster %s/%s", gcpCluster.Namespace, gcpCluster.Name) - } - - if err := computeSvc.ReconcileLoadbalancers(); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile load balancers for GCPCluster %s/%s", gcpCluster.Namespace, gcpCluster.Name) - } - - if gcpCluster.Status.Network.APIServerAddress == nil { - clusterScope.Info("Waiting on API server Global IP Address") - - return ctrl.Result{RequeueAfter: 15 * time.Second}, nil - } - - // Set APIEndpoints so the Cluster API Cluster Controller can pull them - gcpCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ - Host: *gcpCluster.Status.Network.APIServerAddress, - Port: 443, + region, err := clusterScope.Cloud().Regions().Get(ctx, meta.GlobalKey(clusterScope.Region())) + if err != nil { + return ctrl.Result{}, err } - // Set FailureDomains on the GCPCluster Status - zones, err := computeSvc.GetZones() + zones, err := clusterScope.Cloud().Zones().List(ctx, filter.Regexp("region", region.SelfLink)) if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to get available zones for GCPCluster %s/%s", gcpCluster.Namespace, gcpCluster.Name) + return ctrl.Result{}, err } - // FailureDomains list should be empty by default. 
- gcpCluster.Status.FailureDomains = make(clusterv1.FailureDomains, len(zones)) - - // Iterate through all zones + failureDomains := make(clusterv1.FailureDomains, len(zones)) for _, zone := range zones { - // If we have failuredomains in spec, see if this zone is in valid zone - // Add to the status _only_ if it's mentioned in the gcpCluster spec - if len(gcpCluster.Spec.FailureDomains) > 0 { - for _, fd := range gcpCluster.Spec.FailureDomains { - if fd == zone { - gcpCluster.Status.FailureDomains[zone] = clusterv1.FailureDomainSpec{ + if len(clusterScope.GCPCluster.Spec.FailureDomains) > 0 { + for _, fd := range clusterScope.GCPCluster.Spec.FailureDomains { + if fd == zone.Name { + failureDomains[zone.Name] = clusterv1.FailureDomainSpec{ ControlPlane: true, } } } } else { - gcpCluster.Status.FailureDomains[zone] = clusterv1.FailureDomainSpec{ + failureDomains[zone.Name] = clusterv1.FailureDomainSpec{ ControlPlane: true, } } } - // No errors, so mark us ready so the Cluster API Cluster Controller can pull it - gcpCluster.Status.Ready = true + clusterScope.SetFailureDomains(failureDomains) - return ctrl.Result{}, nil -} - -func (r *GCPClusterReconciler) reconcileDelete(clusterScope *scope.ClusterScope) (ctrl.Result, error) { - clusterScope.Info("Reconciling GCPCluster delete") - - computeSvc := compute.NewService(clusterScope) - gcpCluster := clusterScope.GCPCluster - - if err := computeSvc.DeleteLoadbalancers(); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "error deleting load balancer for GCPCluster %s/%s", gcpCluster.Namespace, gcpCluster.Name) + reconcilers := []cloud.Reconciler{ + networks.New(clusterScope), + firewalls.New(clusterScope), + loadbalancers.New(clusterScope), } - if err := computeSvc.DeleteInstanceGroups(); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "error deleting instance groups for GCPCluster %s/%s", gcpCluster.Namespace, gcpCluster.Name) - } - - if err := computeSvc.DeleteFirewalls(); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "error deleting firewall rules for GCPCluster %s/%s", gcpCluster.Namespace, gcpCluster.Name) + for _, r := range reconcilers { + if err := r.Reconcile(ctx); err != nil { + log.Error(err, "Reconcile error") + record.Warnf(clusterScope.GCPCluster, "GCPClusterReconcile", "Reconcile error - %v", err) + return ctrl.Result{}, err + } } - if err := computeSvc.DeleteNetwork(); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "error deleting network for GCPCluster %s/%s", gcpCluster.Namespace, gcpCluster.Name) + controlPlaneEndpoint := clusterScope.ControlPlaneEndpoint() + if controlPlaneEndpoint.Host == "" { + log.Info("GCPCluster does not have control-plane endpoint yet. Reconciling") + record.Event(clusterScope.GCPCluster, "GCPClusterReconcile", "Waiting for control-plane endpoint") + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } - // Cluster is deleted so remove the finalizer. - controllerutil.RemoveFinalizer(clusterScope.GCPCluster, infrav1.ClusterFinalizer) - + record.Eventf(clusterScope.GCPCluster, "GCPClusterReconcile", "Got control-plane endpoint - %s", controlPlaneEndpoint.Host) + clusterScope.SetReady() + record.Event(clusterScope.GCPCluster, "GCPClusterReconcile", "Reconciled") return ctrl.Result{}, nil } -// GCPMachineToGCPCluster is a handler.ToRequestsFunc to be used to enqeue requests for reconciliation -// of GCPCluster. 
-func (r *GCPClusterReconciler) GCPMachineToGCPCluster(o client.Object) []ctrl.Request { - m, ok := o.(*infrav1.GCPMachine) - if !ok { - r.Log.Error(errors.Errorf("expected a GCPMachine but got a %T", o), "failed to get GCPCluster for GCPMachine") +func (r *GCPClusterReconciler) reconcileDelete(ctx context.Context, clusterScope *scope.ClusterScope) (ctrl.Result, error) { + log := log.FromContext(ctx) + log.Info("Reconciling Delete GCPCluster") - return nil + reconcilers := []cloud.Reconciler{ + loadbalancers.New(clusterScope), + firewalls.New(clusterScope), + networks.New(clusterScope), } - log := r.Log.WithValues("GCPMachine", m.Name, "Namespace", m.Namespace) - - c, err := util.GetOwnerCluster(context.TODO(), r.Client, m.ObjectMeta) - switch { - case err != nil: - log.Error(err, "failed to get owning cluster") - return nil - case apierrors.IsNotFound(err) || c == nil || c.Spec.InfrastructureRef == nil: - return nil + for _, r := range reconcilers { + if err := r.Delete(ctx); err != nil { + log.Error(err, "Reconcile error") + record.Warnf(clusterScope.GCPCluster, "GCPClusterReconcile", "Reconcile error - %v", err) + return ctrl.Result{}, err + } } - if annotations.IsExternallyManaged(c) { - log.V(4).Info("GCPCluster is externally managed, skipping mapping.") - return nil - } - return []ctrl.Request{ - { - NamespacedName: client.ObjectKey{Namespace: c.Namespace, Name: c.Spec.InfrastructureRef.Name}, - }, - } + controllerutil.RemoveFinalizer(clusterScope.GCPCluster, infrav1.ClusterFinalizer) + record.Event(clusterScope.GCPCluster, "GCPClusterReconcile", "Reconciled") + return ctrl.Result{}, nil } diff --git a/controllers/gcpcluster_controller_test.go b/controllers/gcpcluster_controller_test.go index 73293c5d6..5711c3a7b 100644 --- a/controllers/gcpcluster_controller_test.go +++ b/controllers/gcpcluster_controller_test.go @@ -26,7 +26,6 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" ) var _ = Describe("GCPClusterReconciler", func() { @@ -39,7 +38,6 @@ var _ = Describe("GCPClusterReconciler", func() { reconciler := &GCPClusterReconciler{ Client: k8sClient, - Log: log.Log, } instance := &infrav1.GCPCluster{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} diff --git a/controllers/gcpmachine_controller.go b/controllers/gcpmachine_controller.go index 3b293784a..cb4111f1c 100644 --- a/controllers/gcpmachine_controller.go +++ b/controllers/gcpmachine_controller.go @@ -19,14 +19,10 @@ package controllers import ( "context" - "fmt" "time" - "github.com/go-logr/logr" "github.com/pkg/errors" - gcompute "google.golang.org/api/compute/v1" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" capierrors "sigs.k8s.io/cluster-api/errors" @@ -39,25 +35,30 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/source" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute" + "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/instances" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" ) // GCPMachineReconciler reconciles a GCPMachine object. 
 type GCPMachineReconciler struct {
 	client.Client
-	Log              logr.Logger
 	ReconcileTimeout time.Duration
 	WatchFilterValue string
 }
 
-func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
-	log := r.Log.WithValues("controller", "GCPMachine")
+// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
+// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=gcpmachines,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=gcpmachines/status,verbs=get;update;patch
+func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	log := ctrl.LoggerFrom(ctx)
 	c, err := ctrl.NewControllerManagedBy(mgr).
 		WithOptions(options).
 		For(&infrav1.GCPMachine{}).
@@ -68,14 +69,14 @@ func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma
 		).
 		Watches(
 			&source.Kind{Type: &infrav1.GCPCluster{}},
-			handler.EnqueueRequestsFromMapFunc(r.GCPClusterToGCPMachines),
+			handler.EnqueueRequestsFromMapFunc(r.GCPClusterToGCPMachines(ctx)),
 		).
 		Build(r)
 	if err != nil {
 		return errors.Wrap(err, "error creating controller")
 	}
 
-	gcpMachineMapper, err := util.ClusterToObjectsMapper(r.Client, &infrav1.GCPMachineList{}, mgr.GetScheme())
+	clusterToObjectFunc, err := util.ClusterToObjectsMapper(r.Client, &infrav1.GCPMachineList{}, mgr.GetScheme())
 	if err != nil {
 		return errors.Wrap(err, "failed to create mapper for Cluster to GCPMachines")
 	}
@@ -83,7 +84,7 @@ func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma
 	// Add a watch on clusterv1.Cluster object for unpause & ready notifications.
 	if err := c.Watch(
 		&source.Kind{Type: &clusterv1.Cluster{}},
-		handler.EnqueueRequestsFromMapFunc(gcpMachineMapper),
+		handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc),
 		predicates.ClusterUnpausedAndInfrastructureReady(log),
 	); err != nil {
 		return errors.Wrap(err, "failed adding a watch for ready clusters")
@@ -92,18 +93,51 @@ func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma
 	return nil
 }
 
-// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=gcpmachines,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=gcpmachines/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
-// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
-// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
+// GCPClusterToGCPMachines returns a handler.MapFunc to be used to enqueue requests for reconciliation
+// of GCPMachines.
+func (r *GCPMachineReconciler) GCPClusterToGCPMachines(ctx context.Context) handler.MapFunc { + log := ctrl.LoggerFrom(ctx) + return func(o client.Object) []ctrl.Request { + result := []ctrl.Request{} + + c, ok := o.(*infrav1.GCPCluster) + if !ok { + log.Error(errors.Errorf("expected a GCPCluster but got a %T", o), "failed to get GCPMachine for GCPCluster") + return nil + } + + cluster, err := util.GetOwnerCluster(ctx, r.Client, c.ObjectMeta) + switch { + case apierrors.IsNotFound(err) || cluster == nil: + return result + case err != nil: + log.Error(err, "failed to get owning cluster") + return result + } + + labels := map[string]string{clusterv1.ClusterLabelName: cluster.Name} + machineList := &clusterv1.MachineList{} + if err := r.List(ctx, machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil { + log.Error(err, "failed to list Machines") + return nil + } + for _, m := range machineList.Items { + if m.Spec.InfrastructureRef.Name == "" { + continue + } + name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name} + result = append(result, ctrl.Request{NamespacedName: name}) + } + + return result + } +} func (r *GCPMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultedLoopTimeout(r.ReconcileTimeout)) defer cancel() - logger := r.Log.WithValues("namespace", req.Namespace, "gcpMachine", req.Name) - // Fetch the GCPMachine instance. + log := ctrl.LoggerFrom(ctx) gcpMachine := &infrav1.GCPMachine{} err := r.Get(ctx, req.NamespacedName, gcpMachine) if err != nil { @@ -114,52 +148,42 @@ func (r *GCPMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } - // Fetch the Machine. machine, err := util.GetOwnerMachine(ctx, r.Client, gcpMachine.ObjectMeta) if err != nil { return ctrl.Result{}, err } if machine == nil { - logger.Info("Machine Controller has not yet set OwnerRef") - + log.Info("Machine Controller has not yet set OwnerRef") return ctrl.Result{}, nil } - logger = logger.WithValues("machine", machine.Name) - - // Fetch the Cluster. + log = log.WithValues("machine", machine.Name) cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta) if err != nil { - logger.Info("Machine is missing cluster label or cluster does not exist") + log.Info("Machine is missing cluster label or cluster does not exist") return ctrl.Result{}, nil } if annotations.IsPaused(cluster, gcpMachine) { - logger.Info("GCPMachine or linked Cluster is marked as paused. Won't reconcile") + log.Info("GCPMachine or linked Cluster is marked as paused. 
Won't reconcile") return ctrl.Result{}, nil } - logger = logger.WithValues("cluster", cluster.Name) - + log = log.WithValues("cluster", cluster.Name) gcpCluster := &infrav1.GCPCluster{} - - gcpClusterName := client.ObjectKey{ + gcpClusterKey := client.ObjectKey{ Namespace: gcpMachine.Namespace, Name: cluster.Spec.InfrastructureRef.Name, } - if err := r.Client.Get(ctx, gcpClusterName, gcpCluster); err != nil { - logger.Info("GCPCluster is not available yet") - + if err := r.Client.Get(ctx, gcpClusterKey, gcpCluster); err != nil { + log.Info("GCPCluster is not available yet") return ctrl.Result{}, nil } - logger = logger.WithValues("gcpCluster", gcpCluster.Name) - // Create the cluster scope clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{ Client: r.Client, - Logger: logger, Cluster: cluster, GCPCluster: gcpCluster, }) @@ -169,12 +193,10 @@ func (r *GCPMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Create the machine scope machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ - Logger: logger, - Client: r.Client, - Cluster: cluster, - Machine: machine, - GCPCluster: gcpCluster, - GCPMachine: gcpMachine, + Client: r.Client, + Machine: machine, + GCPMachine: gcpMachine, + ClusterGetter: clusterScope, }) if err != nil { return ctrl.Result{}, errors.Errorf("failed to create scope: %+v", err) @@ -189,248 +211,57 @@ func (r *GCPMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Handle deleted machines if !gcpMachine.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(machineScope, clusterScope) + return r.reconcileDelete(ctx, machineScope) } // Handle non-deleted machines - return r.reconcile(ctx, machineScope, clusterScope) + return r.reconcile(ctx, machineScope) } -func (r *GCPMachineReconciler) reconcile(_ context.Context, machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (ctrl.Result, error) { - machineScope.Info("Reconciling GCPMachine") - // If the GCPMachine is in an error state, return early. - if machineScope.GCPMachine.Status.FailureReason != nil || machineScope.GCPMachine.Status.FailureMessage != nil { - machineScope.Info("Error state detected, skipping reconciliation") +func (r *GCPMachineReconciler) reconcile(ctx context.Context, machineScope *scope.MachineScope) (ctrl.Result, error) { + log := log.FromContext(ctx) + log.Info("Reconciling GCPMachine") - return ctrl.Result{}, nil - } - - // If the GCPMachine doesn't have our finalizer, add it. controllerutil.AddFinalizer(machineScope.GCPMachine, infrav1.MachineFinalizer) if err := machineScope.PatchObject(); err != nil { return ctrl.Result{}, err } - if !machineScope.Cluster.Status.InfrastructureReady { - machineScope.Info("Cluster infrastructure is not ready yet") - - return ctrl.Result{}, nil - } - - // Make sure bootstrap data is available and populated. - if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { - machineScope.Info("Bootstrap data secret reference is not yet available") - - return ctrl.Result{}, nil - } - - computeSvc := compute.NewService(clusterScope) - - // Get or create the instance. - instance, err := r.getOrCreate(machineScope, computeSvc) - if err != nil { + if err := instances.New(machineScope).Reconcile(ctx); err != nil { + log.Error(err, "Error reconciling instance resources") + record.Warnf(machineScope.GCPMachine, "GCPMachineReconcile", "Reconcile error - %v", err) return ctrl.Result{}, err } - // Set a failure message if we couldn't find the instance. 
- if instance == nil { - machineScope.SetFailureReason(capierrors.UpdateMachineError) - machineScope.SetFailureMessage(errors.New("GCE instance cannot be found")) - - return ctrl.Result{}, nil - } - - // Make sure Spec.ProviderID is always set. - machineScope.SetProviderID(fmt.Sprintf("gce://%s/%s/%s", clusterScope.Project(), machineScope.Zone(), instance.Name)) - - // Proceed to reconcile the GCPMachine state. - machineScope.SetInstanceStatus(infrav1.InstanceStatus(instance.Status)) - - machineScope.SetAddresses(r.getAddresses(instance)) - - switch infrav1.InstanceStatus(instance.Status) { + instanceState := *machineScope.GetInstanceStatus() + switch instanceState { + case infrav1.InstanceStatusProvisioning, infrav1.InstanceStatusStaging: + log.Info("GCPMachine instance is pending", "instance-id", *machineScope.GetInstanceID()) + record.Eventf(machineScope.GCPMachine, "GCPMachineReconcile", "GCPMachine instance is pending - instance-id: %s", *machineScope.GetInstanceID()) + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil case infrav1.InstanceStatusRunning: - machineScope.Info("Machine instance is running", "instance-id", *machineScope.GetInstanceID()) + log.Info("GCPMachine instance is running", "instance-id", *machineScope.GetInstanceID()) + record.Eventf(machineScope.GCPMachine, "GCPMachineReconcile", "GCPMachine instance is running - instance-id: %s", *machineScope.GetInstanceID()) + record.Event(machineScope.GCPMachine, "GCPMachineReconcile", "Reconciled") machineScope.SetReady() - case infrav1.InstanceStatusProvisioning, infrav1.InstanceStatusStaging: - machineScope.Info("Machine instance is pending", "instance-id", *machineScope.GetInstanceID()) + return ctrl.Result{}, nil default: machineScope.SetFailureReason(capierrors.UpdateMachineError) - machineScope.SetFailureMessage(errors.Errorf("GCE instance state %q is unexpected", instance.Status)) + machineScope.SetFailureMessage(errors.Errorf("GCPMachine instance state %s is unexpected", instanceState)) + return ctrl.Result{Requeue: true}, nil } - - if err := r.reconcileLBAttachment(machineScope, clusterScope, instance); err != nil { - return ctrl.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err) - } - - return ctrl.Result{}, nil } -func (r *GCPMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (_ ctrl.Result, reterr error) { - machineScope.Info("Handling deleted GCPMachine") +func (r *GCPMachineReconciler) reconcileDelete(ctx context.Context, machineScope *scope.MachineScope) (_ ctrl.Result, reterr error) { + log := log.FromContext(ctx) + log.Info("Reconciling Delete GCPMachine") - computeSvc := compute.NewService(clusterScope) - - instance, err := r.findInstance(machineScope, computeSvc) - if err != nil { + if err := instances.New(machineScope).Delete(ctx); err != nil { + log.Error(err, "Error deleting instance resources") return ctrl.Result{}, err } - if instance == nil { - // The machine was never created or was deleted by some other entity - machineScope.V(3).Info("Unable to locate instance by ID or tags") - - controllerutil.RemoveFinalizer(machineScope.GCPMachine, infrav1.MachineFinalizer) - return ctrl.Result{}, nil - } - - // Check the instance state. If it's already shutting down or terminated, - // do nothing. Otherwise attempt to delete it. 
- switch infrav1.InstanceStatus(instance.Status) { - case infrav1.InstanceStatusTerminated: - machineScope.Info("Instance is shutting down or already terminated") - default: - machineScope.Info("Terminating instance") - if err := computeSvc.TerminateInstanceAndWait(machineScope); err != nil { - record.Warnf(machineScope.GCPMachine, "FailedTerminate", "Failed to terminate instance %q: %v", instance.Name, err) - - return ctrl.Result{}, errors.Errorf("failed to terminate instance: %+v", err) - } - record.Eventf(machineScope.GCPMachine, "SuccessfulTerminate", "Terminated instance %q", instance.Name) - } - - // Instance is deleted so remove the finalizer. controllerutil.RemoveFinalizer(machineScope.GCPMachine, infrav1.MachineFinalizer) - + record.Event(machineScope.GCPMachine, "GCPMachineReconcile", "Reconciled") return ctrl.Result{}, nil } - -// findInstance queries the GCP apis and retrieves the instance if it exists, returns nil otherwise. -func (r *GCPMachineReconciler) findInstance(scope *scope.MachineScope, computeSvc *compute.Service) (*gcompute.Instance, error) { - instance, err := computeSvc.InstanceIfExists(scope) - if err != nil { - return nil, errors.Wrapf(err, "failed to query GCPMachine instance") - } - - return instance, nil -} - -func (r *GCPMachineReconciler) getOrCreate(scope *scope.MachineScope, computeSvc *compute.Service) (*gcompute.Instance, error) { - instance, err := r.findInstance(scope, computeSvc) - if err != nil { - return nil, err - } - - if instance == nil { - // Create a new GCPMachine instance if we couldn't find a running instance. - instance, err = computeSvc.CreateInstance(scope) - if err != nil { - return nil, errors.Wrapf(err, "failed to create GCPMachine instance") - } - } - - return instance, nil -} - -func (r *GCPMachineReconciler) getAddresses(instance *gcompute.Instance) []corev1.NodeAddress { - addresses := make([]corev1.NodeAddress, 0, len(instance.NetworkInterfaces)) - for _, nic := range instance.NetworkInterfaces { - internalAddress := corev1.NodeAddress{ - Type: corev1.NodeInternalIP, - Address: nic.NetworkIP, - } - addresses = append(addresses, internalAddress) - - // If access configs are associated with this nic, dig out the external IP - if len(nic.AccessConfigs) > 0 { - externalAddress := corev1.NodeAddress{ - Type: corev1.NodeExternalIP, - Address: nic.AccessConfigs[0].NatIP, - } - addresses = append(addresses, externalAddress) - } - } - - return addresses -} - -func (r *GCPMachineReconciler) reconcileLBAttachment(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope, i *gcompute.Instance) error { - if !machineScope.IsControlPlane() { - return nil - } - computeSvc := compute.NewService(clusterScope) - groupName := fmt.Sprintf("%s-%s-%s", clusterScope.Name(), infrav1.APIServerRoleTagValue, machineScope.Zone()) - - // Get the instance group, or create if necessary. - group, err := computeSvc.GetOrCreateInstanceGroup(machineScope.Zone(), groupName) - if err != nil { - return err - } - - // Make sure the instance is registered. - if err := computeSvc.EnsureInstanceGroupMember(machineScope.Zone(), group.Name, i); err != nil { - return err - } - - // Update the backend service. - return computeSvc.UpdateBackendServices() -} - -// GCPClusterToGCPMachines is a handler.ToRequestsFunc to be used to enqeue requests for reconciliation of GCPMachines. 
-func (r *GCPMachineReconciler) GCPClusterToGCPMachines(o client.Object) []ctrl.Request { - c, ok := o.(*infrav1.GCPCluster) - if !ok { - r.Log.Error(errors.Errorf("expected a GCPCluster but got a %T", o), "failed to get GCPMachine for GCPCluster") - - return nil - } - log := r.Log.WithValues("GCPCluster", c.Name, "Namespace", c.Namespace) - - cluster, err := util.GetOwnerCluster(context.TODO(), r.Client, c.ObjectMeta) - switch { - case apierrors.IsNotFound(err) || cluster == nil: - return nil - case err != nil: - log.Error(err, "failed to get owning cluster") - - return nil - } - - return r.requestsForCluster(cluster.Namespace, cluster.Name) -} - -func (r *GCPMachineReconciler) requeueGCPMachinesForUnpausedCluster(o client.Object) []ctrl.Request { - c, ok := o.(*clusterv1.Cluster) - if !ok { - r.Log.Error(errors.Errorf("expected a Cluster but got a %T", o), "failed to get GCPMachines for unpaused Cluster") - - return nil - } - - // Don't handle deleted clusters - if !c.ObjectMeta.DeletionTimestamp.IsZero() { - return nil - } - - return r.requestsForCluster(c.Namespace, c.Name) -} - -func (r *GCPMachineReconciler) requestsForCluster(namespace, name string) []ctrl.Request { - log := r.Log.WithValues("Cluster", name, "Namespace", namespace) - labels := map[string]string{clusterv1.ClusterLabelName: name} - machineList := &clusterv1.MachineList{} - if err := r.Client.List(context.TODO(), machineList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil { - log.Error(err, "failed to get owned Machines") - - return nil - } - - result := make([]ctrl.Request, 0, len(machineList.Items)) - for _, m := range machineList.Items { - if m.Spec.InfrastructureRef.Name != "" { - result = append(result, ctrl.Request{NamespacedName: client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}}) - } - } - - return result -} diff --git a/controllers/gcpmachine_controller_test.go b/controllers/gcpmachine_controller_test.go index 29568ec9f..b8ef886c8 100644 --- a/controllers/gcpmachine_controller_test.go +++ b/controllers/gcpmachine_controller_test.go @@ -25,7 +25,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" ) @@ -38,7 +37,6 @@ var _ = Describe("GCPMachineReconciler", func() { It("should not error with minimal set up", func() { reconciler := &GCPMachineReconciler{ Client: k8sClient, - Log: log.Log, } By("Calling reconcile") ctx := context.Background() diff --git a/controllers/gcpmachine_controller_unit_test.go b/controllers/gcpmachine_controller_unit_test.go index dbe45f792..0a1b60323 100644 --- a/controllers/gcpmachine_controller_unit_test.go +++ b/controllers/gcpmachine_controller_unit_test.go @@ -24,8 +24,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2/klogr" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1alpha4" @@ -84,9 +84,10 @@ func TestGCPMachineReconciler_GCPClusterToGCPMachines(t *testing.T) { reconciler := &GCPMachineReconciler{ Client: client, - Log: klogr.New(), } - requests := reconciler.GCPClusterToGCPMachines(&infrav1.GCPCluster{ + + fn := reconciler.GCPClusterToGCPMachines(ctrl.SetupSignalHandler()) + rr := fn(&infrav1.GCPCluster{ 
ObjectMeta: metav1.ObjectMeta{ Name: clusterName, Namespace: "default", @@ -99,5 +100,5 @@ func TestGCPMachineReconciler_GCPClusterToGCPMachines(t *testing.T) { }, }, }) - g.Expect(requests).To(HaveLen(2)) + g.Expect(rr).To(HaveLen(2)) } diff --git a/go.mod b/go.mod index a1d29fbab..cb7ce6660 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,12 @@ module sigs.k8s.io/cluster-api-provider-gcp go 1.16 require ( - github.com/blang/semver/v4 v4.0.0 - github.com/go-logr/logr v0.4.0 + github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.0 github.com/onsi/ginkgo v1.16.4 github.com/onsi/gomega v1.13.0 github.com/pkg/errors v0.9.1 github.com/spf13/pflag v1.0.5 + golang.org/x/mod v0.4.2 golang.org/x/net v0.0.0-20210614182718-04defd469f4e google.golang.org/api v0.48.0 k8s.io/api v0.21.2 @@ -22,4 +22,7 @@ require ( sigs.k8s.io/controller-runtime v0.9.0 ) -replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v0.4.0-beta.1 +replace ( + github.com/GoogleCloudPlatform/k8s-cloud-provider => github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210622065854-abbfeadc9fda + sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v0.4.0-beta.1 +) diff --git a/go.sum b/go.sum index b7a729847..edd18cb43 100644 --- a/go.sum +++ b/go.sum @@ -57,6 +57,8 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210622065854-abbfeadc9fda h1:I18CajUZOZ9o6BOSMF7uI2/xozIK4SEfMY+GtM4QxUE= +github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210622065854-abbfeadc9fda/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= @@ -114,8 +116,6 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -910,6 +910,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= 
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -978,6 +979,7 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1075,6 +1077,7 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1191,6 +1194,7 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0 h1:RDAPWfNFY06dffEXfn7hZF5Fr1ZbnChzfQZAPyBd1+I= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= @@ -1246,6 +1250,7 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto 
v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc= diff --git a/main.go b/main.go index c4aa96726..ffb3e9ab2 100644 --- a/main.go +++ b/main.go @@ -133,7 +133,6 @@ func main() { if err = (&controllers.GCPMachineReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("GCPMachine"), ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: gcpMachineConcurrency}); err != nil { @@ -142,7 +141,6 @@ func main() { } if err = (&controllers.GCPClusterReconciler{ Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("GCPCluster"), ReconcileTimeout: reconcileTimeout, WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: gcpClusterConcurrency}); err != nil {