diff --git a/pkg/client/client.go b/pkg/client/client.go
index 3ce8b9d..1e60466 100644
--- a/pkg/client/client.go
+++ b/pkg/client/client.go
@@ -65,6 +65,68 @@ func (a *IONOSClient) GetServer(ctx context.Context, providerID string) (*cloudp
 	return a.convertServerToInstanceMetadata(ctx, &server)
 }
 
+func (a *IONOSClient) AttachIPToNode(ctx context.Context, loadBalancerIP, providerID string) (bool, error) {
+	if a.client == nil {
+		return false, errors.New("client isn't initialized")
+	}
+
+	serverReq := a.client.NetworkInterfacesApi.DatacentersServersNicsGet(ctx, a.DatacenterId, providerID)
+	nics, req, err := serverReq.Depth(3).Execute()
+	if err != nil || (req != nil && req.StatusCode == 404) {
+		return false, err
+	}
+
+	if !nics.HasItems() {
+		return false, nil
+	}
+
+	primaryNic := (*nics.Items)[0]
+	ips := *primaryNic.Properties.Ips
+	ips = append(ips, loadBalancerIP)
+
+	_, _, err = a.client.NetworkInterfacesApi.DatacentersServersNicsPatch(ctx, a.DatacenterId, providerID, *primaryNic.Id).Nic(ionoscloud.NicProperties{
+		Ips: &ips,
+	}).Execute()
+
+	return err == nil, err
+}
+
+func (a *IONOSClient) GetServerByIP(ctx context.Context, loadBalancerIP string) (*string, error) {
+	if a.client == nil {
+		return nil, errors.New("client isn't initialized")
+	}
+
+	serverReq := a.client.ServersApi.DatacentersServersGet(ctx, a.DatacenterId)
+	servers, req, err := serverReq.Depth(3).Execute()
+	if err != nil || req != nil && req.StatusCode == 404 {
+		if err != nil {
+			return nil, err
+		}
+		return nil, nil
+	}
+
+	if !servers.HasItems() {
+		return nil, nil
+	}
+
+	for _, server := range *servers.Items {
+		if !server.Entities.HasNics() {
+			continue
+		}
+		for _, nic := range *server.Entities.Nics.Items {
+			if nic.Properties.HasIps() {
+				for _, ip := range *nic.Properties.Ips {
+					if loadBalancerIP == ip {
+						return server.Properties.Name, nil
+					}
+				}
+			}
+		}
+	}
+
+	return nil, nil
+}
+
 func (a *IONOSClient) datacenterLocation(ctx context.Context) (string, error) {
 	if a.client == nil {
return "", errors.New("client isn't initialized") diff --git a/pkg/ionos/cloud.go b/pkg/ionos/cloud.go index 4bd2cc0..66c53e1 100644 --- a/pkg/ionos/cloud.go +++ b/pkg/ionos/cloud.go @@ -34,19 +34,22 @@ func newProvider(config config.Config) cloudprovider.Interface { return IONOS{ config: config, instances: instances{ - clients: map[string]*client2.IONOSClient{}, + ionosClients: map[string]*client2.IONOSClient{}, + }, + loadbalancer: loadbalancer{ + ionosClients: map[string]*client2.IONOSClient{}, }, } } func (p IONOS) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, _ <-chan struct{}) { ctx := context.Background() - client, err := clientBuilder.Client(config.ClientName) + k8sClient, err := clientBuilder.Client(config.ClientName) if err != nil { klog.Errorf("Kubernetes Client Init Failed: %v", err) return } - secret, err := client.CoreV1().Secrets(p.config.TokenSecretNamespace).Get(ctx, p.config.TokenSecretName, metav1.GetOptions{}) + secret, err := k8sClient.CoreV1().Secrets(p.config.TokenSecretNamespace).Get(ctx, p.config.TokenSecretName, metav1.GetOptions{}) if err != nil { klog.Errorf("Failed to get secret %s/%s: %v", p.config.TokenSecretNamespace, p.config.TokenSecretName, err) return @@ -58,12 +61,17 @@ func (p IONOS) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, _ klog.Errorf("Failed to create client for datacenter %s: %v", key, err) return } + + err = p.loadbalancer.AddClient(key, token) + if err != nil { + klog.Errorf("Failed to create client for datacenter %s: %v", key, err) + return + } } } func (p IONOS) LoadBalancer() (cloudprovider.LoadBalancer, bool) { - klog.Warning("The IONOS cloud provider does not support load balancers") - return nil, false + return p.loadbalancer, true } func (p IONOS) Instances() (cloudprovider.Instances, bool) { diff --git a/pkg/ionos/instances.go b/pkg/ionos/instances.go index 9a9d8e0..39908e9 100644 --- a/pkg/ionos/instances.go +++ b/pkg/ionos/instances.go @@ -23,19 +23,19 @@ func 
GetUUIDFromNode(node *v1.Node) string { } func (i instances) AddClient(datacenterId string, token []byte) error { - if i.clients[datacenterId] == nil { + if i.ionosClients[datacenterId] == nil { c, err := client2.New(datacenterId, token) if err != nil { return err } - i.clients[datacenterId] = &c + i.ionosClients[datacenterId] = &c } return nil } // no caching func (i instances) discoverNode(ctx context.Context, node *v1.Node) (*cloudprovider.InstanceMetadata, error) { - for _, client := range i.clients { + for _, client := range i.ionosClients { var err error var server *cloudprovider.InstanceMetadata providerID := GetUUIDFromNode(node) diff --git a/pkg/ionos/loadbalancer.go b/pkg/ionos/loadbalancer.go new file mode 100644 index 0000000..d52f121 --- /dev/null +++ b/pkg/ionos/loadbalancer.go @@ -0,0 +1,187 @@ +package ionos + +import ( + "context" + "errors" + client2 "github.com/GDATASoftwareAG/cloud-provider-ionoscloud/pkg/client" + v1 "k8s.io/api/core/v1" + cloudprovider "k8s.io/cloud-provider" + "math/rand" + "time" +) + +var _ cloudprovider.LoadBalancer = &loadbalancer{} + +// see https://github.com/kubernetes/kubernetes/blob/v1.18.0/pkg/controller/service/controller.go + +func (l loadbalancer) AddClient(datacenterId string, token []byte) error { + if l.ionosClients[datacenterId] == nil { + c, err := client2.New(datacenterId, token) + if err != nil { + return err + } + l.ionosClients[datacenterId] = &c + } + return nil +} + +// GetLoadBalancer returns whether the specified load balancer exists, and +// if so, what its status is. +// Implementations must treat the *v1.Service parameter as read-only and not modify it. +// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager. +// For the given LB service, the GetLoadBalancer must return "exists=True" if +// there exists a LoadBalancer instance created by ServiceController. +// In all other cases, GetLoadBalancer must return a NotFound error. 
+func (l loadbalancer) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) { + //TODO check if any node has service.spec.loadbalancerip + // TODO check spec.loadbalancerclass + use .spec.loadbalancerip + if service.Spec.Type != v1.ServiceTypeLoadBalancer { + return nil, false, errors.New("NotFound") + } + + if service.Spec.LoadBalancerIP == "" { + return nil, false, errors.New("NotFound") + } + + panic("implement me") +} + +// GetLoadBalancerName returns the name of the load balancer. Implementations must treat the +// *v1.Service parameter as read-only and not modify it. +func (l loadbalancer) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string { + //TODO return service.metadata.uid + + panic("implement me") +} + +// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer +// Implementations must treat the *v1.Service and *v1.Node +// parameters as read-only and not modify them. +// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager. +// +// Implementations may return a (possibly wrapped) api.RetryError to enforce +// backing off at a fixed duration. This can be used for cases like when the +// load balancer is not ready yet (e.g., it is still being provisioned) and +// polling at a fixed rate is preferred over backing off exponentially in +// order to minimize latency. 
+func (l loadbalancer) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
+	//TODO check if ip already attached to some node, attach loadbalancerIP to some node (not on controlplanes)
+	server, err := l.GetServerWithLoadBalancer(ctx, service.Spec.LoadBalancerIP)
+	if err != nil {
+		return nil, err
+	}
+
+	if server != nil {
+		node := getNode(*server, nodes)
+		if node != nil && IsLoadBalancerCandidate(node) {
+			return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{v1.LoadBalancerIngress{
+				IP: service.Spec.LoadBalancerIP,
+			}}}, nil
+		}
+	}
+
+	loadBalancerNode := l.GetLoadBalancerNode(nodes)
+
+	if loadBalancerNode == nil {
+		return nil, errors.New("No valid Nodes found")
+	}
+
+	for _, client := range l.ionosClients {
+		ok, err := client.AttachIPToNode(ctx, service.Spec.LoadBalancerIP, loadBalancerNode.Spec.ProviderID)
+		if err != nil {
+			return nil, err
+		}
+
+		if ok {
+			return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{v1.LoadBalancerIngress{
+				IP: service.Spec.LoadBalancerIP,
+			}}}, nil
+		}
+	}
+
+	return nil, errors.New("could not attach load balancer IP to any node")
+}
+
+func getNode(server string, nodes []*v1.Node) *v1.Node {
+	for _, node := range nodes {
+		if node.Name == server {
+			return node
+		}
+	}
+	return nil
+}
+
+func (l loadbalancer) GetLoadBalancerNode(nodes []*v1.Node) *v1.Node {
+	var candidates []*v1.Node
+	for _, node := range nodes {
+		if IsLoadBalancerCandidate(node) {
+			candidates = append(candidates, node)
+		}
+	}
+
+	if len(candidates) == 0 { return nil } // guard: rand.Intn panics when the argument is 0
+	rand.Seed(time.Now().UnixNano())
+	return candidates[rand.Intn(len(candidates))]
+}
+
+func (l loadbalancer) GetServerWithLoadBalancer(ctx context.Context, loadBalancerIP string) (*string, error) {
+	for _, client := range l.ionosClients {
+		server, err := client.GetServerByIP(ctx, loadBalancerIP)
+		if err != nil {
+			return nil, err
+		}
+
+		if server != nil {
+			return server, nil
+		}
+	}
+
+	return nil, nil
+}
+
+func IsLoadBalancerCandidate(node *v1.Node)
 bool {
+	if node == nil || IsControlPlane(node) {
+		return false
+	}
+
+	for _, condition := range node.Status.Conditions {
+		if condition.Type != v1.NodeReady {
+			continue
+		}
+		return condition.Status == v1.ConditionTrue
+	}
+	return false
+}
+
+func IsControlPlane(node *v1.Node) bool {
+	for _, taint := range node.Spec.Taints {
+		if taint.Key == "node-role.kubernetes.io/control-plane" {
+			return true
+		}
+	}
+	return false
+}
+
+// UpdateLoadBalancer updates hosts under the specified load balancer.
+// Implementations must treat the *v1.Service and *v1.Node
+// parameters as read-only and not modify them.
+// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
+func (l loadbalancer) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
+	//TODO same as EnsureLoadBalancer
+	panic("implement me")
+}
+
+// EnsureLoadBalancerDeleted deletes the specified load balancer if it
+// exists, returning nil if the load balancer specified either didn't exist or
+// was successfully deleted.
+// This construction is useful because many cloud providers' load balancers
+// have multiple underlying components, meaning a Get could say that the LB
+// doesn't exist even if some part of it is still laying around.
+// Implementations must treat the *v1.Service parameter as read-only and not modify it.
+// Parameter 'clusterName' is the name of the cluster as presented to kube-controller-manager
+// EnsureLoadBalancerDeleted must not return ImplementedElsewhere to ensure
+// proper teardown of resources that were allocated by the ServiceController.
+func (l loadbalancer) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { + //TODO remove ip from node + panic("implement me") +} diff --git a/pkg/ionos/types.go b/pkg/ionos/types.go index 5cde4bf..f622eb9 100644 --- a/pkg/ionos/types.go +++ b/pkg/ionos/types.go @@ -6,11 +6,16 @@ import ( ) type IONOS struct { - config config.Config - instances instances - client *client.IONOSClient + config config.Config + instances instances + loadbalancer loadbalancer + client *client.IONOSClient } type instances struct { - clients map[string]*client.IONOSClient + ionosClients map[string]*client.IONOSClient +} + +type loadbalancer struct { + ionosClients map[string]*client.IONOSClient }