diff --git a/cluster-autoscaler/README.md b/cluster-autoscaler/README.md
index ac996cd63659..4c5f09cdf6fd 100644
--- a/cluster-autoscaler/README.md
+++ b/cluster-autoscaler/README.md
@@ -17,6 +17,7 @@ You should also take a look at the notes and "gotchas" for your specific cloud p
* [Azure](./cloudprovider/azure/README.md)
* [AWS](./cloudprovider/aws/README.md)
* [BaiduCloud](./cloudprovider/baiducloud/README.md)
+* [Brightbox](./cloudprovider/brightbox/README.md)
* [CloudStack](./cloudprovider/cloudstack/README.md)
* [HuaweiCloud](./cloudprovider/huaweicloud/README.md)
* [Hetzner](./cloudprovider/hetzner/README.md)
@@ -150,6 +151,7 @@ Supported cloud providers:
* AWS https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md
* Azure https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/azure/README.md
* Alibaba Cloud https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/alicloud/README.md
+* Brightbox https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/brightbox/README.md
* OpenStack Magnum https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/magnum/README.md
* DigitalOcean https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/digitalocean/README.md
* CloudStack https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/cloudstack/README.md
diff --git a/cluster-autoscaler/cloudprovider/brightbox/Makefile b/cluster-autoscaler/cloudprovider/brightbox/Makefile
new file mode 100644
index 000000000000..14c20c9dd550
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/Makefile
@@ -0,0 +1,44 @@
+export BUILD_TAGS=brightbox
+export REGISTRY=brightbox
+export GOARCH?=$(shell go env GOARCH)
+ifndef TAG
+ override TAG=dev
+endif
+export TAG
+ifeq ($(TAG), dev)
+ deploydeps=build
+endif
+
+.PHONY: deploy
+deploy: examples/config.rb $(deploydeps)
+ helm repo update
+ ruby $< | \
+ helm template release autoscaler/cluster-autoscaler \
+ --namespace kube-system -f - | \
+ kubectl -n kube-system apply -f -
+
+.PHONY: remove
+remove: examples/config.rb
+ helm repo update
+ ruby $< | \
+ helm template release autoscaler/cluster-autoscaler \
+ --namespace kube-system -f - | \
+ kubectl -n kube-system delete -f -
+
+.PHONY: secret
+secret: ${HOME}/.docker/config.json
+ -kubectl create secret generic regcred \
+ --from-file=.dockerconfigjson=$? \
+ --type=kubernetes.io/dockerconfigjson
+
+../../cluster-autoscaler: brightbox_cloud_provider.go brightbox_node_group.go
+ $(MAKE) -C $(@D) container
+ docker tag ${REGISTRY}/cluster-autoscaler-${BUILD_TAGS}-${GOARCH}:${TAG} ${REGISTRY}/cluster-autoscaler-${BUILD_TAGS}:${TAG}
+ docker push ${REGISTRY}/cluster-autoscaler-${BUILD_TAGS}:${TAG}
+
+.PHONY: build
+build: ../../cluster-autoscaler
+
+.PHONY: clean
+clean:
+ $(MAKE) -C ../.. $@
diff --git a/cluster-autoscaler/cloudprovider/brightbox/README.md b/cluster-autoscaler/cloudprovider/brightbox/README.md
new file mode 100644
index 000000000000..ca8498e50ce9
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/README.md
@@ -0,0 +1,162 @@
+# Cluster Autoscaler for Brightbox Cloud
+
+This cloud provider implements the autoscaling function for
+[Brightbox Cloud](https://www.brightbox.com). The autoscaler should
+work on any Kubernetes cluster running on Brightbox Cloud; however,
+the approach is tailored to clusters built with the [Kubernetes Cluster
+Builder](https://github.com/brightbox/kubernetes-cluster).
+
+# How Autoscaler works on Brightbox Cloud
+
+The autoscaler looks for [Server
+Groups](https://www.brightbox.com/docs/guides/cli/server-groups/) named
+after the cluster name passed to the autoscaler via the `--cluster-name`
+option.
+
+A group whose name has the cluster name as a suffix
+(e.g. `k8s-worker.k8s-test.cluster.local`) is a candidate scaling
+group. The autoscaler then checks the group description for a pair of
+integers separated by a colon (e.g. `1:4`). If it finds those numbers,
+they become the minimum and maximum size for that group, and the
+autoscaler will attempt to scale the group between those sizes.
+
+The server type, the image used and the target zone are determined
+dynamically from the existing members of the group. If these differ
+between members, or there are no existing servers, the autoscaler will
+log an error and will not scale that group.
+
+A group named precisely the same as the cluster-name
+(e.g. `k8s-test.cluster.local`) is considered to be the default cluster
+group, and all autoscaled servers created are placed within it as well
+as within the scaling group.
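+
+For example, with a cluster named `k8s-test.cluster.local` the groups
+might look like this (an illustration of the naming convention only,
+not the output of any particular tool):
+
+```
+k8s-worker.k8s-test.cluster.local   description "1:4"    scaling group (1 to 4 servers)
+k8s-test.cluster.local              description (any)    default cluster group
+```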
+
+The Brightbox Cloud provider only supports auto-discovery mode using
+this pattern. `node-group-auto-discovery` and `nodes` options are
+effectively ignored.
+
+## Cluster configuration
+
+If you are using the [Kubernetes Cluster
+Builder](https://github.com/brightbox/kubernetes-cluster), set the
+`worker_min` and `worker_max` values to scale the worker group, and the
+`storage_min` and `storage_max` values to scale the storage group.
+
+The Cluster Builder will ensure the group name and description are
+updated with the correct values in the format that autoscaler can recognise.
+
+Generally it is best to keep the `min` and the `count` values the same
+within the Cluster Builder and let autoscaler create and destroy
+servers dynamically up to the `max` value.
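+
+As a sketch, the relevant Cluster Builder settings might look like the
+following (`terraform.tfvars` syntax is assumed here; check the Cluster
+Builder documentation for the exact variable names and format), with
+the corresponding `count` values kept equal to `min`:
+
+```
+worker_min  = 1
+worker_max  = 4
+storage_min = 1
+storage_max = 4
+```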
+
+If you use the Cluster Builder manifests to maintain the cluster for
+other reasons (changing the management address, for example), you may
+find that it recreates servers the autoscaler has scaled down. This is
+a limitation of the Terraform state database; the autoscaler will scale
+the cluster back down over the next few minutes.
+
+# Autoscaler Brightbox cloudprovider configuration
+
+The Brightbox Cloud cloudprovider is configured via environment variables
+supplied to the autoscaler pod. The easiest way to do this is to [create
+a secret](https://kubernetes.io/docs/concepts/configuration/secret/#creating-a-secret-manually) containing the variables within the `kube-system`
+namespace (note that values under `data` must be base64 encoded).
+
+```
+apiVersion: v1
+kind: Secret
+metadata:
+ name: brightbox-credentials
+ namespace: kube-system
+type: Opaque
+data:
+ BRIGHTBOX_API_URL:
+ BRIGHTBOX_CLIENT:
+ BRIGHTBOX_CLIENT_SECRET:
+ BRIGHTBOX_KUBE_JOIN_COMMAND:
+ BRIGHTBOX_KUBE_VERSION:
+```
+
+The join command can be obtained from the `kubeadm token` command:
+
+```
+$ kubeadm token create --ttl 0 --description 'Cluster autoscaling token' --print-join-command
+```
+
+[Brightbox API
+Clients](https://www.brightbox.com/docs/guides/manager/api-clients/)
+can be created in the [Brightbox
+Manager](https://www.brightbox.com/docs/guides/manager/).
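+
+If you are creating the secret by hand, one approach (a sketch only;
+the client id, secret and join command values below are placeholders)
+is to build it from literals, which avoids having to base64 encode the
+values yourself:
+
+```
+$ kubectl -n kube-system create secret generic brightbox-credentials \
+    --from-literal=BRIGHTBOX_API_URL=https://api.gb1.brightbox.com \
+    --from-literal=BRIGHTBOX_CLIENT=cli-xxxxx \
+    --from-literal=BRIGHTBOX_CLIENT_SECRET=mysecret \
+    --from-literal=BRIGHTBOX_KUBE_VERSION=1.17.0 \
+    --from-literal=BRIGHTBOX_KUBE_JOIN_COMMAND="kubeadm join ..."
+```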
+
+## Cluster Configuration
+
+The [Kubernetes Cluster
+Builder](https://github.com/brightbox/kubernetes-cluster) creates a
+`brightbox-credentials` secret in the `kube-system` namespace ready
+to use.
+
+## Checking the environment
+
+You can check the brightbox-credentials secret by running the `check-env` job from the examples directory.
+
+```
+$ kubectl apply -f examples/check-env.yaml
+job.batch/check-env created
+$ kubectl -n kube-system logs job/check-env
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+HOSTNAME=check-env-hbh6m
+_BASH_GPG_KEY=7C0135FB088AAF6C66C650B9BB5869F064EA74AB
+_BASH_VERSION=5.0
+_BASH_PATCH_LEVEL=0
+_BASH_LATEST_PATCH=11
+BRIGHTBOX_KUBE_VERSION=1.17.0
+...
+$ kubectl delete -f examples/check-env.yaml
+job.batch "check-env" deleted
+```
+
+# Running the Autoscaler
+
+1. Clone this repository and change into this directory.
+2. Edit the `examples/config.rb` file and adjust the config hash.
+3. Alter the cluster name if
+   required. (If you are using the [Kubernetes Cluster
+   Builder](https://github.com/brightbox/kubernetes-cluster), this will be
+   `cluster_name` and `cluster_domainname` joined with a '.')
+
+Then generate and apply the manifests
+```
+$ make deploy TAG=
+```
+
+where `TAG` is the version you wish to use (1.17, 1.18, etc.).
+
+As the Brightbox cloud-provider auto-detects and potentially scales all
+the worker groups, the example deployment file runs the autoscaler on
+the master nodes. This avoids the autoscaler accidentally removing the
+node it is running on.
+
+## Viewing the cluster-autoscaler options
+
+Cluster autoscaler has many options that can be adjusted to better fit the needs of your application. To view them run
+
+```
+$ kubectl create job ca-options --image=brightbox/cluster-autoscaler-brightbox:dev -- ./cluster-autoscaler -h
+$ kubectl logs job/ca-options
+```
+
+Remove the job in the normal way with `kubectl delete job/ca-options`.
+
+You can read more details about some of the options in the [main FAQ](../../FAQ.md).
+
+
+# Building the Brightbox Cloud autoscaler
+
+Extract the repository to a machine running Docker and then run the make command:
+
+```
+$ make build
+```
+
+This builds an autoscaler containing only the Brightbox Cloud provider, tagged as `brightbox/cluster-autoscaler-brightbox:dev`. To build any other version, add a `TAG` variable:
+
+```
+make build TAG=1.1x
+```
+
diff --git a/cluster-autoscaler/cloudprovider/brightbox/brightbox_cloud_provider.go b/cluster-autoscaler/cloudprovider/brightbox/brightbox_cloud_provider.go
new file mode 100644
index 000000000000..f069f9019263
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/brightbox_cloud_provider.go
@@ -0,0 +1,336 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package brightbox
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ apiv1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+ brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/status"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
+ "k8s.io/autoscaler/cluster-autoscaler/config"
+ "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
+ klog "k8s.io/klog/v2"
+)
+
+const (
+ // GPULabel is added to nodes with GPU resource
+ GPULabel = "cloud.brightbox.com/gpu-node"
+)
+
+var (
+ availableGPUTypes = map[string]struct{}{}
+)
+
+// brightboxCloudProvider implements cloudprovider.CloudProvider interface
+type brightboxCloudProvider struct {
+ resourceLimiter *cloudprovider.ResourceLimiter
+ ClusterName string
+ nodeGroups []cloudprovider.NodeGroup
+ nodeMap map[string]string
+ *k8ssdk.Cloud
+}
+
+// Name returns name of the cloud provider.
+func (b *brightboxCloudProvider) Name() string {
+ klog.V(4).Info("Name")
+ return cloudprovider.BrightboxProviderName
+}
+
+// NodeGroups returns all node groups configured for this cloud provider.
+func (b *brightboxCloudProvider) NodeGroups() []cloudprovider.NodeGroup {
+ klog.V(4).Info("NodeGroups")
+	// Return the stored node group elements by reference. A defensive
+	// copy could be made with append(b.nodeGroups[:0:0], b.nodeGroups...)
+	// if callers needed a slice they could safely modify.
+ return b.nodeGroups
+}
+
+// NodeGroupForNode returns the node group for the given node, nil if
+// the node should not be processed by cluster autoscaler, or non-nil
+// error if such occurred. Must be implemented.
+func (b *brightboxCloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.NodeGroup, error) {
+ klog.V(4).Info("NodeGroupForNode")
+ klog.V(4).Infof("Looking for %v", node.Spec.ProviderID)
+ groupID, ok := b.nodeMap[k8ssdk.MapProviderIDToServerID(node.Spec.ProviderID)]
+ if ok {
+ klog.V(4).Infof("Found in group %v", groupID)
+ return b.findNodeGroup(groupID), nil
+ }
+ klog.V(4).Info("Not found")
+ return nil, nil
+}
+
+// Refresh is called before every main loop and can be used to dynamically
+// update cloud provider state.
+// In particular the list of node groups returned by NodeGroups can
+// change as a result of CloudProvider.Refresh().
+func (b *brightboxCloudProvider) Refresh() error {
+ klog.V(4).Info("Refresh")
+ configmaps, err := b.GetConfigMaps()
+ if err != nil {
+ return err
+ }
+ clusterSuffix := "." + b.ClusterName
+ nodeGroups := make([]cloudprovider.NodeGroup, 0)
+ nodeMap := make(map[string]string)
+ for _, configMapOutline := range configmaps {
+ if !strings.HasSuffix(configMapOutline.Name, clusterSuffix) {
+ klog.V(4).Infof("name %q doesn't match suffix %q. Ignoring %q", configMapOutline.Name, clusterSuffix, configMapOutline.Id)
+ continue
+ }
+ configMap, err := b.GetConfigMap(configMapOutline.Id)
+ if err != nil {
+ return err
+ }
+ klog.V(6).Infof("ConfigMap %+v", configMap)
+ mapData := make(map[string]string)
+ for k, v := range configMap.Data {
+ element, ok := v.(string)
+ if !ok {
+ return fmt.Errorf("Unexpected value for key %q in configMap %q", k, configMap.Id)
+ }
+ mapData[k] = element
+ }
+ klog.V(6).Infof("MapData: %+v", mapData)
+ minSize, err := strconv.Atoi(mapData["min"])
+ if err != nil {
+ klog.V(4).Info("Unable to retrieve minimum size. Ignoring")
+ continue
+ }
+ maxSize, err := strconv.Atoi(mapData["max"])
+ if err != nil {
+ klog.V(4).Info("Unable to retrieve maximum size. Ignoring")
+ continue
+ }
+ if minSize == maxSize {
+ klog.V(4).Infof("Group %q has a fixed size %d. Ignoring", mapData["server_group"], minSize)
+ continue
+ }
+ klog.V(4).Infof("Group %q: Node defaults found in %q. Adding to node group list", configMap.Data["server_group"], configMap.Id)
+ newNodeGroup := makeNodeGroupFromAPIDetails(
+ defaultServerName(configMap.Name),
+ mapData,
+ minSize,
+ maxSize,
+ b.Cloud,
+ )
+ group, err := b.GetServerGroup(newNodeGroup.Id())
+ if err != nil {
+ return err
+ }
+ for _, server := range group.Servers {
+ nodeMap[server.Id] = group.Id
+ }
+ nodeGroups = append(nodeGroups, newNodeGroup)
+ }
+ b.nodeGroups = nodeGroups
+ b.nodeMap = nodeMap
+ klog.V(4).Infof("Refresh located %v node(s) over %v group(s)", len(nodeMap), len(nodeGroups))
+ return nil
+}
+
+// Pricing returns pricing model for this cloud provider or error if
+// not available.
+// Implementation optional.
+func (b *brightboxCloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) {
+ klog.V(4).Info("Pricing")
+ return nil, cloudprovider.ErrNotImplemented
+}
+
+// GetAvailableMachineTypes get all machine types that can be requested
+// from the cloud provider.
+// Implementation optional.
+func (b *brightboxCloudProvider) GetAvailableMachineTypes() ([]string, error) {
+ klog.V(4).Info("GetAvailableMachineTypes")
+ return nil, cloudprovider.ErrNotImplemented
+}
+
+// NewNodeGroup builds a theoretical node group based on the node
+// definition provided. The node group is not automatically created on
+// the cloud provider side. The node group is not returned by NodeGroups()
+// until it is created.
+// Implementation optional.
+func (b *brightboxCloudProvider) NewNodeGroup(machineType string, labels map[string]string, systemLabels map[string]string, taints []apiv1.Taint, extraResources map[string]resource.Quantity) (cloudprovider.NodeGroup, error) {
+	klog.V(4).Info("NewNodeGroup")
+ return nil, cloudprovider.ErrNotImplemented
+}
+
+// GetResourceLimiter returns struct containing limits (max, min) for
+// resources (cores, memory etc.).
+func (b *brightboxCloudProvider) GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) {
+ klog.V(4).Info("GetResourceLimiter")
+ return b.resourceLimiter, nil
+}
+
+// GPULabel returns the label added to nodes with GPU resource.
+func (b *brightboxCloudProvider) GPULabel() string {
+ klog.V(4).Info("GPULabel")
+ return GPULabel
+}
+
+// GetAvailableGPUTypes return all available GPU types cloud provider
+// supports.
+func (b *brightboxCloudProvider) GetAvailableGPUTypes() map[string]struct{} {
+ klog.V(4).Info("GetAvailableGPUTypes")
+ return availableGPUTypes
+}
+
+// Cleanup cleans up open resources before the cloud provider is
+// destroyed, i.e. go routines etc.
+func (b *brightboxCloudProvider) Cleanup() error {
+ klog.V(4).Info("Cleanup")
+ return nil
+}
+
+// BuildBrightbox builds the Brightbox provider
+func BuildBrightbox(
+ opts config.AutoscalingOptions,
+ do cloudprovider.NodeGroupDiscoveryOptions,
+ rl *cloudprovider.ResourceLimiter,
+) cloudprovider.CloudProvider {
+ klog.V(4).Info("BuildBrightbox")
+ klog.V(4).Infof("Config: %+v", opts)
+ klog.V(4).Infof("Discovery Options: %+v", do)
+ if opts.CloudConfig != "" {
+ klog.Warning("supplied config is not read by this version. Using environment")
+ }
+ if opts.ClusterName == "" {
+ klog.Fatal("Set the cluster name option to the Fully Qualified Internal Domain Name of the cluster")
+ }
+ newCloudProvider := &brightboxCloudProvider{
+ ClusterName: opts.ClusterName,
+ resourceLimiter: rl,
+ Cloud: &k8ssdk.Cloud{},
+ }
+ _, err := newCloudProvider.CloudClient()
+ if err != nil {
+ klog.Fatalf("Failed to create Brightbox Cloud Client: %v", err)
+ }
+ return newCloudProvider
+}
+
+//private
+
+func (b *brightboxCloudProvider) findNodeGroup(groupID string) cloudprovider.NodeGroup {
+ klog.V(4).Info("findNodeGroup")
+ klog.V(4).Infof("Looking for %q", groupID)
+ for _, nodeGroup := range b.nodeGroups {
+ if nodeGroup.Id() == groupID {
+ return nodeGroup
+ }
+ }
+ return nil
+}
+
+func defaultServerName(name string) string {
+ klog.V(4).Info("defaultServerName")
+ klog.V(4).Infof("group name is %q", name)
+ return "auto." + name
+}
+
+func fetchDefaultGroup(groups []brightbox.ServerGroup, clusterName string) string {
+	klog.V(4).Info("fetchDefaultGroup")
+ klog.V(4).Infof("for cluster %q", clusterName)
+ for _, group := range groups {
+ if group.Name == clusterName {
+ return group.Id
+ }
+ }
+ klog.Warningf("Unable to detect main group for cluster %q", clusterName)
+ return ""
+}
+
+type idWithStatus struct {
+ id string
+ status string
+}
+
+func (b *brightboxCloudProvider) extractGroupDefaults(servers []brightbox.Server) (string, string, string, error) {
+ klog.V(4).Info("extractGroupDefaults")
+ const zoneSentinel string = "dummyValue"
+ zoneID := zoneSentinel
+ var serverType, image idWithStatus
+ for _, serverSummary := range servers {
+ server, err := b.GetServer(
+ context.Background(),
+ serverSummary.Id,
+ serverNotFoundError(serverSummary.Id),
+ )
+ if err != nil {
+ return "", "", "", err
+ }
+ image = checkForChange(image, idWithStatus{server.Image.Id, server.Image.Status}, "Group has multiple Image Ids")
+ serverType = checkForChange(serverType, idWithStatus{server.ServerType.Id, server.ServerType.Status}, "Group has multiple ServerType Ids")
+ zoneID = checkZoneForChange(zoneID, server.Zone.Id, zoneSentinel)
+ }
+ switch {
+ case serverType.id == "":
+ return "", "", "", fmt.Errorf("Unable to determine Server Type details from Group")
+ case image.id == "":
+ return "", "", "", fmt.Errorf("Unable to determine Image details from Group")
+ case zoneID == zoneSentinel:
+ return "", "", "", fmt.Errorf("Unable to determine Zone details from Group")
+ case image.status == status.Deprecated:
+ klog.Warningf("Selected image %q is deprecated. Please update to an available version", image.id)
+ }
+ return serverType.id, image.id, zoneID, nil
+}
+
+func checkZoneForChange(zoneID string, newZoneID string, sentinel string) string {
+ klog.V(4).Info("checkZoneForChange")
+ klog.V(4).Infof("new %q, existing %q", newZoneID, zoneID)
+ switch zoneID {
+ case newZoneID, sentinel:
+ return newZoneID
+ default:
+ klog.V(4).Info("Group is zone balanced")
+ return ""
+ }
+}
+
+func checkForChange(current idWithStatus, newDetails idWithStatus, errorMessage string) idWithStatus {
+ klog.V(4).Info("checkForChange")
+ klog.V(4).Infof("new %v, existing %v", newDetails, current)
+ switch {
+ case newDetails == current:
+ // Skip to end
+ case newDetails.status == status.Available:
+ if current.id == "" || current.status == status.Deprecated {
+ klog.V(4).Infof("Object %q is available. Selecting", newDetails.id)
+ return newDetails
+ }
+ // Multiple ids
+ klog.Warning(errorMessage)
+ case newDetails.status == status.Deprecated:
+ if current.id == "" {
+ klog.V(4).Infof("Object %q is deprecated, but selecting anyway", newDetails.id)
+ return newDetails
+ }
+ // Multiple ids
+ klog.Warning(errorMessage)
+ default:
+ klog.Warningf("Object %q is no longer available. Ignoring.", newDetails.id)
+ }
+ return current
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/brightbox_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/brightbox/brightbox_cloud_provider_test.go
new file mode 100644
index 000000000000..bf284be8ab67
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/brightbox_cloud_provider_test.go
@@ -0,0 +1,735 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package brightbox
+
+import (
+ "encoding/json"
+ "flag"
+ "os"
+ "os/exec"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+ brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks"
+ "k8s.io/autoscaler/cluster-autoscaler/config"
+ klog "k8s.io/klog/v2"
+)
+
+const (
+ fakeServer = "srv-testy"
+ fakeGroup = "grp-testy"
+ missingServer = "srv-notty"
+ fakeClusterName = "k8s-fake.cluster.local"
+)
+
+var (
+ fakeNodeMap = map[string]string{
+ fakeServer: fakeGroup,
+ }
+ fakeNodeGroup = &brightboxNodeGroup{
+ id: fakeGroup,
+ }
+ fakeNodeGroups = []cloudprovider.NodeGroup{
+ fakeNodeGroup,
+ }
+)
+
+func init() {
+ klog.InitFlags(nil)
+ flag.Set("alsologtostderr", "true")
+ flag.Set("v", "4")
+}
+
+func TestMain(m *testing.M) {
+ flag.Parse()
+ os.Exit(m.Run())
+}
+
+func TestName(t *testing.T) {
+ assert.Equal(t, makeFakeCloudProvider(nil).Name(), cloudprovider.BrightboxProviderName)
+}
+
+func TestGPULabel(t *testing.T) {
+ assert.Equal(t, makeFakeCloudProvider(nil).GPULabel(), GPULabel)
+}
+
+func TestGetAvailableGPUTypes(t *testing.T) {
+ assert.Equal(t, makeFakeCloudProvider(nil).GetAvailableGPUTypes(), availableGPUTypes)
+}
+
+func TestPricing(t *testing.T) {
+ obj, err := makeFakeCloudProvider(nil).Pricing()
+ assert.Equal(t, err, cloudprovider.ErrNotImplemented)
+ assert.Nil(t, obj)
+}
+
+func TestGetAvailableMachineTypes(t *testing.T) {
+ obj, err := makeFakeCloudProvider(nil).GetAvailableMachineTypes()
+ assert.Equal(t, err, cloudprovider.ErrNotImplemented)
+ assert.Nil(t, obj)
+}
+
+func TestNewNodeGroup(t *testing.T) {
+ obj, err := makeFakeCloudProvider(nil).NewNodeGroup("", nil, nil, nil, nil)
+ assert.Equal(t, err, cloudprovider.ErrNotImplemented)
+ assert.Nil(t, obj)
+}
+
+func TestCleanUp(t *testing.T) {
+ assert.Nil(t, makeFakeCloudProvider(nil).Cleanup())
+}
+
+func TestResourceLimiter(t *testing.T) {
+ client := makeFakeCloudProvider(nil)
+ obj, err := client.GetResourceLimiter()
+ assert.Equal(t, obj, client.resourceLimiter)
+ assert.NoError(t, err)
+}
+
+func TestNodeGroups(t *testing.T) {
+ client := makeFakeCloudProvider(nil)
+ assert.Zero(t, client.NodeGroups())
+ client.nodeGroups = make([]cloudprovider.NodeGroup, 0)
+ assert.NotZero(t, client.NodeGroups())
+ assert.Empty(t, client.NodeGroups())
+ nodeGroup := &brightboxNodeGroup{}
+ client.nodeGroups = append(client.nodeGroups, nodeGroup)
+ newGroups := client.NodeGroups()
+ assert.Len(t, newGroups, 1)
+ assert.Same(t, newGroups[0], client.nodeGroups[0])
+}
+
+func TestNodeGroupForNode(t *testing.T) {
+ client := makeFakeCloudProvider(nil)
+ client.nodeGroups = fakeNodeGroups
+ client.nodeMap = fakeNodeMap
+ nodeGroup, err := client.NodeGroupForNode(makeNode(fakeServer))
+ assert.Equal(t, fakeNodeGroup, nodeGroup)
+ assert.NoError(t, err)
+ nodeGroup, err = client.NodeGroupForNode(makeNode(missingServer))
+ assert.Nil(t, nodeGroup)
+ assert.NoError(t, err)
+}
+
+func TestBuildBrightBox(t *testing.T) {
+ ts := k8ssdk.GetAuthEnvTokenHandler(t)
+ defer k8ssdk.ResetAuthEnvironment()
+ defer ts.Close()
+ rl := cloudprovider.NewResourceLimiter(nil, nil)
+ do := cloudprovider.NodeGroupDiscoveryOptions{}
+ opts := config.AutoscalingOptions{
+ CloudProviderName: cloudprovider.BrightboxProviderName,
+ ClusterName: fakeClusterName,
+ }
+ cloud := BuildBrightbox(opts, do, rl)
+ assert.Equal(t, cloud.Name(), cloudprovider.BrightboxProviderName)
+ obj, err := cloud.GetResourceLimiter()
+ assert.Equal(t, rl, obj)
+ assert.NoError(t, err)
+}
+
+func testOsExit(t *testing.T, funcName string, testFunc func(*testing.T)) {
+ if os.Getenv(funcName) == "1" {
+ testFunc(t)
+ return
+ }
+ cmd := exec.Command(os.Args[0], "-test.run="+funcName)
+ cmd.Env = append(os.Environ(), funcName+"=1")
+ err := cmd.Run()
+ if e, ok := err.(*exec.ExitError); ok && !e.Success() {
+ return
+ }
+ t.Fatalf("%s subprocess ran successfully, want non-zero exit status", funcName)
+}
+
+func TestBuildBrightboxMissingClusterName(t *testing.T) {
+ testOsExit(t, "TestBuildBrightboxMissingClusterName", func(t *testing.T) {
+ ts := k8ssdk.GetAuthEnvTokenHandler(t)
+ defer k8ssdk.ResetAuthEnvironment()
+ defer ts.Close()
+ rl := cloudprovider.NewResourceLimiter(nil, nil)
+ do := cloudprovider.NodeGroupDiscoveryOptions{}
+ opts := config.AutoscalingOptions{
+ CloudProviderName: cloudprovider.BrightboxProviderName,
+ }
+ BuildBrightbox(opts, do, rl)
+ })
+}
+
+func TestRefresh(t *testing.T) {
+ mockclient := new(mocks.CloudAccess)
+ testclient := k8ssdk.MakeTestClient(mockclient, nil)
+ provider := makeFakeCloudProvider(testclient)
+ groups := fakeGroups()
+ mockclient.On("ServerGroup", "grp-sda44").Return(fakeServerGroupsda44(), nil)
+ mockclient.On("ConfigMaps").Return(fakeConfigMaps(), nil)
+ mockclient.On("ConfigMap", "cfg-502vh").Return(fakeConfigMap502vh(), nil)
+ err := provider.Refresh()
+ require.NoError(t, err)
+ assert.Len(t, provider.nodeGroups, 1)
+ assert.NotEmpty(t, provider.nodeMap)
+ node, err := provider.NodeGroupForNode(makeNode("srv-lv426"))
+ assert.NoError(t, err)
+ require.NotNil(t, node)
+ assert.Equal(t, node.Id(), groups[0].Id)
+ node, err = provider.NodeGroupForNode(makeNode("srv-rp897"))
+ assert.NoError(t, err)
+ require.NotNil(t, node)
+ assert.Equal(t, node.Id(), groups[0].Id)
+ mockclient.AssertExpectations(t)
+}
+
+func TestFetchDefaultGroup(t *testing.T) {
+ groups := fakeGroups()
+ groupID := fetchDefaultGroup(groups, "fred")
+ assert.Empty(t, groupID)
+ groupID = fetchDefaultGroup(groups, groups[0].Name)
+ assert.Equal(t, groups[0].Id, groupID)
+}
+
+func makeNode(serverID string) *v1.Node {
+ return &v1.Node{
+ Spec: v1.NodeSpec{
+ ProviderID: k8ssdk.MapServerIDToProviderID(serverID),
+ },
+ }
+}
+
+func makeFakeCloudProvider(brightboxCloudClient *k8ssdk.Cloud) *brightboxCloudProvider {
+ return &brightboxCloudProvider{
+ resourceLimiter: &cloudprovider.ResourceLimiter{},
+ ClusterName: fakeClusterName,
+ Cloud: brightboxCloudClient,
+ }
+}
+
+func fakeConfigMaps() []brightbox.ConfigMap {
+ const groupjson = `
+[{
+ "id": "cfg-502vh",
+ "resource_type": "config_map",
+ "url": "https://api.gb1.brightbox.com/1.0/config_maps/cfg-502vh",
+ "name": "storage.k8s-fake.cluster.local",
+ "data": {
+ "image": "img-svqx9",
+ "max": "4",
+ "min": "1",
+ "region": "gb1",
+ "server_group": "grp-sda44",
+ "default_group": "grp-vnr33",
+ "type": "2gb.ssd",
+ "user_data": "fake_userdata",
+ "zone": ""
+ }
+ }]
+ `
+ var result []brightbox.ConfigMap
+ _ = json.NewDecoder(strings.NewReader(groupjson)).Decode(&result)
+ return result
+}
+
+func fakeConfigMap502vh() *brightbox.ConfigMap {
+ const groupjson = `
+{
+ "id": "cfg-502vh",
+ "resource_type": "config_map",
+ "url": "https://api.gb1.brightbox.com/1.0/config_maps/cfg-502vh",
+ "name": "storage.k8s-fake.cluster.local",
+ "data": {
+ "image": "img-svqx9",
+ "max": "4",
+ "min": "1",
+ "region": "gb1",
+ "server_group": "grp-sda44",
+ "default_group": "grp-vnr33",
+ "type": "2gb.ssd",
+ "user_data": "fake_userdata",
+ "zone": ""
+ }
+ }
+ `
+ var result brightbox.ConfigMap
+ _ = json.NewDecoder(strings.NewReader(groupjson)).Decode(&result)
+ return &result
+}
+
+func fakeServerGroupsda44() *brightbox.ServerGroup {
+ const groupjson = `
+{"id": "grp-sda44",
+ "resource_type": "server_group",
+ "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-sda44",
+ "name": "storage.k8s-fake.cluster.local",
+ "description": "1:4",
+ "created_at": "2011-10-01T00:00:00Z",
+ "default": true,
+ "account":
+ {"id": "acc-43ks4",
+ "resource_type": "account",
+ "url": "https://api.gb1.brightbox.com/1.0/accounts/acc-43ks4",
+ "name": "Brightbox",
+ "status": "active"},
+ "firewall_policy":
+ {"id": "fwp-j3654",
+ "resource_type": "firewall_policy",
+ "url": "https://api.gb1.brightbox.com/1.0/firewall_policies/fwp-j3654",
+ "default": true,
+ "name": "default",
+ "created_at": "2011-10-01T00:00:00Z",
+ "description": null},
+ "servers":
+ [
+ {"id": "srv-lv426",
+ "resource_type": "server",
+ "url": "https://api.gb1.brightbox.com/1.0/servers/srv-lv426",
+ "name": "",
+ "status": "active",
+ "locked": false,
+ "hostname": "srv-lv426",
+ "fqdn": "srv-lv426.gb1.brightbox.com",
+ "created_at": "2011-10-01T01:00:00Z",
+ "started_at": "2011-10-01T01:01:00Z",
+ "deleted_at": null},
+ {"id": "srv-rp897",
+ "resource_type": "server",
+ "url": "https://api.gb1.brightbox.com/1.0/servers/srv-rp897",
+ "name": "",
+ "status": "active",
+ "locked": false,
+ "hostname": "srv-rp897",
+ "fqdn": "srv-rp897.gb1.brightbox.com",
+ "created_at": "2011-10-01T01:00:00Z",
+ "started_at": "2011-10-01T01:01:00Z",
+ "deleted_at": null}
+ ]}
+ `
+ var result brightbox.ServerGroup
+ _ = json.NewDecoder(strings.NewReader(groupjson)).Decode(&result)
+ return &result
+}
+
+func fakeGroups() []brightbox.ServerGroup {
+ const groupjson = `
+[{"id": "grp-sda44",
+ "resource_type": "server_group",
+ "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-sda44",
+ "name": "storage.k8s-fake.cluster.local",
+ "description": "1:4",
+ "created_at": "2011-10-01T00:00:00Z",
+ "default": true,
+ "account":
+ {"id": "acc-43ks4",
+ "resource_type": "account",
+ "url": "https://api.gb1.brightbox.com/1.0/accounts/acc-43ks4",
+ "name": "Brightbox",
+ "status": "active"},
+ "firewall_policy":
+ {"id": "fwp-j3654",
+ "resource_type": "firewall_policy",
+ "url": "https://api.gb1.brightbox.com/1.0/firewall_policies/fwp-j3654",
+ "default": true,
+ "name": "default",
+ "created_at": "2011-10-01T00:00:00Z",
+ "description": null},
+ "servers":
+ [
+ {"id": "srv-lv426",
+ "resource_type": "server",
+ "url": "https://api.gb1.brightbox.com/1.0/servers/srv-lv426",
+ "name": "",
+ "status": "active",
+ "locked": false,
+ "hostname": "srv-lv426",
+ "fqdn": "srv-lv426.gb1.brightbox.com",
+ "created_at": "2011-10-01T01:00:00Z",
+ "started_at": "2011-10-01T01:01:00Z",
+ "deleted_at": null},
+ {"id": "srv-rp897",
+ "resource_type": "server",
+ "url": "https://api.gb1.brightbox.com/1.0/servers/srv-rp897",
+ "name": "",
+ "status": "active",
+ "locked": false,
+ "hostname": "srv-rp897",
+ "fqdn": "srv-rp897.gb1.brightbox.com",
+ "created_at": "2011-10-01T01:00:00Z",
+ "started_at": "2011-10-01T01:01:00Z",
+ "deleted_at": null}
+ ]}]
+ `
+ var result []brightbox.ServerGroup
+ _ = json.NewDecoder(strings.NewReader(groupjson)).Decode(&result)
+ return result
+}
+
+func fakeServerlv426() *brightbox.Server {
+ const serverjson = `
+{"id": "srv-lv426",
+ "resource_type": "server",
+ "url": "https://api.gb1.brightbox.com/1.0/servers/srv-lv426",
+ "name": "storage-0.storage.k8s-fake.cluster.local",
+ "status": "active",
+ "locked": false,
+ "hostname": "srv-lv426",
+ "created_at": "2011-10-01T01:00:00Z",
+ "started_at": "2011-10-01T01:01:00Z",
+ "deleted_at": null,
+ "user_data": null,
+ "fqdn": "srv-lv426.gb1.brightbox.com",
+ "compatibility_mode": false,
+ "console_url": null,
+ "console_token": null,
+ "console_token_expires": null,
+ "account":
+ {"id": "acc-43ks4",
+ "resource_type": "account",
+ "url": "https://api.gb1.brightbox.com/1.0/accounts/acc-43ks4",
+ "name": "Brightbox",
+ "status": "active"},
+ "image":
+ {"id": "img-3ikco",
+ "resource_type": "image",
+ "url": "https://api.gb1.brightbox.com/1.0/images/img-3ikco",
+ "name": "Ubuntu Lucid 10.04 server",
+ "username": "ubuntu",
+ "status": "available",
+ "locked": false,
+ "description": "Expands root partition automatically. login: ubuntu using stored ssh key",
+ "source": "ubuntu-lucid-daily-i64-server-20110509",
+ "arch": "x86_64",
+ "created_at": "2011-05-09T12:00:00Z",
+ "official": true,
+ "public": true,
+ "owner": "acc-43ks4"},
+ "server_type":
+ {"id": "typ-zx45f",
+ "resource_type": "server_type",
+ "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-zx45f",
+ "name": "Small",
+ "status": "available",
+ "cores": 2,
+ "ram": 2048,
+ "disk_size": 81920,
+ "handle": "small"},
+ "zone":
+ {"id": "zon-328ds",
+ "resource_type": "zone",
+ "url": "https://api.gb1.brightbox.com/1.0/zones/zon-328ds",
+ "handle": "gb1"},
+ "cloud_ips":
+ [{"id": "cip-k4a25",
+ "resource_type": "cloud_ip",
+ "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-k4a25",
+ "status": "mapped",
+ "public_ip": "109.107.50.0",
+ "public_ipv4": "109.107.50.0",
+ "public_ipv6": "2a02:1348:ffff:ffff::6d6b:3200",
+ "fqdn": "cip-k4a25.gb1.brightbox.com",
+ "reverse_dns": null,
+ "name": "product website ip"}],
+ "interfaces":
+ [{"id": "int-ds42k",
+ "resource_type": "interface",
+ "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-ds42k",
+ "mac_address": "02:24:19:00:00:ee",
+ "ipv4_address": "81.15.16.17"}],
+ "snapshots":
+ [],
+ "server_groups":
+ [{"id": "grp-sda44",
+ "resource_type": "server_group",
+ "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-sda44",
+ "name": "",
+ "description": null,
+ "created_at": "2011-10-01T00:00:00Z",
+ "default": true}]}
+`
+ var result brightbox.Server
+ _ = json.NewDecoder(strings.NewReader(serverjson)).Decode(&result)
+ return &result
+}
+
+func fakeServerTypezx45f() *brightbox.ServerType {
+ const serverjson = `
+{"id": "typ-zx45f",
+ "resource_type": "server_type",
+ "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-zx45f",
+ "name": "Small",
+ "status": "available",
+ "cores": 2,
+ "ram": 2048,
+ "disk_size": 81920,
+ "handle": "small"}
+`
+ var result brightbox.ServerType
+ _ = json.NewDecoder(strings.NewReader(serverjson)).Decode(&result)
+ return &result
+}
+
+func fakeServerrp897() *brightbox.Server {
+ const serverjson = `
+{"id": "srv-rp897",
+ "resource_type": "server",
+ "url": "https://api.gb1.brightbox.com/1.0/servers/srv-rp897",
+ "name": "storage-0.storage.k8s-fake.cluster.local",
+ "status": "active",
+ "locked": false,
+ "hostname": "srv-rp897",
+ "created_at": "2011-10-01T01:00:00Z",
+ "started_at": "2011-10-01T01:01:00Z",
+ "deleted_at": null,
+ "user_data": null,
+ "fqdn": "srv-rp897.gb1.brightbox.com",
+ "compatibility_mode": false,
+ "console_url": null,
+ "console_token": null,
+ "console_token_expires": null,
+ "account":
+ {"id": "acc-43ks4",
+ "resource_type": "account",
+ "url": "https://api.gb1.brightbox.com/1.0/accounts/acc-43ks4",
+ "name": "Brightbox",
+ "status": "active"},
+ "image":
+ {"id": "img-3ikco",
+ "resource_type": "image",
+ "url": "https://api.gb1.brightbox.com/1.0/images/img-3ikco",
+ "name": "Ubuntu Lucid 10.04 server",
+ "username": "ubuntu",
+ "status": "available",
+ "locked": false,
+ "description": "Expands root partition automatically. login: ubuntu using stored ssh key",
+ "source": "ubuntu-lucid-daily-i64-server-20110509",
+ "arch": "x86_64",
+ "created_at": "2011-05-09T12:00:00Z",
+ "official": true,
+ "public": true,
+ "owner": "acc-43ks4"},
+ "server_type":
+ {"id": "typ-zx45f",
+ "resource_type": "server_type",
+ "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-zx45f",
+ "name": "Small",
+ "status": "available",
+ "cores": 2,
+ "ram": 2048,
+ "disk_size": 81920,
+ "handle": "small"},
+ "zone":
+ {"id": "zon-328ds",
+ "resource_type": "zone",
+ "url": "https://api.gb1.brightbox.com/1.0/zones/zon-328ds",
+ "handle": "gb1"},
+ "cloud_ips":
+ [{"id": "cip-k4a25",
+ "resource_type": "cloud_ip",
+ "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-k4a25",
+ "status": "mapped",
+ "public_ip": "109.107.50.0",
+ "public_ipv4": "109.107.50.0",
+ "public_ipv6": "2a02:1348:ffff:ffff::6d6b:3200",
+ "fqdn": "cip-k4a25.gb1.brightbox.com",
+ "reverse_dns": null,
+ "name": "product website ip"}],
+ "interfaces":
+ [{"id": "int-ds42k",
+ "resource_type": "interface",
+ "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-ds42k",
+ "mac_address": "02:24:19:00:00:ee",
+ "ipv4_address": "81.15.16.17"}],
+ "snapshots":
+ [],
+ "server_groups":
+ [{"id": "grp-sda44",
+ "resource_type": "server_group",
+ "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-sda44",
+ "name": "",
+ "description": null,
+ "created_at": "2011-10-01T00:00:00Z",
+ "default": true}]}
+`
+ var result brightbox.Server
+ _ = json.NewDecoder(strings.NewReader(serverjson)).Decode(&result)
+ return &result
+}
+
+func fakeServertesty() *brightbox.Server {
+ const serverjson = `
+{"id": "srv-testy",
+ "resource_type": "server",
+ "url": "https://api.gb1.brightbox.com/1.0/servers/srv-testy",
+ "name": "storage-0.storage.k8s-fake.cluster.local",
+ "status": "active",
+ "locked": false,
+ "hostname": "srv-testy",
+ "created_at": "2011-10-01T01:00:00Z",
+ "started_at": "2011-10-01T01:01:00Z",
+ "deleted_at": null,
+ "user_data": null,
+ "fqdn": "srv-testy.gb1.brightbox.com",
+ "compatibility_mode": false,
+ "console_url": null,
+ "console_token": null,
+ "console_token_expires": null,
+ "account":
+ {"id": "acc-43ks4",
+ "resource_type": "account",
+ "url": "https://api.gb1.brightbox.com/1.0/accounts/acc-43ks4",
+ "name": "Brightbox",
+ "status": "active"},
+ "image":
+ {"id": "img-3ikco",
+ "resource_type": "image",
+ "url": "https://api.gb1.brightbox.com/1.0/images/img-3ikco",
+ "name": "Ubuntu Lucid 10.04 server",
+ "username": "ubuntu",
+ "status": "available",
+ "locked": false,
+ "description": "Expands root partition automatically. login: ubuntu using stored ssh key",
+ "source": "ubuntu-lucid-daily-i64-server-20110509",
+ "arch": "x86_64",
+ "created_at": "2011-05-09T12:00:00Z",
+ "official": true,
+ "public": true,
+ "owner": "acc-43ks4"},
+ "server_type":
+ {"id": "typ-zx45f",
+ "resource_type": "server_type",
+ "url": "https://api.gb1.brightbox.com/1.0/server_types/typ-zx45f",
+ "name": "Small",
+ "status": "available",
+ "cores": 2,
+ "ram": 2048,
+ "disk_size": 81920,
+ "handle": "small"},
+ "zone":
+ {"id": "zon-328ds",
+ "resource_type": "zone",
+ "url": "https://api.gb1.brightbox.com/1.0/zones/zon-328ds",
+ "handle": "gb1"},
+ "cloud_ips":
+ [{"id": "cip-k4a25",
+ "resource_type": "cloud_ip",
+ "url": "https://api.gb1.brightbox.com/1.0/cloud_ips/cip-k4a25",
+ "status": "mapped",
+ "public_ip": "109.107.50.0",
+ "public_ipv4": "109.107.50.0",
+ "public_ipv6": "2a02:1348:ffff:ffff::6d6b:3200",
+ "fqdn": "cip-k4a25.gb1.brightbox.com",
+ "reverse_dns": null,
+ "name": "product website ip"}],
+ "interfaces":
+ [{"id": "int-ds42k",
+ "resource_type": "interface",
+ "url": "https://api.gb1.brightbox.com/1.0/interfaces/int-ds42k",
+ "mac_address": "02:24:19:00:00:ee",
+ "ipv4_address": "81.15.16.17"}],
+ "snapshots":
+ [],
+ "server_groups":
+ [{"id": "grp-testy",
+ "resource_type": "server_group",
+ "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-testy",
+ "name": "",
+ "description": null,
+ "created_at": "2011-10-01T00:00:00Z",
+ "default": true}]}
+`
+ var result brightbox.Server
+ _ = json.NewDecoder(strings.NewReader(serverjson)).Decode(&result)
+ return &result
+}
+
+func fakeServerGroupsPlusOne() []brightbox.ServerGroup {
+ const groupjson = `
+[{"id": "grp-sda44",
+ "resource_type": "server_group",
+ "url": "https://api.gb1.brightbox.com/1.0/server_groups/grp-sda44",
+ "name": "storage.k8s-fake.cluster.local",
+ "description": "1:4",
+ "created_at": "2011-10-01T00:00:00Z",
+ "default": true,
+ "account":
+ {"id": "acc-43ks4",
+ "resource_type": "account",
+ "url": "https://api.gb1.brightbox.com/1.0/accounts/acc-43ks4",
+ "name": "Brightbox",
+ "status": "active"},
+ "firewall_policy":
+ {"id": "fwp-j3654",
+ "resource_type": "firewall_policy",
+ "url": "https://api.gb1.brightbox.com/1.0/firewall_policies/fwp-j3654",
+ "default": true,
+ "name": "default",
+ "created_at": "2011-10-01T00:00:00Z",
+ "description": null},
+ "servers":
+ [
+ {"id": "srv-lv426",
+ "resource_type": "server",
+ "url": "https://api.gb1.brightbox.com/1.0/servers/srv-lv426",
+ "name": "",
+ "status": "active",
+ "locked": false,
+ "hostname": "srv-lv426",
+ "fqdn": "srv-lv426.gb1.brightbox.com",
+ "created_at": "2011-10-01T01:00:00Z",
+ "started_at": "2011-10-01T01:01:00Z",
+ "deleted_at": null},
+ {"id": "srv-testy",
+ "resource_type": "server",
+ "url": "https://api.gb1.brightbox.com/1.0/servers/srv-testy",
+ "name": "",
+ "status": "active",
+ "locked": false,
+ "hostname": "srv-testy",
+ "fqdn": "srv-testy.gb1.brightbox.com",
+ "created_at": "2011-10-01T01:00:00Z",
+ "started_at": "2011-10-01T01:01:00Z",
+ "deleted_at": null},
+ {"id": "srv-rp897",
+ "resource_type": "server",
+ "url": "https://api.gb1.brightbox.com/1.0/servers/srv-rp897",
+ "name": "",
+ "status": "active",
+ "locked": false,
+ "hostname": "srv-rp897",
+ "fqdn": "srv-rp897.gb1.brightbox.com",
+ "created_at": "2011-10-01T01:00:00Z",
+ "started_at": "2011-10-01T01:01:00Z",
+ "deleted_at": null}
+ ]}]
+ `
+ var result []brightbox.ServerGroup
+ _ = json.NewDecoder(strings.NewReader(groupjson)).Decode(&result)
+ return result
+}
+
+func deletedFakeServer(server *brightbox.Server) *brightbox.Server {
+ now := time.Now()
+ result := *server
+ result.DeletedAt = &now
+ result.Status = "deleted"
+ result.ServerGroups = []brightbox.ServerGroup{}
+ return &result
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go b/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go
new file mode 100644
index 000000000000..2b6974c29361
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group.go
@@ -0,0 +1,438 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package brightbox
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ apiv1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+ brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/status"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
+ "k8s.io/autoscaler/cluster-autoscaler/config"
+ klog "k8s.io/klog/v2"
+ v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+)
+
+const (
+ // Allocatable Resources reserves
+ // Reserve 4% of memory
+ memoryReservePercent = 4
+ // with a minimum of 160MB
+ minimumMemoryReserve = 167772160
+ // Reserve 5GB of disk space
+ minimumDiskReserve = 5368709120
+)
+
+var (
+ checkInterval = time.Second * 1
+ checkTimeout = time.Second * 30
+)
+
+type brightboxNodeGroup struct {
+ id string
+ minSize int
+ maxSize int
+ serverOptions *brightbox.ServerOptions
+ *k8ssdk.Cloud
+}
+
+// MaxSize returns maximum size of the node group.
+func (ng *brightboxNodeGroup) MaxSize() int {
+ klog.V(4).Info("MaxSize")
+ return ng.maxSize
+}
+
+// MinSize returns minimum size of the node group.
+func (ng *brightboxNodeGroup) MinSize() int {
+ klog.V(4).Info("MinSize")
+ return ng.minSize
+}
+
+// TargetSize returns the current target size of the node group. It
+// is possible that the number of nodes in Kubernetes is different at
+// the moment but should be equal to Size() once everything stabilizes
+// (new nodes finish startup and registration or removed nodes are deleted
+// completely). Implementation required.
+func (ng *brightboxNodeGroup) TargetSize() (int, error) {
+ klog.V(4).Info("TargetSize")
+ group, err := ng.GetServerGroup(ng.Id())
+ if err != nil {
+ return 0, err
+ }
+ return len(group.Servers), nil
+}
+
+// CurrentSize returns the current actual size of the node group.
+func (ng *brightboxNodeGroup) CurrentSize() (int, error) {
+ klog.V(4).Info("CurrentSize")
+ // The implementation is currently synchronous, so
+ // CurrentSize and TargetSize will be identical at all times
+ return ng.TargetSize()
+}
+
+// IncreaseSize increases the size of the node group. To delete a node
+// you need to explicitly name it and use DeleteNode. This function should
+// wait until node group size is updated. Implementation required.
+func (ng *brightboxNodeGroup) IncreaseSize(delta int) error {
+ klog.V(4).Infof("IncreaseSize: %v", delta)
+ if delta <= 0 {
+ return fmt.Errorf("size increase must be positive")
+ }
+ size, err := ng.TargetSize()
+ if err != nil {
+ return err
+ }
+ desiredSize := size + delta
+ if desiredSize > ng.MaxSize() {
+ return fmt.Errorf("size increase too large - desired:%d max:%d", desiredSize, ng.MaxSize())
+ }
+ err = ng.createServers(delta)
+ if err != nil {
+ return err
+ }
+ return wait.Poll(
+ checkInterval,
+ checkTimeout,
+ func() (bool, error) {
+ size, err := ng.TargetSize()
+ return err == nil && size >= desiredSize, err
+ },
+ )
+}
+
+// DeleteNodes deletes nodes from this node group. Error is returned
+// either on failure or if the given node doesn't belong to this
+// node group. This function should wait until node group size is
+// updated. Implementation required.
+func (ng *brightboxNodeGroup) DeleteNodes(nodes []*apiv1.Node) error {
+ klog.V(4).Info("DeleteNodes")
+ klog.V(4).Infof("Nodes: %+v", nodes)
+ for _, node := range nodes {
+ size, err := ng.CurrentSize()
+ if err != nil {
+ return err
+ }
+ if size <= ng.MinSize() {
+ return fmt.Errorf("min size reached, no further nodes will be deleted")
+ }
+ serverID := k8ssdk.MapProviderIDToServerID(node.Spec.ProviderID)
+ err = ng.deleteServerFromGroup(serverID)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// DecreaseTargetSize decreases the target size of the node group. This
+// function doesn't permit to delete any existing node and can be used
+// only to reduce the request for new nodes that have not been yet
+// fulfilled. Delta should be negative.
+// It is assumed that cloud provider will not delete the existing nodes
+// when there is an option to just decrease the target. Implementation
+// required.
+func (ng *brightboxNodeGroup) DecreaseTargetSize(delta int) error {
+ klog.V(4).Infof("DecreaseTargetSize: %v", delta)
+ if delta >= 0 {
+ return fmt.Errorf("decrease size must be negative")
+ }
+ size, err := ng.TargetSize()
+ if err != nil {
+ return err
+ }
+ nodesize, err := ng.CurrentSize()
+ if err != nil {
+ return err
+ }
+ // Group size is synchronous at present, so this always fails
+ if size+delta < nodesize {
+ return fmt.Errorf("attempt to delete existing nodes targetSize:%d delta:%d existingNodes: %d",
+ size, delta, nodesize)
+ }
+ return fmt.Errorf("shouldn't have got here")
+}
+
+// Id returns an unique identifier of the node group.
+func (ng *brightboxNodeGroup) Id() string {
+ klog.V(4).Info("Id")
+ return ng.id
+}
+
+// Debug returns a string containing all information regarding this
+// node group.
+func (ng *brightboxNodeGroup) Debug() string {
+ klog.V(4).Info("Debug")
+ return fmt.Sprintf("brightboxNodeGroup %+v", *ng)
+}
+
+// Nodes returns a list of all nodes that belong to this node group.
+// It is required that Instance objects returned by this method have Id
+// field set. Other fields are optional.
+func (ng *brightboxNodeGroup) Nodes() ([]cloudprovider.Instance, error) {
+ klog.V(4).Info("Nodes")
+ group, err := ng.GetServerGroup(ng.Id())
+ if err != nil {
+ return nil, err
+ }
+ klog.V(4).Infof("Found %d servers in group", len(group.Servers))
+ nodes := make([]cloudprovider.Instance, len(group.Servers))
+ for i, server := range group.Servers {
+ cpStatus := cloudprovider.InstanceStatus{}
+ switch server.Status {
+ case status.Active:
+ cpStatus.State = cloudprovider.InstanceRunning
+ case status.Creating:
+ cpStatus.State = cloudprovider.InstanceCreating
+ case status.Deleting:
+ cpStatus.State = cloudprovider.InstanceDeleting
+ default:
+ errorInfo := cloudprovider.InstanceErrorInfo{
+ ErrorClass: cloudprovider.OtherErrorClass,
+ ErrorCode: server.Status,
+ ErrorMessage: server.Status,
+ }
+ cpStatus.ErrorInfo = &errorInfo
+ }
+ nodes[i] = cloudprovider.Instance{
+ Id: k8ssdk.MapServerIDToProviderID(server.Id),
+ Status: &cpStatus,
+ }
+ }
+ klog.V(4).Infof("Created %d nodes", len(nodes))
+ return nodes, nil
+}
+
+// Exist checks if the node group really exists on the cloud provider
+// side. Allows to tell the theoretical node group from the real
+// one. Implementation required.
+func (ng *brightboxNodeGroup) Exist() bool {
+ klog.V(4).Info("Exist")
+ _, err := ng.GetServerGroup(ng.Id())
+ return err == nil
+}
+
+// TemplateNodeInfo returns a schedulerframework.NodeInfo structure of an empty
+// (as if just started) node. This will be used in scale-up simulations to
+// predict what would a new node look like if a node group was expanded. The returned
+// NodeInfo is expected to have a fully populated Node object, with all of the labels,
+// capacity and allocatable information as well as all pods that are started on
+// the node by default, using manifest (most likely only kube-proxy). Implementation optional.
+func (ng *brightboxNodeGroup) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) {
+ klog.V(4).Info("TemplateNodeInfo")
+ klog.V(4).Infof("Looking for server type %q", ng.serverOptions.ServerType)
+ serverType, err := ng.findServerType()
+ if err != nil {
+ return nil, err
+ }
+ klog.V(4).Infof("ServerType %+v", serverType)
+ // AllowedPodNumber is the kubelet default. The way to obtain that default programmatically
+ // has been lost in a twisty maze of endless indirection.
+ resources := &schedulerframework.Resource{
+ MilliCPU: int64(serverType.Cores * 1000),
+ Memory: int64(serverType.Ram * 1024 * 1024),
+ EphemeralStorage: int64(serverType.DiskSize * 1024 * 1024),
+ AllowedPodNumber: 110,
+ }
+ node := apiv1.Node{
+ Status: apiv1.NodeStatus{
+ Capacity: resourceList(resources),
+ Allocatable: resourceList(applyFudgeFactor(resources)),
+ Conditions: cloudprovider.BuildReadyConditions(),
+ },
+ }
+ nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(ng.Id()))
+ nodeInfo.SetNode(&node)
+ return nodeInfo, nil
+}
+
+// ResourceList returns a resource list of this resource.
+func resourceList(r *schedulerframework.Resource) v1.ResourceList {
+ result := v1.ResourceList{
+ v1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
+ v1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI),
+ v1.ResourcePods: *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
+ v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI),
+ }
+ for rName, rQuant := range r.ScalarResources {
+ if v1helper.IsHugePageResourceName(rName) {
+ result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
+ } else {
+ result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
+ }
+ }
+ return result
+}
+
+// Create creates the node group on the cloud provider
+// side. Implementation optional.
+func (ng *brightboxNodeGroup) Create() (cloudprovider.NodeGroup, error) {
+ klog.V(4).Info("Create")
+ return nil, cloudprovider.ErrNotImplemented
+}
+
+// Delete deletes the node group on the cloud provider side.
+// This will be executed only for autoprovisioned node groups, once
+// their size drops to 0. Implementation optional.
+func (ng *brightboxNodeGroup) Delete() error {
+ klog.V(4).Info("Delete")
+ return cloudprovider.ErrNotImplemented
+}
+
+// GetOptions returns NodeGroupAutoscalingOptions that should be used for this particular
+// NodeGroup. Returning a nil will result in using default options.
+func (ng *brightboxNodeGroup) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*config.NodeGroupAutoscalingOptions, error) {
+ return nil, cloudprovider.ErrNotImplemented
+}
+
+// Autoprovisioned returns true if the node group is autoprovisioned. An
+// autoprovisioned group was created by CA and can be deleted when scaled
+// to 0.
+func (ng *brightboxNodeGroup) Autoprovisioned() bool {
+ klog.V(4).Info("Autoprovisioned")
+ return false
+}
+
+//private
+
+func (ng *brightboxNodeGroup) findServerType() (*brightbox.ServerType, error) {
+ handle := ng.serverOptions.ServerType
+ if strings.HasPrefix(handle, "typ-") {
+ return ng.GetServerType(handle)
+ }
+ servertypes, err := ng.GetServerTypes()
+ if err != nil {
+ return nil, err
+ }
+ for _, servertype := range servertypes {
+ if servertype.Handle == handle {
+ return &servertype, nil
+ }
+ }
+ return nil, fmt.Errorf("ServerType with handle '%s' doesn't exist", handle)
+}
+
+func max(x, y int64) int64 {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+func applyFudgeFactor(capacity *schedulerframework.Resource) *schedulerframework.Resource {
+ allocatable := capacity.Clone()
+ allocatable.Memory = max(0, capacity.Memory-max(capacity.Memory*memoryReservePercent/100, minimumMemoryReserve))
+ allocatable.EphemeralStorage = max(0, capacity.EphemeralStorage-minimumDiskReserve)
+ return allocatable
+}
+
+func makeNodeGroupFromAPIDetails(
+ name string,
+ mapData map[string]string,
+ minSize int,
+ maxSize int,
+ cloudclient *k8ssdk.Cloud,
+) *brightboxNodeGroup {
+	klog.V(4).Info("makeNodeGroupFromAPIDetails")
+ userData := mapData["user_data"]
+ options := &brightbox.ServerOptions{
+ Image: mapData["image"],
+ Name: &name,
+ ServerType: mapData["type"],
+ Zone: mapData["zone"],
+ UserData: &userData,
+ ServerGroups: []string{mapData["default_group"], mapData["server_group"]},
+ }
+ result := brightboxNodeGroup{
+ id: mapData["server_group"],
+ minSize: minSize,
+ maxSize: maxSize,
+ serverOptions: options,
+ Cloud: cloudclient,
+ }
+ klog.V(4).Info(result.Debug())
+ return &result
+}
+
+func (ng *brightboxNodeGroup) createServers(amount int) error {
+ klog.V(4).Infof("createServers: %d", amount)
+ for i := 1; i <= amount; i++ {
+ _, err := ng.CreateServer(ng.serverOptions)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Delete the server and wait for the group details to be updated
+func (ng *brightboxNodeGroup) deleteServerFromGroup(serverID string) error {
+ klog.V(4).Infof("deleteServerFromGroup: %q", serverID)
+ serverIDNotInGroup := func() (bool, error) {
+ return ng.isMissing(serverID)
+ }
+ missing, err := serverIDNotInGroup()
+ if err != nil {
+ return err
+ } else if missing {
+ return fmt.Errorf("%s belongs to a different group than %s", serverID, ng.Id())
+ }
+ err = ng.DestroyServer(serverID)
+ if err != nil {
+ return err
+ }
+ return wait.Poll(
+ checkInterval,
+ checkTimeout,
+ serverIDNotInGroup,
+ )
+}
+
+func serverNotFoundError(id string) error {
+ klog.V(4).Infof("serverNotFoundError: created for %q", id)
+ return fmt.Errorf("Server %s not found", id)
+}
+
+func (ng *brightboxNodeGroup) isMissing(serverID string) (bool, error) {
+ klog.V(4).Infof("isMissing: %q from %q", serverID, ng.Id())
+ server, err := ng.GetServer(
+ context.Background(),
+ serverID,
+ serverNotFoundError(serverID),
+ )
+ if err != nil {
+ return false, err
+ }
+ if server.DeletedAt != nil {
+ klog.V(4).Info("server deleted")
+ return true, nil
+ }
+ for _, group := range server.ServerGroups {
+ if group.Id == ng.Id() {
+ return false, nil
+ }
+ }
+ return true, nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group_test.go b/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group_test.go
new file mode 100644
index 000000000000..7c42db9499e8
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/brightbox_node_group_test.go
@@ -0,0 +1,347 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package brightbox
+
+import (
+ "errors"
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks"
+ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
+)
+
+const (
+ fakeMaxSize = 4
+ fakeMinSize = 1
+ fakeNodeGroupDescription = "1:4"
+ fakeDefaultSize = 3
+ fakeNodeGroupID = "grp-sda44"
+ fakeNodeGroupName = "auto.workers.k8s_fake.cluster.local"
+ fakeNodeGroupImageID = "img-testy"
+ fakeNodeGroupServerTypeID = "typ-zx45f"
+ fakeNodeGroupServerTypeHandle = "small"
+ fakeNodeGroupZoneID = "zon-testy"
+ fakeNodeGroupMainGroupID = "grp-y6cai"
+ fakeNodeGroupUserData = "fake userdata"
+)
+
+var (
+ fakeMapData = map[string]string{
+ "min": strconv.Itoa(fakeMinSize),
+ "max": strconv.Itoa(fakeMaxSize),
+ "server_group": fakeNodeGroupID,
+ "default_group": fakeNodeGroupMainGroupID,
+ "image": fakeNodeGroupImageID,
+ "type": fakeNodeGroupServerTypeID,
+ "zone": fakeNodeGroupZoneID,
+ "user_data": fakeNodeGroupUserData,
+ }
+ ErrFake = errors.New("fake API Error")
+ fakeInstances = []cloudprovider.Instance{
+ {
+ Id: "brightbox://srv-rp897",
+ Status: &cloudprovider.InstanceStatus{
+ State: cloudprovider.InstanceRunning,
+ },
+ },
+ {
+ Id: "brightbox://srv-lv426",
+ Status: &cloudprovider.InstanceStatus{
+ State: cloudprovider.InstanceRunning,
+ },
+ },
+ }
+ fakeTransitionInstances = []cloudprovider.Instance{
+ {
+ Id: "brightbox://srv-rp897",
+ Status: &cloudprovider.InstanceStatus{
+ State: cloudprovider.InstanceDeleting,
+ },
+ },
+ {
+ Id: "brightbox://srv-lv426",
+ Status: &cloudprovider.InstanceStatus{
+ State: cloudprovider.InstanceCreating,
+ },
+ },
+ }
+ ErrFakeInstances = []cloudprovider.Instance{
+ {
+ Id: "brightbox://srv-rp897",
+ Status: &cloudprovider.InstanceStatus{
+ ErrorInfo: &cloudprovider.InstanceErrorInfo{
+ ErrorClass: cloudprovider.OtherErrorClass,
+ ErrorCode: "unavailable",
+ ErrorMessage: "unavailable",
+ },
+ },
+ },
+ {
+ Id: "brightbox://srv-lv426",
+ Status: &cloudprovider.InstanceStatus{
+ ErrorInfo: &cloudprovider.InstanceErrorInfo{
+ ErrorClass: cloudprovider.OtherErrorClass,
+ ErrorCode: "inactive",
+ ErrorMessage: "inactive",
+ },
+ },
+ },
+ }
+)
+
+func TestMaxSize(t *testing.T) {
+ assert.Equal(t, fakeMaxSize, makeFakeNodeGroup(nil).MaxSize())
+}
+
+func TestMinSize(t *testing.T) {
+ assert.Equal(t, fakeMinSize, makeFakeNodeGroup(nil).MinSize())
+}
+
+func TestSize(t *testing.T) {
+ mockclient := new(mocks.CloudAccess)
+ testclient := k8ssdk.MakeTestClient(mockclient, nil)
+ nodeGroup := makeFakeNodeGroup(testclient)
+ fakeServerGroup := &fakeGroups()[0]
+ t.Run("TargetSize", func(t *testing.T) {
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(fakeServerGroup, nil).Once()
+ size, err := nodeGroup.TargetSize()
+ assert.Equal(t, 2, size)
+ assert.NoError(t, err)
+ })
+ t.Run("TargetSizeFail", func(t *testing.T) {
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(nil, ErrFake).Once()
+ size, err := nodeGroup.TargetSize()
+ assert.Error(t, err)
+ assert.Zero(t, size)
+ })
+ t.Run("CurrentSize", func(t *testing.T) {
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(fakeServerGroup, nil).Once()
+ size, err := nodeGroup.CurrentSize()
+ assert.Equal(t, 2, size)
+ assert.NoError(t, err)
+ })
+ t.Run("CurrentSizeFail", func(t *testing.T) {
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(nil, ErrFake).Once()
+ size, err := nodeGroup.CurrentSize()
+ assert.Error(t, err)
+ assert.Zero(t, size)
+ })
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(fakeServerGroup, nil)
+ t.Run("DecreaseTargetSizePositive", func(t *testing.T) {
+ err := nodeGroup.DecreaseTargetSize(0)
+ assert.Error(t, err)
+ })
+ t.Run("DecreaseTargetSizeFail", func(t *testing.T) {
+ err := nodeGroup.DecreaseTargetSize(-1)
+ assert.Error(t, err)
+ })
+ mockclient.AssertExpectations(t)
+}
+
+func TestIncreaseSize(t *testing.T) {
+ mockclient := new(mocks.CloudAccess)
+ testclient := k8ssdk.MakeTestClient(mockclient, nil)
+ nodeGroup := makeFakeNodeGroup(testclient)
+ t.Run("Creating details set properly", func(t *testing.T) {
+ assert.Equal(t, fakeNodeGroupID, nodeGroup.id)
+ assert.Equal(t, fakeNodeGroupName, *nodeGroup.serverOptions.Name)
+ assert.Equal(t, fakeNodeGroupServerTypeID, nodeGroup.serverOptions.ServerType)
+ assert.Equal(t, fakeNodeGroupImageID, nodeGroup.serverOptions.Image)
+ assert.Equal(t, fakeNodeGroupZoneID, nodeGroup.serverOptions.Zone)
+ assert.ElementsMatch(t, []string{fakeNodeGroupMainGroupID, fakeNodeGroupID}, nodeGroup.serverOptions.ServerGroups)
+ assert.Equal(t, fakeNodeGroupUserData, *nodeGroup.serverOptions.UserData)
+ })
+ t.Run("Require positive delta", func(t *testing.T) {
+ err := nodeGroup.IncreaseSize(0)
+ assert.Error(t, err)
+ })
+ fakeServerGroup := &fakeGroups()[0]
+ t.Run("Don't exceed max size", func(t *testing.T) {
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(fakeServerGroup, nil).Once()
+ err := nodeGroup.IncreaseSize(4)
+ assert.Error(t, err)
+ })
+ t.Run("Fail to create one new server", func(t *testing.T) {
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(fakeServerGroup, nil).Once()
+ mockclient.On("CreateServer", mock.Anything).
+ Return(nil, ErrFake).Once()
+ err := nodeGroup.IncreaseSize(1)
+ assert.Error(t, err)
+ })
+ t.Run("Create one new server", func(t *testing.T) {
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(fakeServerGroup, nil).Once()
+ mockclient.On("CreateServer", mock.Anything).
+ Return(nil, nil).Once()
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(&fakeServerGroupsPlusOne()[0], nil).Once()
+ err := nodeGroup.IncreaseSize(1)
+ assert.NoError(t, err)
+ })
+}
+
+func TestDeleteNodes(t *testing.T) {
+ mockclient := new(mocks.CloudAccess)
+ testclient := k8ssdk.MakeTestClient(mockclient, nil)
+ nodeGroup := makeFakeNodeGroup(testclient)
+ fakeServerGroup := &fakeGroups()[0]
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(fakeServerGroup, nil).
+ On("Server", fakeServer).
+ Return(fakeServertesty(), nil)
+ t.Run("Empty Nodes", func(t *testing.T) {
+ err := nodeGroup.DeleteNodes(nil)
+ assert.NoError(t, err)
+ })
+ t.Run("Foreign Node", func(t *testing.T) {
+ err := nodeGroup.DeleteNodes([]*v1.Node{makeNode(fakeServer)})
+ assert.Error(t, err)
+ })
+ t.Run("Delete Node", func(t *testing.T) {
+ mockclient.On("Server", "srv-rp897").
+ Return(fakeServerrp897(), nil).Once().
+ On("Server", "srv-rp897").
+ Return(deletedFakeServer(fakeServerrp897()), nil).
+ Once().
+ On("DestroyServer", "srv-rp897").
+ Return(nil).Once()
+ err := nodeGroup.DeleteNodes([]*v1.Node{makeNode("srv-rp897")})
+ assert.NoError(t, err)
+ })
+ t.Run("Delete All Nodes", func(t *testing.T) {
+ truncateServers := mocks.ServerListReducer(fakeServerGroup)
+ mockclient.On("Server", "srv-rp897").
+ Return(fakeServerrp897(), nil).Once().
+ On("Server", "srv-rp897").
+ Return(deletedFakeServer(fakeServerrp897()), nil).
+ Once().
+ On("DestroyServer", "srv-rp897").
+ Return(nil).Once().Run(truncateServers)
+ err := nodeGroup.DeleteNodes([]*v1.Node{
+ makeNode("srv-rp897"),
+ makeNode("srv-lv426"),
+ })
+ assert.Error(t, err)
+ })
+}
+
+func TestExist(t *testing.T) {
+ mockclient := new(mocks.CloudAccess)
+ testclient := k8ssdk.MakeTestClient(mockclient, nil)
+ nodeGroup := makeFakeNodeGroup(testclient)
+ fakeServerGroup := &fakeGroups()[0]
+ t.Run("Find Group", func(t *testing.T) {
+ mockclient.On("ServerGroup", nodeGroup.Id()).
+ Return(fakeServerGroup, nil).Once()
+ assert.True(t, nodeGroup.Exist())
+ })
+ t.Run("Fail to Find Group", func(t *testing.T) {
+ mockclient.On("ServerGroup", nodeGroup.Id()).
+ Return(nil, serverNotFoundError(nodeGroup.Id()))
+ assert.False(t, nodeGroup.Exist())
+ })
+ mockclient.AssertExpectations(t)
+}
+
+func TestNodes(t *testing.T) {
+ mockclient := new(mocks.CloudAccess)
+ testclient := k8ssdk.MakeTestClient(mockclient, nil)
+ nodeGroup := makeFakeNodeGroup(testclient)
+ fakeServerGroup := &fakeGroups()[0]
+ mockclient.On("ServerGroup", fakeNodeGroupID).
+ Return(fakeServerGroup, nil)
+ t.Run("Both Active", func(t *testing.T) {
+ fakeServerGroup.Servers[0].Status = "active"
+ fakeServerGroup.Servers[1].Status = "active"
+ nodes, err := nodeGroup.Nodes()
+ require.NoError(t, err)
+ assert.ElementsMatch(t, fakeInstances, nodes)
+ })
+ t.Run("Creating and Deleting", func(t *testing.T) {
+ fakeServerGroup.Servers[0].Status = "creating"
+ fakeServerGroup.Servers[1].Status = "deleting"
+ nodes, err := nodeGroup.Nodes()
+ require.NoError(t, err)
+ assert.ElementsMatch(t, fakeTransitionInstances, nodes)
+ })
+ t.Run("Inactive and Unavailable", func(t *testing.T) {
+ fakeServerGroup.Servers[0].Status = "inactive"
+ fakeServerGroup.Servers[1].Status = "unavailable"
+ nodes, err := nodeGroup.Nodes()
+ require.NoError(t, err)
+ assert.ElementsMatch(t, ErrFakeInstances, nodes)
+ })
+}
+
+func TestTemplateNodeInfo(t *testing.T) {
+ mockclient := new(mocks.CloudAccess)
+ testclient := k8ssdk.MakeTestClient(mockclient, nil)
+ mockclient.On("ServerType", fakeNodeGroupServerTypeID).
+ Return(fakeServerTypezx45f(), nil)
+ obj, err := makeFakeNodeGroup(testclient).TemplateNodeInfo()
+ require.NoError(t, err)
+ assert.Equal(t, fakeResource(), obj.Allocatable)
+}
+
+func TestCreate(t *testing.T) {
+ obj, err := makeFakeNodeGroup(nil).Create()
+ assert.Equal(t, cloudprovider.ErrNotImplemented, err)
+ assert.Nil(t, obj)
+}
+
+func TestDelete(t *testing.T) {
+ assert.Equal(t, cloudprovider.ErrNotImplemented, makeFakeNodeGroup(nil).Delete())
+}
+
+func TestAutoprovisioned(t *testing.T) {
+ assert.False(t, makeFakeNodeGroup(nil).Autoprovisioned())
+}
+
+func fakeResource() *schedulerframework.Resource {
+ return &schedulerframework.Resource{
+ MilliCPU: 2000,
+ Memory: 1979711488,
+ EphemeralStorage: 80530636800,
+ AllowedPodNumber: 110,
+ }
+}
+
+func makeFakeNodeGroup(brightboxCloudClient *k8ssdk.Cloud) *brightboxNodeGroup {
+ return makeNodeGroupFromAPIDetails(
+ fakeNodeGroupName,
+ fakeMapData,
+ fakeMinSize,
+ fakeMaxSize,
+ brightboxCloudClient,
+ )
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/examples/check-env.yaml b/cluster-autoscaler/cloudprovider/brightbox/examples/check-env.yaml
new file mode 100644
index 000000000000..844f10d365e0
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/examples/check-env.yaml
@@ -0,0 +1,16 @@
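+# Debugging aid: a one-shot Job that prints the environment injected from the
+# brightbox-credentials secret so the values can be checked.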
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: check-env
+ namespace: kube-system
+spec:
+ template:
+ spec:
+ restartPolicy: Never
+ containers:
+ - name: check-env
+ image: bash
+ envFrom:
+ - secretRef:
+ name: brightbox-credentials
+ command: ["env"]
diff --git a/cluster-autoscaler/cloudprovider/brightbox/examples/cluster-autoscaler-secret.yaml b/cluster-autoscaler/cloudprovider/brightbox/examples/cluster-autoscaler-secret.yaml
new file mode 100644
index 000000000000..b20af4efcdc6
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/examples/cluster-autoscaler-secret.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: brightbox-credentials
+ namespace: kube-system
+type: Opaque
+data:
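+ # Each value must be filled in with the base64-encoded setting for your cluster (e.g. echo -n <value> | base64).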
+ BRIGHTBOX_API_URL:
+ BRIGHTBOX_CLIENT:
+ BRIGHTBOX_CLIENT_SECRET:
+ BRIGHTBOX_KUBE_JOIN_COMMAND:
+ BRIGHTBOX_KUBE_VERSION:
diff --git a/cluster-autoscaler/cloudprovider/brightbox/examples/config.rb b/cluster-autoscaler/cloudprovider/brightbox/examples/config.rb
new file mode 100644
index 000000000000..775a29ac9ee7
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/examples/config.rb
@@ -0,0 +1,39 @@
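+# Emits Helm chart values for the cluster-autoscaler as YAML on stdout,
+# ready to be piped into "helm template".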
+def config
+ {
+ cluster_name: 'kubernetes.cluster.local',
+ image: 'brightbox/cluster-autoscaler-brightbox',
+ secret: 'brightbox-credentials'
+ }
+end
+
+def output(config)
+ { 'autoDiscovery' => { 'clusterName' => config[:cluster_name] },
+ 'cloudProvider' => 'brightbox',
+ 'image' =>
+ { 'repository' => config[:image],
+ 'tag' => ENV['TAG'],
+ 'pullPolicy' => 'Always' },
+ 'tolerations' =>
+ [
+ { 'effect' => 'NoSchedule', 'key' => 'node-role.kubernetes.io/master' },
+ { 'operator' => 'Exists', 'key' => 'CriticalAddonsOnly' }
+ ],
+ 'extraArgs' =>
+ { 'v' => (ENV['TAG'] == 'dev' ? 4 : 2).to_s,
+ 'stderrthreshold' => 'info',
+ 'logtostderr' => true,
+ 'cluster-name' => config[:cluster_name],
+ 'skip-nodes-with-local-storage' => true },
+ 'podAnnotations' =>
+ { 'prometheus.io/scrape' => 'true', 'prometheus.io/port' => '8085' },
+ 'rbac' => { 'create' => true },
+ 'resources' =>
+ { 'limits' => { 'cpu' => '100m', 'memory' => '300Mi' },
+ 'requests' => { 'cpu' => '100m', 'memory' => '300Mi' } },
+ 'envFromSecret' => config[:secret],
+ 'priorityClassName' => 'system-cluster-critical',
+ 'dnsPolicy' => 'Default' }
+end
+
+require 'yaml'
+STDOUT << output(config).to_yaml
diff --git a/cluster-autoscaler/cloudprovider/brightbox/examples/rebase.sh b/cluster-autoscaler/cloudprovider/brightbox/examples/rebase.sh
new file mode 100644
index 000000000000..0d751faddaa2
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/examples/rebase.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
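+# Rebase the Brightbox provider branches onto the matching upstream
+# cluster-autoscaler release tags.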
+git rebase --onto cluster-autoscaler-1.17.2 cluster-autoscaler-1.17.1 autoscaler-brightbox-cloudprovider-1.17
+git rebase --onto cluster-autoscaler-1.18.1 cluster-autoscaler-1.18.0 autoscaler-brightbox-cloudprovider-1.18
+
diff --git a/cluster-autoscaler/cloudprovider/brightbox/go-cache/CONTRIBUTORS b/cluster-autoscaler/cloudprovider/brightbox/go-cache/CONTRIBUTORS
new file mode 100644
index 000000000000..2b16e997415f
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/go-cache/CONTRIBUTORS
@@ -0,0 +1,9 @@
+This is a list of people who have contributed code to go-cache. They, or their
+employers, are the copyright holders of the contributed code. Contributed code
+is subject to the license restrictions listed in LICENSE (as they were when the
+code was contributed.)
+
+Dustin Sallings
+Jason Mooberry
+Sergey Shepelev
+Alex Edwards
diff --git a/cluster-autoscaler/cloudprovider/brightbox/go-cache/LICENSE b/cluster-autoscaler/cloudprovider/brightbox/go-cache/LICENSE
new file mode 100644
index 000000000000..db9903c75c58
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/go-cache/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/cluster-autoscaler/cloudprovider/brightbox/go-cache/README.md b/cluster-autoscaler/cloudprovider/brightbox/go-cache/README.md
new file mode 100644
index 000000000000..c5789cc66cc8
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/go-cache/README.md
@@ -0,0 +1,83 @@
+# go-cache
+
+go-cache is an in-memory key:value store/cache similar to memcached that is
+suitable for applications running on a single machine. Its major advantage is
+that, being essentially a thread-safe `map[string]interface{}` with expiration
+times, it doesn't need to serialize or transmit its contents over the network.
+
+Any object can be stored, for a given duration or forever, and the cache can be
+safely used by multiple goroutines.
+
+Although go-cache isn't meant to be used as a persistent datastore, the entire
+cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
+items map to serialize, and `NewFrom()` to create a cache from a deserialized
+one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.)
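+
+A minimal sketch of that round-trip, assuming the helper names `saveItems` and
+`loadItems` (they are not part of the library) and that you register the
+concrete types you store:
+
+```go
+import (
+	"encoding/gob"
+	"os"
+	"time"
+
+	"github.com/patrickmn/go-cache"
+)
+
+// saveItems writes every unexpired item to path using gob. Concrete types are
+// registered first because Item.Object is an interface{} field.
+func saveItems(c *cache.Cache, path string) error {
+	items := c.Items()
+	for _, it := range items {
+		gob.Register(it.Object)
+	}
+	f, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	return gob.NewEncoder(f).Encode(items)
+}
+
+// loadItems rebuilds a cache from a file written by saveItems. The same
+// concrete types must be registered before decoding.
+func loadItems(path string) (*cache.Cache, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	items := map[string]cache.Item{}
+	if err := gob.NewDecoder(f).Decode(&items); err != nil {
+		return nil, err
+	}
+	return cache.NewFrom(5*time.Minute, 10*time.Minute, items), nil
+}
+```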
+
+### Installation
+
+`go get github.com/patrickmn/go-cache`
+
+### Usage
+
+```go
+import (
+ "fmt"
+ "github.com/patrickmn/go-cache"
+ "time"
+)
+
+func main() {
+ // Create a cache with a default expiration time of 5 minutes, and which
+ // purges expired items every 10 minutes
+ c := cache.New(5*time.Minute, 10*time.Minute)
+
+ // Set the value of the key "foo" to "bar", with the default expiration time
+ c.Set("foo", "bar", cache.DefaultExpiration)
+
+ // Set the value of the key "baz" to 42, with no expiration time
+ // (the item won't be removed until it is re-set, or removed using
+ // c.Delete("baz")
+ c.Set("baz", 42, cache.NoExpiration)
+
+ // Get the string associated with the key "foo" from the cache
+ foo, found := c.Get("foo")
+ if found {
+ fmt.Println(foo)
+ }
+
+ // Since Go is statically typed, and cache values can be anything, type
+ // assertion is needed when values are being passed to functions that don't
+ // take arbitrary types, (i.e. interface{}). The simplest way to do this for
+ // values which will only be used once--e.g. for passing to another
+ // function--is:
+ foo, found := c.Get("foo")
+ if found {
+ MyFunction(foo.(string))
+ }
+
+ // This gets tedious if the value is used several times in the same function.
+ // You might do either of the following instead:
+ if x, found := c.Get("foo"); found {
+ foo := x.(string)
+ // ...
+ }
+ // or
+ var foo string
+ if x, found := c.Get("foo"); found {
+ foo = x.(string)
+ }
+ // ...
+ // foo can then be passed around freely as a string
+
+ // Want performance? Store pointers!
+ c.Set("foo", &MyStruct, cache.DefaultExpiration)
+ if x, found := c.Get("foo"); found {
+ foo := x.(*MyStruct)
+ // ...
+ }
+}
+```
+
+### Reference
+
+`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache)
diff --git a/cluster-autoscaler/cloudprovider/brightbox/go-cache/cache.go b/cluster-autoscaler/cloudprovider/brightbox/go-cache/cache.go
new file mode 100644
index 000000000000..db88d2f2cb19
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/go-cache/cache.go
@@ -0,0 +1,1161 @@
+package cache
+
+import (
+ "encoding/gob"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sync"
+ "time"
+)
+
+type Item struct {
+ Object interface{}
+ Expiration int64
+}
+
+// Returns true if the item has expired.
+func (item Item) Expired() bool {
+ if item.Expiration == 0 {
+ return false
+ }
+ return time.Now().UnixNano() > item.Expiration
+}
+
+const (
+ // For use with functions that take an expiration time.
+ NoExpiration time.Duration = -1
+ // For use with functions that take an expiration time. Equivalent to
+ // passing in the same expiration duration as was given to New() or
+ // NewFrom() when the cache was created (e.g. 5 minutes.)
+ DefaultExpiration time.Duration = 0
+)
+
+type Cache struct {
+ *cache
+ // If this is confusing, see the comment at the bottom of New()
+}
+
+type cache struct {
+ defaultExpiration time.Duration
+ items map[string]Item
+ mu sync.RWMutex
+ onEvicted func(string, interface{})
+ janitor *janitor
+}
+
+// Add an item to the cache, replacing any existing item. If the duration is 0
+// (DefaultExpiration), the cache's default expiration time is used. If it is -1
+// (NoExpiration), the item never expires.
+func (c *cache) Set(k string, x interface{}, d time.Duration) {
+ // "Inlining" of set
+ var e int64
+ if d == DefaultExpiration {
+ d = c.defaultExpiration
+ }
+ if d > 0 {
+ e = time.Now().Add(d).UnixNano()
+ }
+ c.mu.Lock()
+ c.items[k] = Item{
+ Object: x,
+ Expiration: e,
+ }
+ // TODO: Calls to mu.Unlock are currently not deferred because defer
+ // adds ~200 ns (as of go1.)
+ c.mu.Unlock()
+}
+
+func (c *cache) set(k string, x interface{}, d time.Duration) {
+ var e int64
+ if d == DefaultExpiration {
+ d = c.defaultExpiration
+ }
+ if d > 0 {
+ e = time.Now().Add(d).UnixNano()
+ }
+ c.items[k] = Item{
+ Object: x,
+ Expiration: e,
+ }
+}
+
+// Add an item to the cache, replacing any existing item, using the default
+// expiration.
+func (c *cache) SetDefault(k string, x interface{}) {
+ c.Set(k, x, DefaultExpiration)
+}
+
+// Add an item to the cache only if an item doesn't already exist for the given
+// key, or if the existing item has expired. Returns an error otherwise.
+func (c *cache) Add(k string, x interface{}, d time.Duration) error {
+ c.mu.Lock()
+ _, found := c.get(k)
+ if found {
+ c.mu.Unlock()
+ return fmt.Errorf("Item %s already exists", k)
+ }
+ c.set(k, x, d)
+ c.mu.Unlock()
+ return nil
+}
+
+// Set a new value for the cache key only if it already exists, and the existing
+// item hasn't expired. Returns an error otherwise.
+func (c *cache) Replace(k string, x interface{}, d time.Duration) error {
+ c.mu.Lock()
+ _, found := c.get(k)
+ if !found {
+ c.mu.Unlock()
+ return fmt.Errorf("Item %s doesn't exist", k)
+ }
+ c.set(k, x, d)
+ c.mu.Unlock()
+ return nil
+}
+
+// Get an item from the cache. Returns the item or nil, and a bool indicating
+// whether the key was found.
+func (c *cache) Get(k string) (interface{}, bool) {
+ c.mu.RLock()
+ // "Inlining" of get and Expired
+ item, found := c.items[k]
+ if !found {
+ c.mu.RUnlock()
+ return nil, false
+ }
+ if item.Expiration > 0 {
+ if time.Now().UnixNano() > item.Expiration {
+ c.mu.RUnlock()
+ return nil, false
+ }
+ }
+ c.mu.RUnlock()
+ return item.Object, true
+}
+
+// GetWithExpiration returns an item and its expiration time from the cache.
+// It returns the item or nil, the expiration time if one is set (if the item
+// never expires a zero value for time.Time is returned), and a bool indicating
+// whether the key was found.
+func (c *cache) GetWithExpiration(k string) (interface{}, time.Time, bool) {
+ c.mu.RLock()
+ // "Inlining" of get and Expired
+ item, found := c.items[k]
+ if !found {
+ c.mu.RUnlock()
+ return nil, time.Time{}, false
+ }
+
+ if item.Expiration > 0 {
+ if time.Now().UnixNano() > item.Expiration {
+ c.mu.RUnlock()
+ return nil, time.Time{}, false
+ }
+
+ // Return the item and the expiration time
+ c.mu.RUnlock()
+ return item.Object, time.Unix(0, item.Expiration), true
+ }
+
+ // If expiration <= 0 (i.e. no expiration time set) then return the item
+ // and a zeroed time.Time
+ c.mu.RUnlock()
+ return item.Object, time.Time{}, true
+}
+
+func (c *cache) get(k string) (interface{}, bool) {
+ item, found := c.items[k]
+ if !found {
+ return nil, false
+ }
+ // "Inlining" of Expired
+ if item.Expiration > 0 {
+ if time.Now().UnixNano() > item.Expiration {
+ return nil, false
+ }
+ }
+ return item.Object, true
+}
+
+// Increment an item of type int, int8, int16, int32, int64, uintptr, uint,
+// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the
+// item's value is not an integer, if it was not found, or if it is not
+// possible to increment it by n. To retrieve the incremented value, use one
+// of the specialized methods, e.g. IncrementInt64.
+func (c *cache) Increment(k string, n int64) error {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return fmt.Errorf("Item %s not found", k)
+ }
+ switch v.Object.(type) {
+ case int:
+ v.Object = v.Object.(int) + int(n)
+ case int8:
+ v.Object = v.Object.(int8) + int8(n)
+ case int16:
+ v.Object = v.Object.(int16) + int16(n)
+ case int32:
+ v.Object = v.Object.(int32) + int32(n)
+ case int64:
+ v.Object = v.Object.(int64) + n
+ case uint:
+ v.Object = v.Object.(uint) + uint(n)
+ case uintptr:
+ v.Object = v.Object.(uintptr) + uintptr(n)
+ case uint8:
+ v.Object = v.Object.(uint8) + uint8(n)
+ case uint16:
+ v.Object = v.Object.(uint16) + uint16(n)
+ case uint32:
+ v.Object = v.Object.(uint32) + uint32(n)
+ case uint64:
+ v.Object = v.Object.(uint64) + uint64(n)
+ case float32:
+ v.Object = v.Object.(float32) + float32(n)
+ case float64:
+ v.Object = v.Object.(float64) + float64(n)
+ default:
+ c.mu.Unlock()
+ return fmt.Errorf("The value for %s is not an integer", k)
+ }
+ c.items[k] = v
+ c.mu.Unlock()
+ return nil
+}
+
+// Increment an item of type float32 or float64 by n. Returns an error if the
+// item's value is not floating point, if it was not found, or if it is not
+// possible to increment it by n. Pass a negative number to decrement the
+// value. To retrieve the incremented value, use one of the specialized methods,
+// e.g. IncrementFloat64.
+func (c *cache) IncrementFloat(k string, n float64) error {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return fmt.Errorf("Item %s not found", k)
+ }
+ switch v.Object.(type) {
+ case float32:
+ v.Object = v.Object.(float32) + float32(n)
+ case float64:
+ v.Object = v.Object.(float64) + n
+ default:
+ c.mu.Unlock()
+ return fmt.Errorf("The value for %s does not have type float32 or float64", k)
+ }
+ c.items[k] = v
+ c.mu.Unlock()
+ return nil
+}
+
+// Increment an item of type int by n. Returns an error if the item's value is
+// not an int, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt(k string, n int) (int, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(int)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an int", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type int8 by n. Returns an error if the item's value is
+// not an int8, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt8(k string, n int8) (int8, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(int8)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an int8", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type int16 by n. Returns an error if the item's value is
+// not an int16, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt16(k string, n int16) (int16, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(int16)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an int16", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type int32 by n. Returns an error if the item's value is
+// not an int32, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt32(k string, n int32) (int32, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(int32)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an int32", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type int64 by n. Returns an error if the item's value is
+// not an int64, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt64(k string, n int64) (int64, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(int64)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an int64", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type uint by n. Returns an error if the item's value is
+// not an uint, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementUint(k string, n uint) (uint, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uint)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uint", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type uintptr by n. Returns an error if the item's value
+// is not an uintptr, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUintptr(k string, n uintptr) (uintptr, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uintptr)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uintptr", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type uint8 by n. Returns an error if the item's value
+// is not an uint8, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint8(k string, n uint8) (uint8, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uint8)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uint8", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type uint16 by n. Returns an error if the item's value
+// is not an uint16, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint16(k string, n uint16) (uint16, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uint16)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uint16", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type uint32 by n. Returns an error if the item's value
+// is not an uint32, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint32(k string, n uint32) (uint32, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uint32)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uint32", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type uint64 by n. Returns an error if the item's value
+// is not an uint64, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint64(k string, n uint64) (uint64, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uint64)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uint64", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type float32 by n. Returns an error if the item's value
+// is not an float32, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementFloat32(k string, n float32) (float32, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(float32)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an float32", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Increment an item of type float64 by n. Returns an error if the item's value
+// is not an float64, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementFloat64(k string, n float64) (float64, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(float64)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an float64", k)
+ }
+ nv := rv + n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type int, int8, int16, int32, int64, uintptr, uint,
+// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the
+// item's value is not an integer, if it was not found, or if it is not
+// possible to decrement it by n. To retrieve the decremented value, use one
+// of the specialized methods, e.g. DecrementInt64.
+func (c *cache) Decrement(k string, n int64) error {
+ // TODO: Implement Increment and Decrement more cleanly.
+ // (Cannot do Increment(k, n*-1) for uints.)
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return fmt.Errorf("Item not found")
+ }
+ switch v.Object.(type) {
+ case int:
+ v.Object = v.Object.(int) - int(n)
+ case int8:
+ v.Object = v.Object.(int8) - int8(n)
+ case int16:
+ v.Object = v.Object.(int16) - int16(n)
+ case int32:
+ v.Object = v.Object.(int32) - int32(n)
+ case int64:
+ v.Object = v.Object.(int64) - n
+ case uint:
+ v.Object = v.Object.(uint) - uint(n)
+ case uintptr:
+ v.Object = v.Object.(uintptr) - uintptr(n)
+ case uint8:
+ v.Object = v.Object.(uint8) - uint8(n)
+ case uint16:
+ v.Object = v.Object.(uint16) - uint16(n)
+ case uint32:
+ v.Object = v.Object.(uint32) - uint32(n)
+ case uint64:
+ v.Object = v.Object.(uint64) - uint64(n)
+ case float32:
+ v.Object = v.Object.(float32) - float32(n)
+ case float64:
+ v.Object = v.Object.(float64) - float64(n)
+ default:
+ c.mu.Unlock()
+ return fmt.Errorf("The value for %s is not an integer", k)
+ }
+ c.items[k] = v
+ c.mu.Unlock()
+ return nil
+}
+
+// Decrement an item of type float32 or float64 by n. Returns an error if the
+// item's value is not floating point, if it was not found, or if it is not
+// possible to decrement it by n. Pass a negative number to decrement the
+// value. To retrieve the decremented value, use one of the specialized methods,
+// e.g. DecrementFloat64.
+func (c *cache) DecrementFloat(k string, n float64) error {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return fmt.Errorf("Item %s not found", k)
+ }
+ switch v.Object.(type) {
+ case float32:
+ v.Object = v.Object.(float32) - float32(n)
+ case float64:
+ v.Object = v.Object.(float64) - n
+ default:
+ c.mu.Unlock()
+ return fmt.Errorf("The value for %s does not have type float32 or float64", k)
+ }
+ c.items[k] = v
+ c.mu.Unlock()
+ return nil
+}
+
+// Decrement an item of type int by n. Returns an error if the item's value is
+// not an int, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt(k string, n int) (int, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(int)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an int", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type int8 by n. Returns an error if the item's value is
+// not an int8, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt8(k string, n int8) (int8, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(int8)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an int8", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type int16 by n. Returns an error if the item's value is
+// not an int16, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt16(k string, n int16) (int16, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(int16)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an int16", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type int32 by n. Returns an error if the item's value is
+// not an int32, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt32(k string, n int32) (int32, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(int32)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an int32", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type int64 by n. Returns an error if the item's value is
+// not an int64, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt64(k string, n int64) (int64, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(int64)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an int64", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type uint by n. Returns an error if the item's value is
+// not an uint, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementUint(k string, n uint) (uint, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uint)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uint", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type uintptr by n. Returns an error if the item's value
+// is not an uintptr, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUintptr(k string, n uintptr) (uintptr, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uintptr)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uintptr", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type uint8 by n. Returns an error if the item's value is
+// not an uint8, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementUint8(k string, n uint8) (uint8, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uint8)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uint8", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type uint16 by n. Returns an error if the item's value
+// is not an uint16, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUint16(k string, n uint16) (uint16, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uint16)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uint16", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type uint32 by n. Returns an error if the item's value
+// is not an uint32, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUint32(k string, n uint32) (uint32, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uint32)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uint32", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type uint64 by n. Returns an error if the item's value
+// is not an uint64, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUint64(k string, n uint64) (uint64, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(uint64)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an uint64", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type float32 by n. Returns an error if the item's value
+// is not an float32, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementFloat32(k string, n float32) (float32, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(float32)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an float32", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Decrement an item of type float64 by n. Returns an error if the item's value
+// is not an float64, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementFloat64(k string, n float64) (float64, error) {
+ c.mu.Lock()
+ v, found := c.items[k]
+ if !found || v.Expired() {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("Item %s not found", k)
+ }
+ rv, ok := v.Object.(float64)
+ if !ok {
+ c.mu.Unlock()
+ return 0, fmt.Errorf("The value for %s is not an float64", k)
+ }
+ nv := rv - n
+ v.Object = nv
+ c.items[k] = v
+ c.mu.Unlock()
+ return nv, nil
+}
+
+// Delete an item from the cache. Does nothing if the key is not in the cache.
+func (c *cache) Delete(k string) {
+ c.mu.Lock()
+ v, evicted := c.delete(k)
+ c.mu.Unlock()
+ if evicted {
+ c.onEvicted(k, v)
+ }
+}
+
+func (c *cache) delete(k string) (interface{}, bool) {
+ if c.onEvicted != nil {
+ if v, found := c.items[k]; found {
+ delete(c.items, k)
+ return v.Object, true
+ }
+ }
+ delete(c.items, k)
+ return nil, false
+}
+
+type keyAndValue struct {
+ key string
+ value interface{}
+}
+
+// Delete all expired items from the cache.
+func (c *cache) DeleteExpired() {
+ var evictedItems []keyAndValue
+ now := time.Now().UnixNano()
+ c.mu.Lock()
+ for k, v := range c.items {
+ // "Inlining" of expired
+ if v.Expiration > 0 && now > v.Expiration {
+ ov, evicted := c.delete(k)
+ if evicted {
+ evictedItems = append(evictedItems, keyAndValue{k, ov})
+ }
+ }
+ }
+ c.mu.Unlock()
+ for _, v := range evictedItems {
+ c.onEvicted(v.key, v.value)
+ }
+}
+
+// Sets an (optional) function that is called with the key and value when an
+// item is evicted from the cache. (Including when it is deleted manually, but
+// not when it is overwritten.) Set to nil to disable.
+func (c *cache) OnEvicted(f func(string, interface{})) {
+ c.mu.Lock()
+ c.onEvicted = f
+ c.mu.Unlock()
+}
+
+// Write the cache's items (using Gob) to an io.Writer.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) Save(w io.Writer) (err error) {
+ enc := gob.NewEncoder(w)
+ defer func() {
+ if x := recover(); x != nil {
+ err = fmt.Errorf("Error registering item types with Gob library")
+ }
+ }()
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ for _, v := range c.items {
+ gob.Register(v.Object)
+ }
+ err = enc.Encode(&c.items)
+ return
+}
+
+// Save the cache's items to the given filename, creating the file if it
+// doesn't exist, and overwriting it if it does.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) SaveFile(fname string) error {
+ fp, err := os.Create(fname)
+ if err != nil {
+ return err
+ }
+ err = c.Save(fp)
+ if err != nil {
+ fp.Close()
+ return err
+ }
+ return fp.Close()
+}
+
+// Add (Gob-serialized) cache items from an io.Reader, excluding any items with
+// keys that already exist (and haven't expired) in the current cache.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) Load(r io.Reader) error {
+ dec := gob.NewDecoder(r)
+ items := map[string]Item{}
+ err := dec.Decode(&items)
+ if err == nil {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ for k, v := range items {
+ ov, found := c.items[k]
+ if !found || ov.Expired() {
+ c.items[k] = v
+ }
+ }
+ }
+ return err
+}
+
+// Load and add cache items from the given filename, excluding any items with
+// keys that already exist in the current cache.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) LoadFile(fname string) error {
+ fp, err := os.Open(fname)
+ if err != nil {
+ return err
+ }
+ err = c.Load(fp)
+ if err != nil {
+ fp.Close()
+ return err
+ }
+ return fp.Close()
+}
+
+// Copies all unexpired items in the cache into a new map and returns it.
+func (c *cache) Items() map[string]Item {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ m := make(map[string]Item, len(c.items))
+ now := time.Now().UnixNano()
+ for k, v := range c.items {
+ // "Inlining" of Expired
+ if v.Expiration > 0 {
+ if now > v.Expiration {
+ continue
+ }
+ }
+ m[k] = v
+ }
+ return m
+}
+
+// Returns the number of items in the cache. This may include items that have
+// expired, but have not yet been cleaned up.
+func (c *cache) ItemCount() int {
+ c.mu.RLock()
+ n := len(c.items)
+ c.mu.RUnlock()
+ return n
+}
+
+// Delete all items from the cache.
+func (c *cache) Flush() {
+ c.mu.Lock()
+ c.items = map[string]Item{}
+ c.mu.Unlock()
+}
+
+type janitor struct {
+ Interval time.Duration
+ stop chan bool
+}
+
+func (j *janitor) Run(c *cache) {
+ ticker := time.NewTicker(j.Interval)
+ for {
+ select {
+ case <-ticker.C:
+ c.DeleteExpired()
+ case <-j.stop:
+ ticker.Stop()
+ return
+ }
+ }
+}
+
+func stopJanitor(c *Cache) {
+ c.janitor.stop <- true
+}
+
+func runJanitor(c *cache, ci time.Duration) {
+ j := &janitor{
+ Interval: ci,
+ stop: make(chan bool),
+ }
+ c.janitor = j
+ go j.Run(c)
+}
+
+func newCache(de time.Duration, m map[string]Item) *cache {
+ if de == 0 {
+ de = -1
+ }
+ c := &cache{
+ defaultExpiration: de,
+ items: m,
+ }
+ return c
+}
+
+func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item) *Cache {
+ c := newCache(de, m)
+ // This trick ensures that the janitor goroutine (which--granted it
+ // was enabled--is running DeleteExpired on c forever) does not keep
+ // the returned C object from being garbage collected. When it is
+ // garbage collected, the finalizer stops the janitor goroutine, after
+ // which c can be collected.
+ C := &Cache{c}
+ if ci > 0 {
+ runJanitor(c, ci)
+ runtime.SetFinalizer(C, stopJanitor)
+ }
+ return C
+}
+
+// Return a new cache with a given default expiration duration and cleanup
+// interval. If the expiration duration is less than one (or NoExpiration),
+// the items in the cache never expire (by default), and must be deleted
+// manually. If the cleanup interval is less than one, expired items are not
+// deleted from the cache before calling c.DeleteExpired().
+func New(defaultExpiration, cleanupInterval time.Duration) *Cache {
+ items := make(map[string]Item)
+ return newCacheWithJanitor(defaultExpiration, cleanupInterval, items)
+}
+
+// Return a new cache with a given default expiration duration and cleanup
+// interval. If the expiration duration is less than one (or NoExpiration),
+// the items in the cache never expire (by default), and must be deleted
+// manually. If the cleanup interval is less than one, expired items are not
+// deleted from the cache before calling c.DeleteExpired().
+//
+// NewFrom() also accepts an items map which will serve as the underlying map
+// for the cache. This is useful for starting from a deserialized cache
+// (serialized using e.g. gob.Encode() on c.Items()), or passing in e.g.
+// make(map[string]Item, 500) to improve startup performance when the cache
+// is expected to reach a certain minimum size.
+//
+// Only the cache's methods synchronize access to this map, so it is not
+// recommended to keep any references to the map around after creating a cache.
+// If need be, the map can be accessed at a later point using c.Items() (subject
+// to the same caveat.)
+//
+// Note regarding serialization: When using e.g. gob, make sure to
+// gob.Register() the individual types stored in the cache before encoding a
+// map retrieved with c.Items(), and to register those same types before
+// decoding a blob containing an items map.
+func NewFrom(defaultExpiration, cleanupInterval time.Duration, items map[string]Item) *Cache {
+ return newCacheWithJanitor(defaultExpiration, cleanupInterval, items)
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/go-cache/sharded.go b/cluster-autoscaler/cloudprovider/brightbox/go-cache/sharded.go
new file mode 100644
index 000000000000..bcc0538bcc7a
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/go-cache/sharded.go
@@ -0,0 +1,192 @@
+package cache
+
+import (
+ "crypto/rand"
+ "math"
+ "math/big"
+ insecurerand "math/rand"
+ "os"
+ "runtime"
+ "time"
+)
+
+// This is an experimental and unexported (for now) attempt at making a cache
+// with better algorithmic complexity than the standard one, namely by
+// preventing write locks of the entire cache when an item is added. As of the
+// time of writing, the overhead of selecting buckets results in cache
+// operations being about twice as slow as for the standard cache with small
+// total cache sizes, and faster for larger ones.
+//
+// See cache_test.go for a few benchmarks.
+
+type unexportedShardedCache struct {
+ *shardedCache
+}
+
+type shardedCache struct {
+ seed uint32
+ m uint32
+ cs []*cache
+ janitor *shardedJanitor
+}
+
+// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead.
+func djb33(seed uint32, k string) uint32 {
+ var (
+ l = uint32(len(k))
+ d = 5381 + seed + l
+ i = uint32(0)
+ )
+ // Why is all this 5x faster than a for loop?
+ if l >= 4 {
+ for i < l-4 {
+ d = (d * 33) ^ uint32(k[i])
+ d = (d * 33) ^ uint32(k[i+1])
+ d = (d * 33) ^ uint32(k[i+2])
+ d = (d * 33) ^ uint32(k[i+3])
+ i += 4
+ }
+ }
+ switch l - i {
+ case 1:
+ case 2:
+ d = (d * 33) ^ uint32(k[i])
+ case 3:
+ d = (d * 33) ^ uint32(k[i])
+ d = (d * 33) ^ uint32(k[i+1])
+ case 4:
+ d = (d * 33) ^ uint32(k[i])
+ d = (d * 33) ^ uint32(k[i+1])
+ d = (d * 33) ^ uint32(k[i+2])
+ }
+ return d ^ (d >> 16)
+}
+
+func (sc *shardedCache) bucket(k string) *cache {
+ return sc.cs[djb33(sc.seed, k)%sc.m]
+}
+
+func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
+ sc.bucket(k).Set(k, x, d)
+}
+
+func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
+ return sc.bucket(k).Add(k, x, d)
+}
+
+func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
+ return sc.bucket(k).Replace(k, x, d)
+}
+
+func (sc *shardedCache) Get(k string) (interface{}, bool) {
+ return sc.bucket(k).Get(k)
+}
+
+func (sc *shardedCache) Increment(k string, n int64) error {
+ return sc.bucket(k).Increment(k, n)
+}
+
+func (sc *shardedCache) IncrementFloat(k string, n float64) error {
+ return sc.bucket(k).IncrementFloat(k, n)
+}
+
+func (sc *shardedCache) Decrement(k string, n int64) error {
+ return sc.bucket(k).Decrement(k, n)
+}
+
+func (sc *shardedCache) Delete(k string) {
+ sc.bucket(k).Delete(k)
+}
+
+func (sc *shardedCache) DeleteExpired() {
+ for _, v := range sc.cs {
+ v.DeleteExpired()
+ }
+}
+
+// Returns the items in the cache. This may include items that have expired,
+// but have not yet been cleaned up. If this is significant, the Expiration
+// fields of the items should be checked. Note that explicit synchronization
+// is needed to use a cache and its corresponding Items() return values at
+// the same time, as the maps are shared.
+func (sc *shardedCache) Items() []map[string]Item {
+ res := make([]map[string]Item, len(sc.cs))
+ for i, v := range sc.cs {
+ res[i] = v.Items()
+ }
+ return res
+}
+
+func (sc *shardedCache) Flush() {
+ for _, v := range sc.cs {
+ v.Flush()
+ }
+}
+
+type shardedJanitor struct {
+ Interval time.Duration
+ stop chan bool
+}
+
+func (j *shardedJanitor) Run(sc *shardedCache) {
+ j.stop = make(chan bool)
+ tick := time.Tick(j.Interval)
+ for {
+ select {
+ case <-tick:
+ sc.DeleteExpired()
+ case <-j.stop:
+ return
+ }
+ }
+}
+
+func stopShardedJanitor(sc *unexportedShardedCache) {
+ sc.janitor.stop <- true
+}
+
+func runShardedJanitor(sc *shardedCache, ci time.Duration) {
+ j := &shardedJanitor{
+ Interval: ci,
+ }
+ sc.janitor = j
+ go j.Run(sc)
+}
+
+func newShardedCache(n int, de time.Duration) *shardedCache {
+ max := big.NewInt(0).SetUint64(uint64(math.MaxUint32))
+ rnd, err := rand.Int(rand.Reader, max)
+ var seed uint32
+ if err != nil {
+ os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. Continuing with an insecure seed.\n"))
+ seed = insecurerand.Uint32()
+ } else {
+ seed = uint32(rnd.Uint64())
+ }
+ sc := &shardedCache{
+ seed: seed,
+ m: uint32(n),
+ cs: make([]*cache, n),
+ }
+ for i := 0; i < n; i++ {
+ c := &cache{
+ defaultExpiration: de,
+ items: map[string]Item{},
+ }
+ sc.cs[i] = c
+ }
+ return sc
+}
+
+func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache {
+ if defaultExpiration == 0 {
+ defaultExpiration = -1
+ }
+ sc := newShardedCache(shards, defaultExpiration)
+ SC := &unexportedShardedCache{sc}
+ if cleanupInterval > 0 {
+ runShardedJanitor(sc, cleanupInterval)
+ runtime.SetFinalizer(SC, stopShardedJanitor)
+ }
+ return SC
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/Jenkinsfile b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/Jenkinsfile
new file mode 100644
index 000000000000..d36f07bcaf99
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/Jenkinsfile
@@ -0,0 +1,39 @@
+pipeline {
+ options {
+ disableConcurrentBuilds()
+ buildDiscarder(logRotator(numToKeepStr: '5'))
+ }
+ triggers {
+ cron('@weekly')
+ }
+ agent {
+ docker {
+ image 'golang:1.13'
+ label "docker"
+ args "-v /tmp:/.cache"
+ }
+ }
+ stages {
+ stage("Prepare dependencies") {
+ steps {
+ sh 'go get -u github.com/jstemmer/go-junit-report'
+ sh 'go mod download'
+ }
+ }
+ stage("Test") {
+ steps {
+ sh 'go test -v ./... | go-junit-report | tee report.xml'
+ }
+ post {
+ failure {
+ mail to: 'sysadmin@brightbox.co.uk',
+ subject: "Gobrightbox Tests Failed: ${currentBuild.fullDisplayName}",
+ body: "${env.BUILD_URL}"
+ }
+ always {
+ junit "report.xml"
+ }
+ }
+ }
+ }
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/LICENSE.txt b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/LICENSE.txt
new file mode 100644
index 000000000000..28af8a9df078
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2015 Brightbox Systems Ltd.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/README.md b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/README.md
new file mode 100644
index 000000000000..a77ed8907ee6
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/README.md
@@ -0,0 +1,44 @@
+# Brightbox Golang Client
+
+`gobrightbox` is a [Brightbox Cloud](https://www.brightbox.com) [API](https://api.gb1.brightbox.com/1.0/)
+client implementation written in [Go](http://golang.org/).
+
+Documentation is available at [godoc.org](http://godoc.org/github.com/brightbox/gobrightbox).
+
+## Authentication
+
+This client does not itself handle authentication. Instead, use the standard
+[OAuth2](https://godoc.org/golang.org/x/oauth2) Go library to
+[authenticate](https://api.gb1.brightbox.com/1.0/#authentication) and create
+tokens, then pass the resulting `http.Client` to `NewClient`, as in the sketch
+below.
+
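+A minimal sketch of that wiring (the client identifier, secret and token URL
+are placeholders, and the import path refers to this vendored copy of the
+library rather than the upstream repository):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"golang.org/x/oauth2/clientcredentials"
+
+	brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
+)
+
+func main() {
+	// Placeholder credentials and token endpoint; substitute your own values.
+	conf := clientcredentials.Config{
+		ClientID:     "cli-xxxxx",
+		ClientSecret: "your-secret",
+		TokenURL:     "https://api.gb1.brightbox.com/token",
+	}
+	// The oauth2 http.Client obtains and refreshes tokens transparently.
+	httpClient := conf.Client(context.Background())
+
+	client, err := brightbox.NewClient(brightbox.DefaultRegionApiURL, "", httpClient)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	groups, err := client.ServerGroups()
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, group := range groups {
+		fmt.Println(group.Id, group.Name)
+	}
+}
+```
+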
+## Currently implemented
+
+* Full [Server](https://api.gb1.brightbox.com/1.0/#server) support
+* Full [Server Group](https://api.gb1.brightbox.com/1.0/#server_group) support
+* Full [CloudIP](https://api.gb1.brightbox.com/1.0/#cloud_ip) support
+* Full [Firewall Policy](https://api.gb1.brightbox.com/1.0/#firewall_policy) support
+* Full [Load Balancer](https://api.gb1.brightbox.com/1.0/#load_balancer) support
+* Full [Cloud SQL](https://api.gb1.brightbox.com/1.0/#database_server) support
+* Full [Api Client](https://api.gb1.brightbox.com/1.0/#api_client) support
+* Basic [Image](https://api.gb1.brightbox.com/1.0/#image) support
+* Basic event stream support
+
+## TODO
+
+* Orbit storage support
+* Collaboration support
+* User support
+* Account support
+* Cloud SQL Snapshot support
+* Cloud SQL Type support
+
+## Help
+
+If you need help using this library, drop an email to support at brightbox dot com.
+
+## License
+
+This code is released under an MIT License.
+
+Copyright (c) 2015-2016 Brightbox Systems Ltd.
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/accounts.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/accounts.go
new file mode 100644
index 000000000000..704ec43459e3
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/accounts.go
@@ -0,0 +1,64 @@
+package gobrightbox
+
+import (
+ "time"
+)
+
+// Account represents a Brightbox Cloud Account
+// https://api.gb1.brightbox.com/1.0/#account
+type Account struct {
+ Id string
+ Name string
+ Status string
+ Address1 string `json:"address_1"`
+ Address2 string `json:"address_2"`
+ City string
+ County string
+ Postcode string
+ CountryCode string `json:"country_code"`
+ CountryName string `json:"country_name"`
+ VatRegistrationNumber string `json:"vat_registration_number"`
+ TelephoneNumber string `json:"telephone_number"`
+ TelephoneVerified bool `json:"telephone_verified"`
+ VerifiedTelephone string `json:"verified_telephone"`
+ VerifiedAt *time.Time `json:"verified_at"`
+ VerifiedIp string `json:"verified_ip"`
+ ValidCreditCard bool `json:"valid_credit_card"`
+ CreatedAt *time.Time `json:"created_at"`
+ RamLimit int `json:"ram_limit"`
+ RamUsed int `json:"ram_used"`
+ DbsRamLimit int `json:"dbs_ram_limit"`
+ DbsRamUsed int `json:"dbs_ram_used"`
+ CloudIpsLimit int `json:"cloud_ips_limit"`
+ CloudIpsUsed int `json:"cloud_ips_used"`
+ LoadBalancersLimit int `json:"load_balancers_limit"`
+ LoadBalancersUsed int `json:"load_balancers_used"`
+ LibraryFtpHost string `json:"library_ftp_host"`
+ LibraryFtpUser string `json:"library_ftp_user"`
+ LibraryFtpPassword string `json:"library_ftp_password"`
+ Owner User
+ Users []User
+}
+
+// Accounts retrieves a list of all accounts associated with the client.
+//
+// API Clients are only ever associated with one single account. User clients
+// can have multiple accounts, through collaborations.
+func (c *Client) Accounts() ([]Account, error) {
+ var accounts []Account
+ _, err := c.MakeApiRequest("GET", "/1.0/accounts?nested=false", nil, &accounts)
+ if err != nil {
+ return nil, err
+ }
+ return accounts, err
+}
+
+// Account retrieves a detailed view of one account
+func (c *Client) Account(identifier string) (*Account, error) {
+ account := new(Account)
+ _, err := c.MakeApiRequest("GET", "/1.0/accounts/"+identifier, nil, account)
+ if err != nil {
+ return nil, err
+ }
+ return account, err
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/api_clients.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/api_clients.go
new file mode 100644
index 000000000000..7d10701668e0
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/api_clients.go
@@ -0,0 +1,89 @@
+package gobrightbox
+
+import (
+ "time"
+)
+
+// ApiClient represents an API client.
+// https://api.gb1.brightbox.com/1.0/#api_client
+type ApiClient struct {
+ Id string
+ Name string
+ Description string
+ Secret string
+ PermissionsGroup string `json:"permissions_group"`
+ RevokedAt *time.Time `json:"revoked_at"`
+ Account Account
+}
+
+// ApiClientOptions is used in conjunction with CreateApiClient and
+// UpdateApiClient to create and update api clients
+type ApiClientOptions struct {
+ Id string `json:"-"`
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ PermissionsGroup *string `json:"permissions_group,omitempty"`
+}
+
+// ApiClients retrieves a list of all API clients
+func (c *Client) ApiClients() ([]ApiClient, error) {
+ var apiClients []ApiClient
+ _, err := c.MakeApiRequest("GET", "/1.0/api_clients", nil, &apiClients)
+ if err != nil {
+ return nil, err
+ }
+ return apiClients, err
+}
+
+// ApiClient retrieves a detailed view of one API client
+func (c *Client) ApiClient(identifier string) (*ApiClient, error) {
+ apiClient := new(ApiClient)
+ _, err := c.MakeApiRequest("GET", "/1.0/api_clients/"+identifier, nil, apiClient)
+ if err != nil {
+ return nil, err
+ }
+ return apiClient, err
+}
+
+// CreateApiClient creates a new API client.
+//
+// It takes an ApiClientOptions struct for specifying name and other
+// attributes. Not all attributes can be specified at create time
+// (such as Id, which is allocated for you)
+func (c *Client) CreateApiClient(options *ApiClientOptions) (*ApiClient, error) {
+ ac := new(ApiClient)
+ _, err := c.MakeApiRequest("POST", "/1.0/api_clients", options, &ac)
+ if err != nil {
+ return nil, err
+ }
+ return ac, nil
+}
+
+// UpdateApiClient updates an existing api client.
+//
+// It takes an ApiClientOptions struct for specifying Id, name and other
+// attributes. Not all attributes can be specified at update time.
+func (c *Client) UpdateApiClient(options *ApiClientOptions) (*ApiClient, error) {
+ ac := new(ApiClient)
+ _, err := c.MakeApiRequest("PUT", "/1.0/api_clients/"+options.Id, options, &ac)
+ if err != nil {
+ return nil, err
+ }
+ return ac, nil
+}
+
+// DestroyApiClient issues a request to delete an existing api client
+func (c *Client) DestroyApiClient(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/api_clients/"+identifier, nil, nil)
+ return err
+}
+
+// ResetSecretForApiClient requests a reset of the secret of an existing api client
+func (c *Client) ResetSecretForApiClient(identifier string) (*ApiClient, error) {
+ ac := new(ApiClient)
+ _, err := c.MakeApiRequest("POST", "/1.0/api_clients/"+identifier+"/reset_secret", nil, &ac)
+ if err != nil {
+ return nil, err
+ }
+ return ac, nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/brightbox.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/brightbox.go
new file mode 100644
index 000000000000..b44b7c96ca89
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/brightbox.go
@@ -0,0 +1,236 @@
+// Package gobrightbox is for interacting with the Brightbox Cloud API
+//
+// Brightbox Cloud is a UK-based infrastructure-as-a-service
+// provider. More details available at https://www.brightbox.com
+//
+// The Brightbox Cloud API documentation is available at
+// https://api.gb1.brightbox.com/1.0/
+package gobrightbox
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/linkheader"
+)
+
+const (
+ // DefaultRegionApiURL is the default API URL for the region. Use with NewClient.
+ DefaultRegionApiURL = "https://api.gb1.brightbox.com/"
+ // DefaultOrbitAuthURL is the default Auth URL for Orbit.
+ DefaultOrbitAuthURL = "https://orbit.brightbox.com/v1/"
+)
+
+// Client represents a connection to the Brightbox API. You should use NewClient
+// to allocate and configure Clients. Authentication is handled externally by a
+// http.Client with the appropriate Transport, such as those provided by
+// https://github.com/golang/oauth2/
+type Client struct {
+ BaseURL *url.URL
+ client *http.Client
+ UserAgent string
+ // The identifier of the account to use by default with this Client.
+ AccountId string
+}
+
+// ApiError can be returned when an API request fails. It provides any error
+// messages provided by the API, along with other details about the response.
+type ApiError struct {
+ // StatusCode will hold the HTTP status code from the request that errored
+ StatusCode int
+ // Status will hold the HTTP status line from the request that errored
+ Status string
+ // AuthError will hold any available OAuth "error" field contents. See
+ // https://api.gb1.brightbox.com/1.0/#errors
+ AuthError string `json:"error"`
+ // AuthErrorDescription will hold any available OAuth "error_description"
+ // field contents. See https://api.gb1.brightbox.com/1.0/#errors
+ AuthErrorDescription string `json:"error_description"`
+ // ErrorName will hold any available Brightbox API "error_name" field
+ // contents. See https://api.gb1.brightbox.com/1.0/#request_errors
+ ErrorName string `json:"error_name"`
+ // Errors will hold any available Brightbox API "errors" field contents. See
+ // https://api.gb1.brightbox.com/1.0/#request_errors
+ Errors []string `json:"errors"`
+ // ParseError will hold any errors from the JSON parser whilst parsing an
+ // API response
+ ParseError *error
+ // RequestUrl will hold the full URL used to make the request that errored,
+ // if available
+ RequestUrl *url.URL
+	// ResponseBody will hold the raw response body of the request that errored,
+ // if available
+ ResponseBody []byte
+}
+
+func (e ApiError) Error() string {
+ var url string
+ if e.RequestUrl != nil {
+ url = e.RequestUrl.String()
+ }
+ if e.ParseError != nil {
+ return fmt.Sprintf("%d: %s: %s", e.StatusCode, url, *e.ParseError)
+ }
+
+ var msg string
+ if e.AuthError != "" {
+ msg = fmt.Sprintf("%s, %s", e.AuthError, e.AuthErrorDescription)
+ }
+ if e.ErrorName != "" {
+ msg = e.ErrorName
+ if len(e.Errors) == 1 {
+ msg = msg + ": " + e.Errors[0]
+ } else if len(e.Errors) > 1 {
+ msg = fmt.Sprintf("%s: %s", msg, e.Errors)
+ }
+
+ }
+ if msg == "" {
+ msg = fmt.Sprintf("%s: %s", e.Status, url)
+ }
+ return msg
+}
+
+// NewClient allocates and configures a Client for interacting with the API.
+//
+// apiURL should be a URL of the form https://api.region.brightbox.com,
+// e.g: https://api.gb1.brightbox.com. You can use the default defined in
+// this package instead, i.e. DefaultRegionApiURL
+//
+// accountId should be the identifier of the default account to be used with
+// this Client. Clients authenticated with Brightbox ApiClient credentials are
+// only ever associated with one single Account, so you can leave this empty for
+// those. Clients authenticated with Brightbox User credentials can have access
+// to multiple accounts, so this parameter should be provided.
+//
+// httpClient should be a http.Client with a transport that will handle the
+// OAuth token authentication, such as those provided by
+// https://github.com/golang/oauth2/
+func NewClient(apiURL string, accountID string, httpClient *http.Client) (*Client, error) {
+ if httpClient == nil {
+ httpClient = http.DefaultClient
+ }
+ au, err := url.Parse(apiURL)
+ if err != nil {
+ return nil, err
+ }
+
+ c := &Client{
+ client: httpClient,
+ BaseURL: au,
+ AccountId: accountID,
+ }
+ return c, nil
+}
+
+// NewRequest allocates and configures a http.Request ready to make an API call.
+//
+// method should be the desired http method, e.g: "GET", "POST", "PUT" etc.
+//
+// urlStr should be the url path, relative to the api url e.g: "/1.0/servers"
+//
+// if body is non-nil, it will be Marshaled to JSON and set as the request body
+func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {
+ rel, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, err
+ }
+
+ u := c.BaseURL.ResolveReference(rel)
+
+ if c.AccountId != "" {
+ q := u.Query()
+ q.Set("account_id", c.AccountId)
+ u.RawQuery = q.Encode()
+ }
+
+ var buf io.ReadWriter
+ if body != nil {
+ buf = new(bytes.Buffer)
+ err := json.NewEncoder(buf).Encode(body)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ req, err := http.NewRequest(method, u.String(), buf)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Add("Accept", "application/json")
+ req.Header.Add("Content-Type", "application/json")
+
+ if c.UserAgent != "" {
+ req.Header.Add("User-Agent", c.UserAgent)
+ }
+ return req, nil
+}
+
+// MakeApiRequest makes an http request to the API, JSON encoding any given data
+// and decoding any JSON response.
+//
+// method should be the desired http method, e.g: "GET", "POST", "PUT" etc.
+//
+// urlStr should be the url path, relative to the api url e.g: "/1.0/servers"
+//
+// if reqBody is non-nil, it will be Marshaled to JSON and set as the request
+// body.
+//
+// Optionally, the response body will be Unmarshaled from JSON into whatever
+// resBody is a pointer to. Leave nil to skip.
+//
+// If the response is non-2xx, MakeApiRequest will try to parse the error
+// message and return an ApiError struct.
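+//
+// For example, given a configured Client c (an illustrative sketch; the path
+// is the one used by ServerGroups elsewhere in this package):
+//
+//	var groups []ServerGroup
+//	_, err := c.MakeApiRequest("GET", "/1.0/server_groups", nil, &groups)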
+func (c *Client) MakeApiRequest(method string, path string, reqBody interface{}, resBody interface{}) (*http.Response, error) {
+ req, err := c.NewRequest(method, path, reqBody)
+ if err != nil {
+ return nil, err
+ }
+ res, err := c.client.Do(req)
+ if err != nil {
+ return res, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode >= 200 && res.StatusCode <= 299 {
+ if resBody != nil {
+ err := json.NewDecoder(res.Body).Decode(resBody)
+ if err != nil {
+ return res, ApiError{
+ RequestUrl: res.Request.URL,
+ StatusCode: res.StatusCode,
+ Status: res.Status,
+ ParseError: &err,
+ }
+ }
+ }
+ return res, nil
+ }
+ apierr := ApiError{
+ RequestUrl: res.Request.URL,
+ StatusCode: res.StatusCode,
+ Status: res.Status,
+ }
+ body, _ := ioutil.ReadAll(res.Body)
+ err = json.Unmarshal(body, &apierr)
+ apierr.ResponseBody = body
+ return res, apierr
+}
+
+func getLinkRel(header string, prefix string, rel string) *string {
+ links := linkheader.Parse(header)
+ re := regexp.MustCompile(prefix + "-[^/]+")
+ for _, link := range links {
+ id := re.FindString(link.URL)
+ if id != "" && link.Rel == rel {
+ return &id
+ }
+ }
+ return nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/build.sh b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/build.sh
new file mode 100644
index 000000000000..57800760bc45
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/build.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+set -ex
+
+go get -t -v -d
+go test -v
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/cloud_ips.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/cloud_ips.go
new file mode 100644
index 000000000000..322ea76de558
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/cloud_ips.go
@@ -0,0 +1,140 @@
+package gobrightbox
+
+import (
+ "fmt"
+)
+
+// CloudIP represents a Cloud IP
+// https://api.gb1.brightbox.com/1.0/#cloud_ip
+type CloudIP struct {
+ Id string
+ Name string
+ PublicIP string `json:"public_ip"`
+ PublicIPv4 string `json:"public_ipv4"`
+ PublicIPv6 string `json:"public_ipv6"`
+ Status string
+ ReverseDns string `json:"reverse_dns"`
+ PortTranslators []PortTranslator `json:"port_translators"`
+ Account Account
+ Fqdn string
+ Interface *ServerInterface
+ Server *Server
+ ServerGroup *ServerGroup `json:"server_group"`
+ LoadBalancer *LoadBalancer `json:"load_balancer"`
+ DatabaseServer *DatabaseServer `json:"database_server"`
+}
+
+// PortTranslator represents a port translator on a Cloud IP
+type PortTranslator struct {
+ Incoming int `json:"incoming"`
+ Outgoing int `json:"outgoing"`
+ Protocol string `json:"protocol"`
+}
+
+// CloudIPOptions is used in conjunction with CreateCloudIP and UpdateCloudIP to
+// create and update cloud IPs.
+type CloudIPOptions struct {
+ Id string `json:"-"`
+ ReverseDns *string `json:"reverse_dns,omitempty"`
+ Name *string `json:"name,omitempty"`
+ PortTranslators []PortTranslator `json:"port_translators,omitempty"`
+}
+
+// CloudIPs retrieves a list of all cloud ips
+func (c *Client) CloudIPs() ([]CloudIP, error) {
+ var cloudips []CloudIP
+ _, err := c.MakeApiRequest("GET", "/1.0/cloud_ips", nil, &cloudips)
+ if err != nil {
+ return nil, err
+ }
+ return cloudips, err
+}
+
+// CloudIP retrieves a detailed view of one cloud ip
+func (c *Client) CloudIP(identifier string) (*CloudIP, error) {
+ cloudip := new(CloudIP)
+ _, err := c.MakeApiRequest("GET", "/1.0/cloud_ips/"+identifier, nil, cloudip)
+ if err != nil {
+ return nil, err
+ }
+ return cloudip, err
+}
+
+// DestroyCloudIP issues a request to destroy the cloud ip
+func (c *Client) DestroyCloudIP(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/cloud_ips/"+identifier, nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// CreateCloudIP creates a new Cloud IP.
+//
+// It takes a CloudIPOptions struct for specifying name and other attributes.
+// Not all attributes can be specified at create time (such as Id, which is
+// allocated for you)
+func (c *Client) CreateCloudIP(newCloudIP *CloudIPOptions) (*CloudIP, error) {
+ cloudip := new(CloudIP)
+ _, err := c.MakeApiRequest("POST", "/1.0/cloud_ips", newCloudIP, &cloudip)
+ if err != nil {
+ return nil, err
+ }
+ return cloudip, nil
+}
+
+// UpdateCloudIP updates an existing cloud ip's attributes. Not all attributes
+// can be changed after creation time (such as Id, which is allocated for you).
+//
+// Specify the cloud ip you want to update using the CloudIPOptions Id field
+func (c *Client) UpdateCloudIP(updateCloudIP *CloudIPOptions) (*CloudIP, error) {
+ cip := new(CloudIP)
+ _, err := c.MakeApiRequest("PUT", "/1.0/cloud_ips/"+updateCloudIP.Id, updateCloudIP, &cip)
+ if err != nil {
+ return nil, err
+ }
+ return cip, nil
+}
+
+// MapCloudIP issues a request to map the cloud ip to the destination. The
+// destination can be an identifier of any resource capable of receiving a Cloud
+// IP, such as a server interface, a load balancer, or a cloud sql instance.
+//
+// To map a Cloud IP to a server, first look up the server to get its interface
+// identifier (or use the MapCloudIPtoServer convenience method)
+func (c *Client) MapCloudIP(identifier string, destination string) error {
+ _, err := c.MakeApiRequest("POST", "/1.0/cloud_ips/"+identifier+"/map",
+ map[string]string{"destination": destination}, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// MapCloudIPtoServer is a convenience method to map a Cloud IP to a
+// server. First looks up the server to get the network interface id. Uses the
+// first interface found.
+func (c *Client) MapCloudIPtoServer(identifier string, serverid string) error {
+ server, err := c.Server(serverid)
+ if err != nil {
+ return err
+ }
+ if len(server.Interfaces) == 0 {
+ return fmt.Errorf("Server %s has no interfaces to map cloud ip %s to", server.Id, identifier)
+ }
+ destination := server.Interfaces[0].Id
+ err = c.MapCloudIP(identifier, destination)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// UnMapCloudIP issues a request to unmap the cloud ip.
+func (c *Client) UnMapCloudIP(identifier string) error {
+ _, err := c.MakeApiRequest("POST", "/1.0/cloud_ips/"+identifier+"/unmap", nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/collaborations.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/collaborations.go
new file mode 100644
index 000000000000..7096dea8dc97
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/collaborations.go
@@ -0,0 +1,42 @@
+package gobrightbox
+
+import (
+ "time"
+)
+
+// Collaboration represents a User's links to its Accounts
+// https://api.gb1.brightbox.com/1.0/#user
+type Collaboration struct {
+ Id string
+ Email string
+ Role string
+ RoleLabel string `json:"role_label"`
+ Status string
+ CreatedAt *time.Time `json:"created_at"`
+ StartedAt *time.Time `json:"started_at"`
+ FinishedAt *time.Time `json:"finished_at"`
+ Account Account
+ User User
+ Inviter User
+}
+
+// Collaborations retrieves a list of all the current user's collaborations
+func (c *Client) Collaborations() ([]Collaboration, error) {
+ var cl []Collaboration
+ _, err := c.MakeApiRequest("GET", "/1.0/user/collaborations", nil, &cl)
+ if err != nil {
+ return nil, err
+ }
+ return cl, err
+}
+
+// Collaboration retrieves a detailed view of one of the current user's
+// collaborations
+func (c *Client) Collaboration(identifier string) (*Collaboration, error) {
+ col := new(Collaboration)
+ _, err := c.MakeApiRequest("GET", "/1.0/user/collaborations/"+identifier, nil, col)
+ if err != nil {
+ return nil, err
+ }
+ return col, err
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/config_maps.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/config_maps.go
new file mode 100644
index 000000000000..45ca56473fa8
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/config_maps.go
@@ -0,0 +1,73 @@
+package gobrightbox
+
+// ConfigMap represents a config map
+// https://api.gb1.brightbox.com/1.0/#config_maps
+type ConfigMap struct {
+ Id string `json:"id"`
+ Name string `json:"name"`
+ Data map[string]interface{} `json:"data"`
+}
+
+// ConfigMapOptions is used in combination with CreateConfigMap and
+// UpdateConfigMap to create and update config maps
+type ConfigMapOptions struct {
+ Id string `json:"-"`
+ Name *string `json:"name,omitempty"`
+ Data *map[string]interface{} `json:"data,omitempty"`
+}
+
+// ConfigMaps retrieves a list of all config maps
+func (c *Client) ConfigMaps() ([]ConfigMap, error) {
+ var configMaps []ConfigMap
+ _, err := c.MakeApiRequest("GET", "/1.0/config_maps", nil, &configMaps)
+ if err != nil {
+ return nil, err
+ }
+ return configMaps, err
+}
+
+// ConfigMap retrieves a detailed view of one config map
+func (c *Client) ConfigMap(identifier string) (*ConfigMap, error) {
+ configMap := new(ConfigMap)
+ _, err := c.MakeApiRequest("GET", "/1.0/config_maps/"+identifier, nil, configMap)
+ if err != nil {
+ return nil, err
+ }
+ return configMap, err
+}
+
+// CreateConfigMap creates a new config map
+//
+// It takes an instance of ConfigMapOptions. Not all attributes can be
+// specified at create time (such as Id, which is allocated for you).
+func (c *Client) CreateConfigMap(newConfigMap *ConfigMapOptions) (*ConfigMap, error) {
+ configMap := new(ConfigMap)
+ _, err := c.MakeApiRequest("POST", "/1.0/config_maps", newConfigMap, &configMap)
+ if err != nil {
+ return nil, err
+ }
+ return configMap, nil
+}
+
+// UpdateConfigMap updates an existing config maps's attributes. Not all
+// attributes can be changed (such as Id).
+//
+// Specify the config map you want to update using the ConfigMapOptions Id
+// field.
+func (c *Client) UpdateConfigMap(updateConfigMap *ConfigMapOptions) (*ConfigMap, error) {
+ configMap := new(ConfigMap)
+ _, err := c.MakeApiRequest("PUT", "/1.0/config_maps/"+updateConfigMap.Id, updateConfigMap, &configMap)
+ if err != nil {
+ return nil, err
+ }
+ return configMap, nil
+}
+
+// DestroyConfigMap destroys an existing config map
+func (c *Client) DestroyConfigMap(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/config_maps/"+identifier, nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/database_server_types.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/database_server_types.go
new file mode 100644
index 000000000000..38f272e4d535
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/database_server_types.go
@@ -0,0 +1,29 @@
+package gobrightbox
+
+// DatabaseServerType represents a database server type
+// https://api.gb1.brightbox.com/1.0/#database_type
+type DatabaseServerType struct {
+ Id string
+ Name string
+ Description string
+ DiskSize int `json:"disk_size"`
+ RAM int
+}
+
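+// DatabaseServerTypes retrieves a list of all database server types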
+func (c *Client) DatabaseServerTypes() ([]DatabaseServerType, error) {
+ var databaseservertypes []DatabaseServerType
+ _, err := c.MakeApiRequest("GET", "/1.0/database_types", nil, &databaseservertypes)
+ if err != nil {
+ return nil, err
+ }
+ return databaseservertypes, err
+}
+
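+// DatabaseServerType retrieves a detailed view of one database server type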
+func (c *Client) DatabaseServerType(identifier string) (*DatabaseServerType, error) {
+ databaseservertype := new(DatabaseServerType)
+ _, err := c.MakeApiRequest("GET", "/1.0/database_types/"+identifier, nil, databaseservertype)
+ if err != nil {
+ return nil, err
+ }
+ return databaseservertype, err
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/database_servers.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/database_servers.go
new file mode 100644
index 000000000000..418eac57f5ce
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/database_servers.go
@@ -0,0 +1,127 @@
+package gobrightbox
+
+import (
+ "time"
+)
+
+// DatabaseServer represents a database server.
+// https://api.gb1.brightbox.com/1.0/#database_server
+type DatabaseServer struct {
+ Id string
+ Name string
+ Description string
+ Status string
+ Account Account
+ DatabaseEngine string `json:"database_engine"`
+ DatabaseVersion string `json:"database_version"`
+ AdminUsername string `json:"admin_username"`
+ AdminPassword string `json:"admin_password"`
+ CreatedAt *time.Time `json:"created_at"`
+ UpdatedAt *time.Time `json:"updated_at"`
+ DeletedAt *time.Time `json:"deleted_at"`
+ SnapshotsScheduleNextAt *time.Time `json:"snapshots_schedule_next_at"`
+ AllowAccess []string `json:"allow_access"`
+ MaintenanceWeekday int `json:"maintenance_weekday"`
+ MaintenanceHour int `json:"maintenance_hour"`
+ SnapshotsSchedule string `json:"snapshots_schedule"`
+ CloudIPs []CloudIP `json:"cloud_ips"`
+ DatabaseServerType DatabaseServerType `json:"database_server_type"`
+ Locked bool
+ Zone Zone
+}
+
+// DatabaseServerOptions is used in conjunction with CreateDatabaseServer and
+// UpdateDatabaseServer to create and update database servers.
+type DatabaseServerOptions struct {
+ Id string `json:"-"`
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Engine string `json:"engine,omitempty"`
+ Version string `json:"version,omitempty"`
+ AllowAccess []string `json:"allow_access,omitempty"`
+ Snapshot string `json:"snapshot,omitempty"`
+ Zone string `json:"zone,omitempty"`
+ DatabaseType string `json:"database_type,omitempty"`
+ MaintenanceWeekday *int `json:"maintenance_weekday,omitempty"`
+ MaintenanceHour *int `json:"maintenance_hour,omitempty"`
+ SnapshotsSchedule *string `json:"snapshots_schedule,omitempty"`
+}
+
+// DatabaseServers retrieves a list of all database servers
+func (c *Client) DatabaseServers() ([]DatabaseServer, error) {
+ var dbs []DatabaseServer
+ _, err := c.MakeApiRequest("GET", "/1.0/database_servers", nil, &dbs)
+ if err != nil {
+ return nil, err
+ }
+ return dbs, err
+}
+
+// DatabaseServer retrieves a detailed view of one database server
+func (c *Client) DatabaseServer(identifier string) (*DatabaseServer, error) {
+ dbs := new(DatabaseServer)
+ _, err := c.MakeApiRequest("GET", "/1.0/database_servers/"+identifier, nil, dbs)
+ if err != nil {
+ return nil, err
+ }
+ return dbs, err
+}
+
+// CreateDatabaseServer creates a new database server.
+//
+// It takes a DatabaseServerOptions struct for specifying name and other
+// attributes. Not all attributes can be specified at create time
+// (such as Id, which is allocated for you)
+func (c *Client) CreateDatabaseServer(options *DatabaseServerOptions) (*DatabaseServer, error) {
+ dbs := new(DatabaseServer)
+ _, err := c.MakeApiRequest("POST", "/1.0/database_servers", options, &dbs)
+ if err != nil {
+ return nil, err
+ }
+ return dbs, nil
+}
+
+// UpdateDatabaseServer updates an existing database server.
+//
+// It takes a DatabaseServerOptions struct for specifying Id, name and other
+// attributes. Not all attributes can be specified at update time.
+func (c *Client) UpdateDatabaseServer(options *DatabaseServerOptions) (*DatabaseServer, error) {
+ dbs := new(DatabaseServer)
+ _, err := c.MakeApiRequest("PUT", "/1.0/database_servers/"+options.Id, options, &dbs)
+ if err != nil {
+ return nil, err
+ }
+ return dbs, nil
+}
+
+// DestroyDatabaseServer issues a request to delete an existing database server
+func (c *Client) DestroyDatabaseServer(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/database_servers/"+identifier, nil, nil)
+ return err
+}
+
+// SnapshotDatabaseServer requests a snapshot of an existing database server.
+func (c *Client) SnapshotDatabaseServer(identifier string) (*DatabaseSnapshot, error) {
+ dbs := new(DatabaseServer)
+ res, err := c.MakeApiRequest("POST", "/1.0/database_servers/"+identifier+"/snapshot", nil, &dbs)
+ if err != nil {
+ return nil, err
+ }
+ snapID := getLinkRel(res.Header.Get("Link"), "dbi", "snapshot")
+ if snapID != nil {
+ snap := new(DatabaseSnapshot)
+ snap.Id = *snapID
+ return snap, nil
+ }
+ return nil, nil
+}
+
+// ResetPasswordForDatabaseServer requests a reset of the admin password of an existing database server.
+func (c *Client) ResetPasswordForDatabaseServer(identifier string) (*DatabaseServer, error) {
+ dbs := new(DatabaseServer)
+ _, err := c.MakeApiRequest("POST", "/1.0/database_servers/"+identifier+"/reset_password", nil, &dbs)
+ if err != nil {
+ return nil, err
+ }
+ return dbs, nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/database_snapshot.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/database_snapshot.go
new file mode 100644
index 000000000000..c3ea0f236ee2
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/database_snapshot.go
@@ -0,0 +1,51 @@
+package gobrightbox
+
+import (
+ "time"
+)
+
+// DatabaseSnapshot represents a snapshot of a database server.
+// https://api.gb1.brightbox.com/1.0/#database_snapshot
+type DatabaseSnapshot struct {
+ Id string
+ Name string
+ Description string
+ Status string
+ Account Account
+ DatabaseEngine string `json:"database_engine"`
+ DatabaseVersion string `json:"database_version"`
+ Size int
+ CreatedAt *time.Time `json:"created_at"`
+ UpdatedAt *time.Time `json:"updated_at"`
+ DeletedAt *time.Time `json:"deleted_at"`
+ Locked bool
+}
+
+// DatabaseSnapshots retrieves a list of all database snapshots
+func (c *Client) DatabaseSnapshots() ([]DatabaseSnapshot, error) {
+	var databaseSnapshots []DatabaseSnapshot
+	_, err := c.MakeApiRequest("GET", "/1.0/database_snapshots", nil, &databaseSnapshots)
+	if err != nil {
+		return nil, err
+	}
+	return databaseSnapshots, err
+}
+
+// DatabaseSnapshot retrieves a detailed view of one database snapshot
+func (c *Client) DatabaseSnapshot(identifier string) (*DatabaseSnapshot, error) {
+	databaseSnapshot := new(DatabaseSnapshot)
+	_, err := c.MakeApiRequest("GET", "/1.0/database_snapshots/"+identifier, nil, databaseSnapshot)
+	if err != nil {
+		return nil, err
+	}
+	return databaseSnapshot, err
+}
+
+// DestroyDatabaseSnapshot issues a request to destroy the database snapshot
+func (c *Client) DestroyDatabaseSnapshot(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/database_snapshots/"+identifier, nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/firewall_policies.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/firewall_policies.go
new file mode 100644
index 000000000000..905e19c3b814
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/firewall_policies.go
@@ -0,0 +1,111 @@
+package gobrightbox
+
+import (
+ "time"
+)
+
+// FirewallPolicy represents a firewall policy.
+// https://api.gb1.brightbox.com/1.0/#firewall_policy
+type FirewallPolicy struct {
+ Id string
+ Name string
+ Default bool
+ CreatedAt time.Time `json:"created_at"`
+ Description string
+ ServerGroup *ServerGroup `json:"server_group"`
+ Rules []FirewallRule `json:"rules"`
+}
+
+// FirewallPolicyOptions is used in conjunction with CreateFirewallPolicy and
+// UpdateFirewallPolicy to create and update firewall policies.
+type FirewallPolicyOptions struct {
+ Id string `json:"-"`
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ ServerGroup *string `json:"server_group,omitempty"`
+}
+
+// FirewallPolicies retrieves a list of all firewall policies
+func (c *Client) FirewallPolicies() ([]FirewallPolicy, error) {
+ var policies []FirewallPolicy
+ _, err := c.MakeApiRequest("GET", "/1.0/firewall_policies", nil, &policies)
+ if err != nil {
+ return nil, err
+ }
+ return policies, err
+}
+
+// FirewallPolicy retrieves a detailed view of one firewall policy
+func (c *Client) FirewallPolicy(identifier string) (*FirewallPolicy, error) {
+ policy := new(FirewallPolicy)
+ _, err := c.MakeApiRequest("GET", "/1.0/firewall_policies/"+identifier, nil, policy)
+ if err != nil {
+ return nil, err
+ }
+ return policy, err
+}
+
+// CreateFirewallPolicy creates a new firewall policy.
+//
+// It takes a FirewallPolicyOptions struct for specifying name and other
+// attributes. Not all attributes can be specified at create time (such as Id,
+// which is allocated for you)
+func (c *Client) CreateFirewallPolicy(policyOptions *FirewallPolicyOptions) (*FirewallPolicy, error) {
+ policy := new(FirewallPolicy)
+ _, err := c.MakeApiRequest("POST", "/1.0/firewall_policies", policyOptions, &policy)
+ if err != nil {
+ return nil, err
+ }
+ return policy, nil
+}
+
+// UpdateFirewallPolicy updates an existing firewall policy.
+//
+// It takes a FirewallPolicyOptions struct for specifying name and other
+// attributes. Not all attributes can be updated (such as server_group, which is
+// instead changed with ApplyFirewallPolicy).
+//
+// Specify the policy you want to update using the Id field
+func (c *Client) UpdateFirewallPolicy(policyOptions *FirewallPolicyOptions) (*FirewallPolicy, error) {
+ policy := new(FirewallPolicy)
+ _, err := c.MakeApiRequest("PUT", "/1.0/firewall_policies/"+policyOptions.Id, policyOptions, &policy)
+ if err != nil {
+ return nil, err
+ }
+ return policy, nil
+}
+
+// DestroyFirewallPolicy issues a request to destroy the firewall policy
+func (c *Client) DestroyFirewallPolicy(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/firewall_policies/"+identifier, nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// ApplyFirewallPolicy issues a request to apply the given firewall policy to
+// the given server group.
+//
+func (c *Client) ApplyFirewallPolicy(policyId string, serverGroupId string) (*FirewallPolicy, error) {
+ policy := new(FirewallPolicy)
+ _, err := c.MakeApiRequest("POST", "/1.0/firewall_policies/"+policyId+"/apply_to",
+ map[string]string{"server_group": serverGroupId}, &policy)
+ if err != nil {
+ return nil, err
+ }
+ return policy, nil
+}
+
+// RemoveFirewallPolicy issues a request to remove the given firewall policy from
+// the given server group.
+//
+func (c *Client) RemoveFirewallPolicy(policyId string, serverGroupId string) (*FirewallPolicy, error) {
+ policy := new(FirewallPolicy)
+ _, err := c.MakeApiRequest("POST", "/1.0/firewall_policies/"+policyId+"/remove",
+ map[string]string{"server_group": serverGroupId}, &policy)
+ if err != nil {
+ return nil, err
+ }
+ return policy, nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/firewall_rules.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/firewall_rules.go
new file mode 100644
index 000000000000..8f18f75ab9f5
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/firewall_rules.go
@@ -0,0 +1,80 @@
+package gobrightbox
+
+import (
+ "time"
+)
+
+// FirewallRule represents a firewall rule.
+// https://api.gb1.brightbox.com/1.0/#firewall_rule
+type FirewallRule struct {
+ Id string
+ Source string `json:"source"`
+ SourcePort string `json:"source_port"`
+ Destination string `json:"destination"`
+ DestinationPort string `json:"destination_port"`
+ Protocol string `json:"protocol"`
+ IcmpTypeName string `json:"icmp_type_name"`
+ CreatedAt time.Time `json:"created_at"`
+ Description string `json:"description"`
+ FirewallPolicy FirewallPolicy `json:"firewall_policy"`
+}
+
+// FirewallRuleOptions is used in conjunction with CreateFirewallRule and
+// UpdateFirewallRule to create and update firewall rules.
+type FirewallRuleOptions struct {
+ Id string `json:"-"`
+ FirewallPolicy string `json:"firewall_policy,omitempty"`
+ Protocol *string `json:"protocol,omitempty"`
+ Source *string `json:"source,omitempty"`
+ SourcePort *string `json:"source_port,omitempty"`
+ Destination *string `json:"destination,omitempty"`
+ DestinationPort *string `json:"destination_port,omitempty"`
+ IcmpTypeName *string `json:"icmp_type_name,omitempty"`
+ Description *string `json:"description,omitempty"`
+}
+
+// FirewallRule retrieves a detailed view of one firewall rule
+func (c *Client) FirewallRule(identifier string) (*FirewallRule, error) {
+ rule := new(FirewallRule)
+ _, err := c.MakeApiRequest("GET", "/1.0/firewall_rules/"+identifier, nil, rule)
+ if err != nil {
+ return nil, err
+ }
+ return rule, err
+}
+
+// CreateFirewallRule creates a new firewall rule.
+//
+// It takes a FirewallRuleOptions struct for specifying name and other
+// attributes. Not all attributes can be specified at create time
+// (such as Id, which is allocated for you)
+func (c *Client) CreateFirewallRule(ruleOptions *FirewallRuleOptions) (*FirewallRule, error) {
+ rule := new(FirewallRule)
+ _, err := c.MakeApiRequest("POST", "/1.0/firewall_rules", ruleOptions, &rule)
+ if err != nil {
+ return nil, err
+ }
+ return rule, nil
+}
+
+// UpdateFirewallRule updates an existing firewall rule.
+//
+// It takes a FirewallRuleOptions struct for specifying the attributes. Not all
+// attributes can be updated (such as firewall_policy)
+func (c *Client) UpdateFirewallRule(ruleOptions *FirewallRuleOptions) (*FirewallRule, error) {
+ rule := new(FirewallRule)
+ _, err := c.MakeApiRequest("PUT", "/1.0/firewall_rules/"+ruleOptions.Id, ruleOptions, &rule)
+ if err != nil {
+ return nil, err
+ }
+ return rule, nil
+}
+
+// DestroyFirewallRule destroys an existing firewall rule
+func (c *Client) DestroyFirewallRule(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/firewall_rules/"+identifier, nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/images.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/images.go
new file mode 100644
index 000000000000..4f6060609a0b
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/images.go
@@ -0,0 +1,57 @@
+package gobrightbox
+
+import (
+ "time"
+)
+
+// Image represents a Machine Image
+// https://api.gb1.brightbox.com/1.0/#image
+type Image struct {
+ Id string
+ Name string
+ Username string
+ Status string
+ Locked bool
+ Description string
+ Source string
+ Arch string
+ CreatedAt time.Time `json:"created_at"`
+ Official bool
+ Public bool
+ Owner string
+ SourceType string `json:"source_type"`
+ VirtualSize int `json:"virtual_size"`
+ DiskSize int `json:"disk_size"`
+ CompatibilityMode bool `json:"compatibility_mode"`
+ AncestorId string `json:"ancestor_id"`
+ LicenceName string `json:"licence_name"`
+}
+
+// Images retrieves a list of all images
+func (c *Client) Images() ([]Image, error) {
+ var images []Image
+ _, err := c.MakeApiRequest("GET", "/1.0/images", nil, &images)
+ if err != nil {
+ return nil, err
+ }
+ return images, err
+}
+
+// Image retrieves a detailed view of one image
+func (c *Client) Image(identifier string) (*Image, error) {
+ image := new(Image)
+ _, err := c.MakeApiRequest("GET", "/1.0/images/"+identifier, nil, image)
+ if err != nil {
+ return nil, err
+ }
+ return image, err
+}
+
+// DestroyImage issues a request to destroy the image
+func (c *Client) DestroyImage(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/images/"+identifier, nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/load_balancers.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/load_balancers.go
new file mode 100644
index 000000000000..fce5bd1bb32f
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/load_balancers.go
@@ -0,0 +1,200 @@
+package gobrightbox
+
+import (
+ "time"
+)
+
+// LoadBalancer represents a Load Balancer
+// https://api.gb1.brightbox.com/1.0/#load_balancer
+type LoadBalancer struct {
+ Id string
+ Name string
+ Status string
+ CreatedAt *time.Time `json:"created_at"`
+ DeletedAt *time.Time `json:"deleted_at"`
+ Locked bool
+ HttpsRedirect bool `json:"https_redirect"`
+ SslMinimumVersion string `json:"ssl_minimum_version"`
+ Account Account
+ Nodes []Server
+ CloudIPs []CloudIP `json:"cloud_ips"`
+ Policy string
+ BufferSize int `json:"buffer_size"`
+ Listeners []LoadBalancerListener
+ Healthcheck LoadBalancerHealthcheck
+ Certificate *LoadBalancerCertificate
+ Acme *LoadBalancerAcme
+}
+
+// LoadBalancerCertificate represents a certificate on a LoadBalancer
+type LoadBalancerCertificate struct {
+ ExpiresAt time.Time `json:"expires_at"`
+ ValidFrom time.Time `json:"valid_from"`
+ SslV3 bool `json:"sslv3"`
+ Issuer string `json:"issuer"`
+ Subject string `json:"subject"`
+}
+
+// LoadBalancerAcme represents an ACME object on a LoadBalancer
+type LoadBalancerAcme struct {
+ Certificate *LoadBalancerAcmeCertificate `json:"certificate"`
+ Domains []LoadBalancerAcmeDomain `json:"domains"`
+}
+
+// LoadBalancerAcmeCertificate represents an ACME issued certificate on
+// a LoadBalancer
+type LoadBalancerAcmeCertificate struct {
+ Fingerprint string `json:"fingerprint"`
+ ExpiresAt time.Time `json:"expires_at"`
+ IssuedAt time.Time `json:"issued_at"`
+}
+
+// LoadBalancerAcmeDomain represents a domain for which ACME support
+// has been requested
+type LoadBalancerAcmeDomain struct {
+ Identifier string `json:"identifier"`
+ Status string `json:"status"`
+ LastMessage string `json:"last_message"`
+}
+
+// LoadBalancerHealthcheck represents a health check on a LoadBalancer
+type LoadBalancerHealthcheck struct {
+ Type string `json:"type"`
+ Port int `json:"port"`
+ Request string `json:"request,omitempty"`
+ Interval int `json:"interval,omitempty"`
+ Timeout int `json:"timeout,omitempty"`
+ ThresholdUp int `json:"threshold_up,omitempty"`
+ ThresholdDown int `json:"threshold_down,omitempty"`
+}
+
+// LoadBalancerListener represents a listener on a LoadBalancer
+type LoadBalancerListener struct {
+ Protocol string `json:"protocol,omitempty"`
+ In int `json:"in,omitempty"`
+ Out int `json:"out,omitempty"`
+ Timeout int `json:"timeout,omitempty"`
+ ProxyProtocol string `json:"proxy_protocol,omitempty"`
+}
+
+// LoadBalancerOptions is used in conjunction with CreateLoadBalancer and
+// UpdateLoadBalancer to create and update load balancers
+type LoadBalancerOptions struct {
+ Id string `json:"-"`
+ Name *string `json:"name,omitempty"`
+ Nodes []LoadBalancerNode `json:"nodes,omitempty"`
+ Policy *string `json:"policy,omitempty"`
+ BufferSize *int `json:"buffer_size,omitempty"`
+ Listeners []LoadBalancerListener `json:"listeners,omitempty"`
+ Healthcheck *LoadBalancerHealthcheck `json:"healthcheck,omitempty"`
+ Domains *[]string `json:"domains,omitempty"`
+ CertificatePem *string `json:"certificate_pem,omitempty"`
+ CertificatePrivateKey *string `json:"certificate_private_key,omitempty"`
+ SslMinimumVersion *string `json:"ssl_minimum_version,omitempty"`
+ SslV3 *bool `json:"sslv3,omitempty"`
+ HttpsRedirect *bool `json:"https_redirect,omitempty"`
+}
+
+// LoadBalancerNode is used in conjunction with LoadBalancerOptions,
+// AddNodesToLoadBalancer, RemoveNodesFromLoadBalancer to specify a list of
+// servers to use as load balancer nodes. The Node parameter should be a server
+// identifier.
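+//
+// For example (an illustrative sketch; the identifiers are placeholders):
+//
+//	nodes := []LoadBalancerNode{{Node: "srv-aaaaa"}}
+//	lb, err := c.AddNodesToLoadBalancer("lba-bbbbb", nodes)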
+type LoadBalancerNode struct {
+ Node string `json:"node"`
+}
+
+// LoadBalancers retrieves a list of all load balancers
+func (c *Client) LoadBalancers() ([]LoadBalancer, error) {
+ var lbs []LoadBalancer
+ _, err := c.MakeApiRequest("GET", "/1.0/load_balancers", nil, &lbs)
+ if err != nil {
+ return nil, err
+ }
+ return lbs, err
+}
+
+// LoadBalancer retrieves a detailed view of one load balancer
+func (c *Client) LoadBalancer(identifier string) (*LoadBalancer, error) {
+ lb := new(LoadBalancer)
+ _, err := c.MakeApiRequest("GET", "/1.0/load_balancers/"+identifier, nil, lb)
+ if err != nil {
+ return nil, err
+ }
+ return lb, err
+}
+
+// CreateLoadBalancer creates a new load balancer.
+//
+// It takes a LoadBalancerOptions struct for specifying name and other
+// attributes. Not all attributes can be specified at create time (such as Id,
+// which is allocated for you)
+func (c *Client) CreateLoadBalancer(newLB *LoadBalancerOptions) (*LoadBalancer, error) {
+ lb := new(LoadBalancer)
+ _, err := c.MakeApiRequest("POST", "/1.0/load_balancers", newLB, &lb)
+ if err != nil {
+ return nil, err
+ }
+ return lb, nil
+}
+
+// UpdateLoadBalancer updates an existing load balancer.
+//
+// It takes a LoadBalancerOptions struct for specifying name and other
+// attributes. Provide the identifier using the Id attribute.
+func (c *Client) UpdateLoadBalancer(newLB *LoadBalancerOptions) (*LoadBalancer, error) {
+ lb := new(LoadBalancer)
+ _, err := c.MakeApiRequest("PUT", "/1.0/load_balancers/"+newLB.Id, newLB, &lb)
+ if err != nil {
+ return nil, err
+ }
+ return lb, nil
+}
+
+// DestroyLoadBalancer issues a request to destroy the load balancer
+func (c *Client) DestroyLoadBalancer(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/load_balancers/"+identifier, nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// AddNodesToLoadBalancer adds nodes to an existing load balancer.
+func (c *Client) AddNodesToLoadBalancer(loadBalancerID string, nodes []LoadBalancerNode) (*LoadBalancer, error) {
+ lb := new(LoadBalancer)
+ _, err := c.MakeApiRequest("POST", "/1.0/load_balancers/"+loadBalancerID+"/add_nodes", nodes, &lb)
+ if err != nil {
+ return nil, err
+ }
+ return lb, nil
+}
+
+// RemoveNodesFromLoadBalancer removes nodes from an existing load balancer.
+func (c *Client) RemoveNodesFromLoadBalancer(loadBalancerID string, nodes []LoadBalancerNode) (*LoadBalancer, error) {
+ lb := new(LoadBalancer)
+ _, err := c.MakeApiRequest("POST", "/1.0/load_balancers/"+loadBalancerID+"/remove_nodes", nodes, &lb)
+ if err != nil {
+ return nil, err
+ }
+ return lb, nil
+}
+
+// AddListenersToLoadBalancer adds listeners to an existing load balancer.
+func (c *Client) AddListenersToLoadBalancer(loadBalancerID string, listeners []LoadBalancerListener) (*LoadBalancer, error) {
+ lb := new(LoadBalancer)
+ _, err := c.MakeApiRequest("POST", "/1.0/load_balancers/"+loadBalancerID+"/add_listeners", listeners, &lb)
+ if err != nil {
+ return nil, err
+ }
+ return lb, nil
+}
+
+// RemoveListenersFromLoadBalancer removes listeners from an existing load balancer.
+func (c *Client) RemoveListenersFromLoadBalancer(loadBalancerID string, listeners []LoadBalancerListener) (*LoadBalancer, error) {
+ lb := new(LoadBalancer)
+ _, err := c.MakeApiRequest("POST", "/1.0/load_balancers/"+loadBalancerID+"/remove_listeners", listeners, &lb)
+ if err != nil {
+ return nil, err
+ }
+ return lb, nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/resource_locking.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/resource_locking.go
new file mode 100644
index 000000000000..fc71c3b2a5ec
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/resource_locking.go
@@ -0,0 +1,58 @@
+package gobrightbox
+
+import (
+ "fmt"
+)
+
+func resourcePath(resource interface{}) (string, error) {
+ switch resource := resource.(type) {
+ default:
+ return "", fmt.Errorf("Unknown resource type %s", resource)
+ case *Server:
+ return "servers/" + resource.Id, nil
+ case Server:
+ return "servers/" + resource.Id, nil
+ case *Image:
+ return "images/" + resource.Id, nil
+ case Image:
+ return "images/" + resource.Id, nil
+ case *LoadBalancer:
+ return "load_balancers/" + resource.Id, nil
+ case LoadBalancer:
+ return "load_balancers/" + resource.Id, nil
+ case *DatabaseServer:
+ return "database_servers/" + resource.Id, nil
+ case DatabaseServer:
+ return "database_servers/" + resource.Id, nil
+ case *ApiClient:
+ return "api_clients/" + resource.Id, nil
+ case ApiClient:
+ return "api_clients/" + resource.Id, nil
+ }
+}
+
+// LockResource locks a resource against destroy requests. It supports Server,
+// Image, LoadBalancer, DatabaseServer and ApiClient resources.
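+//
+// For example (an illustrative sketch; the identifier is a placeholder):
+//
+//	err := c.LockResource(Server{Id: "srv-aaaaa"})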
+func (c *Client) LockResource(resource interface{}) error {
+ rpath, err := resourcePath(resource)
+ if err != nil {
+ return err
+ }
+ _, err = c.MakeApiRequest("PUT", fmt.Sprintf("/1.0/%s/lock_resource", rpath), nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// UnLockResource unlocks a resource, re-enabling destroy requests
+func (c *Client) UnLockResource(resource interface{}) error {
+ rpath, err := resourcePath(resource)
+ if err != nil {
+ return err
+ }
+ _, err = c.MakeApiRequest("PUT", fmt.Sprintf("/1.0/%s/unlock_resource", rpath), nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/server_groups.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/server_groups.go
new file mode 100644
index 000000000000..f73b8f36c5ee
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/server_groups.go
@@ -0,0 +1,150 @@
+package gobrightbox
+
+import (
+ "time"
+)
+
+// ServerGroup represents a server group
+// https://api.gb1.brightbox.com/1.0/#server_group
+type ServerGroup struct {
+ Id string
+ Name string
+ CreatedAt *time.Time `json:"created_at"`
+ Description string
+ Default bool
+ Fqdn string
+ Account Account `json:"account"`
+ Servers []Server
+ FirewallPolicy *FirewallPolicy `json:"firewall_policy"`
+}
+
+// ServerGroupOptions is used in combination with CreateServerGroup and
+// UpdateServerGroup to create and update server groups
+type ServerGroupOptions struct {
+ Id string `json:"-"`
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+}
+
+type serverGroupMemberOptions struct {
+ Servers []serverGroupMember `json:"servers"`
+ Destination string `json:"destination,omitempty"`
+}
+type serverGroupMember struct {
+ Server string `json:"server,omitempty"`
+}
+
+// ServerGroups retrieves a list of all server groups
+func (c *Client) ServerGroups() ([]ServerGroup, error) {
+ var groups []ServerGroup
+ _, err := c.MakeApiRequest("GET", "/1.0/server_groups", nil, &groups)
+ if err != nil {
+ return nil, err
+ }
+ return groups, err
+}
+
+// ServerGroup retrieves a detailed view of one server group
+func (c *Client) ServerGroup(identifier string) (*ServerGroup, error) {
+ group := new(ServerGroup)
+ _, err := c.MakeApiRequest("GET", "/1.0/server_groups/"+identifier, nil, group)
+ if err != nil {
+ return nil, err
+ }
+ return group, err
+}
+
+// CreateServerGroup creates a new server group
+//
+// It takes an instance of ServerGroupOptions. Not all attributes can be
+// specified at create time (such as Id, which is allocated for you).
+func (c *Client) CreateServerGroup(newServerGroup *ServerGroupOptions) (*ServerGroup, error) {
+ group := new(ServerGroup)
+ _, err := c.MakeApiRequest("POST", "/1.0/server_groups", newServerGroup, &group)
+ if err != nil {
+ return nil, err
+ }
+ return group, nil
+}
+
+// UpdateServerGroup updates an existing server groups's attributes. Not all
+// attributes can be changed (such as Id).
+//
+// Specify the server group you want to update using the ServerGroupOptions Id
+// field.
+//
+// To change group memberships, use AddServersToServerGroup,
+// RemoveServersFromServerGroup and MoveServersToServerGroup.
+func (c *Client) UpdateServerGroup(updateServerGroup *ServerGroupOptions) (*ServerGroup, error) {
+ group := new(ServerGroup)
+ _, err := c.MakeApiRequest("PUT", "/1.0/server_groups/"+updateServerGroup.Id, updateServerGroup, &group)
+ if err != nil {
+ return nil, err
+ }
+ return group, nil
+}
+
+// DestroyServerGroup destroys an existing server group
+func (c *Client) DestroyServerGroup(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/server_groups/"+identifier, nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// AddServersToServerGroup adds servers to an existing server group.
+//
+// The identifier parameter specifies the destination group.
+//
+// The serverIds parameter specifies the identifiers of the servers you want to add.
+func (c *Client) AddServersToServerGroup(identifier string, serverIds []string) (*ServerGroup, error) {
+ group := new(ServerGroup)
+ opts := new(serverGroupMemberOptions)
+ for _, id := range serverIds {
+ opts.Servers = append(opts.Servers, serverGroupMember{Server: id})
+ }
+ _, err := c.MakeApiRequest("POST", "/1.0/server_groups/"+identifier+"/add_servers", opts, &group)
+ if err != nil {
+ return nil, err
+ }
+ return group, nil
+}
+
+// RemoveServersFromServerGroup removes servers from an existing server group.
+//
+// The identifier parameter specifies the group.
+//
+// The serverIds parameter specifies the identifiers of the servers you want to remove.
+func (c *Client) RemoveServersFromServerGroup(identifier string, serverIds []string) (*ServerGroup, error) {
+ group := new(ServerGroup)
+ opts := new(serverGroupMemberOptions)
+ for _, id := range serverIds {
+ opts.Servers = append(opts.Servers, serverGroupMember{Server: id})
+ }
+ _, err := c.MakeApiRequest("POST", "/1.0/server_groups/"+identifier+"/remove_servers", opts, &group)
+ if err != nil {
+ return nil, err
+ }
+ return group, nil
+}
+
+// MoveServersToServerGroup atomically moves servers from one group to another.
+//
+// The src parameter specifies the group to which the servers currently belong
+//
+// The dst parameter specifies the group to which you want to move the servers.
+//
+// The serverIds parameter specifies the identifiers of the servers you want to move.
+func (c *Client) MoveServersToServerGroup(src string, dst string, serverIds []string) (*ServerGroup, error) {
+ group := new(ServerGroup)
+ opts := serverGroupMemberOptions{Destination: dst}
+ for _, id := range serverIds {
+ opts.Servers = append(opts.Servers, serverGroupMember{Server: id})
+ }
+ _, err := c.MakeApiRequest("POST", "/1.0/server_groups/"+src+"/move_servers", opts, &group)
+ if err != nil {
+ return nil, err
+ }
+ return group, nil
+}
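
A short usage sketch of the group-membership calls above, assuming an authenticated *Client; the group and server identifiers are placeholders, not values from this patch:

package examples

import (
	brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
)

// growAndRebalance adds two servers to one group and then atomically moves
// one of them to another group. All identifiers are illustrative only.
func growAndRebalance(client *brightbox.Client) error {
	if _, err := client.AddServersToServerGroup("grp-aaaaa", []string{"srv-11111", "srv-22222"}); err != nil {
		return err
	}
	_, err := client.MoveServersToServerGroup("grp-aaaaa", "grp-bbbbb", []string{"srv-22222"})
	return err
}
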
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/server_types.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/server_types.go
new file mode 100644
index 000000000000..ccb17df19456
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/server_types.go
@@ -0,0 +1,46 @@
+package gobrightbox
+
+import (
+ "fmt"
+)
+
+// ServerType represents a Server Type
+type ServerType struct {
+ Id string
+ Name string
+ Status string
+ Handle string
+ Cores int
+ Ram int
+ DiskSize int `json:"disk_size"`
+}
+
+// ServerTypes retrieves a list of all server types
+func (c *Client) ServerTypes() ([]ServerType, error) {
+ var servertypes []ServerType
+ _, err := c.MakeApiRequest("GET", "/1.0/server_types", nil, &servertypes)
+ if err != nil {
+ return nil, err
+ }
+ return servertypes, err
+}
+
+// ServerType retrieves a detailed view of one server type
+func (c *Client) ServerType(identifier string) (*ServerType, error) {
+ servertype := new(ServerType)
+ _, err := c.MakeApiRequest("GET", "/1.0/server_types/"+identifier, nil, servertype)
+ if err != nil {
+ return nil, err
+ }
+ return servertype, err
+}
+
+// ServerTypeByHandle retrieves the server type with the given handle
+func (c *Client) ServerTypeByHandle(handle string) (*ServerType, error) {
+ servertypes, err := c.ServerTypes()
+ if err != nil {
+ return nil, err
+ }
+ for _, servertype := range servertypes {
+ if servertype.Handle == handle {
+ return &servertype, nil
+ }
+ }
+ return nil, fmt.Errorf("ServerType with handle '%s' doesn't exist", handle)
+}
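
For reference, a sketch of resolving a server type by handle with the helper above (the handle value is a placeholder; note that ServerTypeByHandle does a linear scan over the full ServerTypes listing):

package examples

import (
	"fmt"

	brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
)

// describeServerType looks a server type up by handle and prints its resources.
func describeServerType(client *brightbox.Client, handle string) error {
	serverType, err := client.ServerTypeByHandle(handle)
	if err != nil {
		return err
	}
	fmt.Printf("%s (%s): cores=%d ram=%d disk_size=%d\n",
		serverType.Id, serverType.Name, serverType.Cores, serverType.Ram, serverType.DiskSize)
	return nil
}
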
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/servers.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/servers.go
new file mode 100644
index 000000000000..3d78768b527e
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/servers.go
@@ -0,0 +1,227 @@
+package gobrightbox
+
+import (
+ "net/url"
+ "time"
+)
+
+// Server represents a Cloud Server
+// https://api.gb1.brightbox.com/1.0/#server
+type Server struct {
+ Id string
+ Name string
+ Status string
+ Locked bool
+ Hostname string
+ Fqdn string
+ CreatedAt *time.Time `json:"created_at"`
+ // DeletedAt is nil if the server has not yet been deleted
+ DeletedAt *time.Time `json:"deleted_at"`
+ StartedAt *time.Time `json:"started_at"`
+ UserData string `json:"user_data"`
+ CompatibilityMode bool `json:"compatibility_mode"`
+ DiskEncrypted bool `json:"disk_encrypted"`
+ ServerConsole
+ Account Account
+ Image Image
+ ServerType ServerType `json:"server_type"`
+ Zone Zone
+ Snapshots []Image
+ CloudIPs []CloudIP `json:"cloud_ips"`
+ Interfaces []ServerInterface
+ ServerGroups []ServerGroup `json:"server_groups"`
+}
+
+// ServerConsole is embedded into Server and contains the fields used in response
+// to an ActivateConsoleForServer request.
+type ServerConsole struct {
+ ConsoleToken string `json:"console_token"`
+ ConsoleUrl string `json:"console_url"`
+ ConsoleTokenExpires *time.Time `json:"console_token_expires"`
+}
+
+// ServerOptions is used in conjunction with CreateServer and UpdateServer to
+// create and update servers.
+type ServerOptions struct {
+ Id string `json:"-"`
+ Image string `json:"image,omitempty"`
+ Name *string `json:"name,omitempty"`
+ ServerType string `json:"server_type,omitempty"`
+ Zone string `json:"zone,omitempty"`
+ UserData *string `json:"user_data,omitempty"`
+ ServerGroups []string `json:"server_groups,omitempty"`
+ CompatibilityMode *bool `json:"compatibility_mode,omitempty"`
+ DiskEncrypted *bool `json:"disk_encrypted,omitempty"`
+}
+
+// ServerInterface represents a server's network interface(s)
+type ServerInterface struct {
+ Id string
+ MacAddress string `json:"mac_address"`
+ IPv4Address string `json:"ipv4_address"`
+ IPv6Address string `json:"ipv6_address"`
+}
+
+// Servers retrieves a list of all servers
+func (c *Client) Servers() ([]Server, error) {
+ var servers []Server
+ _, err := c.MakeApiRequest("GET", "/1.0/servers", nil, &servers)
+ if err != nil {
+ return nil, err
+ }
+ return servers, err
+}
+
+// Server retrieves a detailed view of one server
+func (c *Client) Server(identifier string) (*Server, error) {
+ server := new(Server)
+ _, err := c.MakeApiRequest("GET", "/1.0/servers/"+identifier, nil, server)
+ if err != nil {
+ return nil, err
+ }
+ return server, err
+}
+
+// CreateServer creates a new server.
+//
+// It takes a ServerOptions struct which requires, at minimum, a valid Image
+// identifier. Not all attributes can be specified at create time (such as Id,
+// which is allocated for you)
+func (c *Client) CreateServer(newServer *ServerOptions) (*Server, error) {
+ server := new(Server)
+ _, err := c.MakeApiRequest("POST", "/1.0/servers", newServer, &server)
+ if err != nil {
+ return nil, err
+ }
+ return server, nil
+}
+
+// UpdateServer updates an existing server's attributes. Not all attributes can
+// be changed after creation time (such as Image, ServerType and Zone).
+//
+// Specify the server you want to update using the ServerOptions Id field
+func (c *Client) UpdateServer(updateServer *ServerOptions) (*Server, error) {
+ server := new(Server)
+ _, err := c.MakeApiRequest("PUT", "/1.0/servers/"+updateServer.Id, updateServer, &server)
+ if err != nil {
+ return nil, err
+ }
+ return server, nil
+}
+
+// DestroyServer issues a request to destroy the server
+func (c *Client) DestroyServer(identifier string) error {
+ _, err := c.MakeApiRequest("DELETE", "/1.0/servers/"+identifier, nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// StopServer issues a request to stop ("power off") an existing server
+func (c *Client) StopServer(identifier string) error {
+ _, err := c.MakeApiRequest("POST", "/1.0/servers/"+identifier+"/stop", nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// StartServer issues a request to start ("power on") an existing server
+func (c *Client) StartServer(identifier string) error {
+ _, err := c.MakeApiRequest("POST", "/1.0/servers/"+identifier+"/start", nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RebootServer issues a request to reboot ("ctrl+alt+delete") an existing
+// server
+func (c *Client) RebootServer(identifier string) error {
+ _, err := c.MakeApiRequest("POST", "/1.0/servers/"+identifier+"/reboot", nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// ResetServer issues a request to reset ("power cycle") an existing server
+func (c *Client) ResetServer(identifier string) error {
+ _, err := c.MakeApiRequest("POST", "/1.0/servers/"+identifier+"/reset", nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// ShutdownServer issues a request to shut down ("tap the power button") an
+// existing server
+func (c *Client) ShutdownServer(identifier string) error {
+ _, err := c.MakeApiRequest("POST", "/1.0/servers/"+identifier+"/shutdown", nil, nil)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// LockServer locks an existing server, preventing its destruction without
+// first unlocking. Deprecated, use LockResource instead.
+func (c *Client) LockServer(identifier string) error {
+ return c.LockResource(Server{Id: identifier})
+}
+
+// UnlockServer unlocks a previously locked existing server, allowing
+// destruction again. Deprecated, use UnLockResource instead.
+func (c *Client) UnlockServer(identifier string) error {
+ return c.UnLockResource(Server{Id: identifier})
+}
+
+// SnapshotServer issues a request to snapshot the disk of an existing
+// server. The snapshot is allocated an Image Id which is returned within an
+// instance of Image.
+func (c *Client) SnapshotServer(identifier string) (*Image, error) {
+ res, err := c.MakeApiRequest("POST", "/1.0/servers/"+identifier+"/snapshot", nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ imageID := getLinkRel(res.Header.Get("Link"), "img", "snapshot")
+ if imageID != nil {
+ img := new(Image)
+ img.Id = *imageID
+ return img, nil
+ }
+ return nil, nil
+}
+
+// ActivateConsoleForServer issues a request to enable the graphical console for
+// an existing server. The temporarily allocated ConsoleUrl, ConsoleToken and
+// ConsoleTokenExpires data are returned within an instance of Server.
+func (c *Client) ActivateConsoleForServer(identifier string) (*Server, error) {
+ server := new(Server)
+ _, err := c.MakeApiRequest("POST", "/1.0/servers/"+identifier+"/activate_console", nil, server)
+ if err != nil {
+ return nil, err
+ }
+ return server, nil
+}
+
+// FullConsoleUrl returns the console url for the server with the token in the
+// query string. Server needs a ConsoleUrl and ConsoleToken, retrieved using
+// ActivateConsoleForServer
+func (s *Server) FullConsoleUrl() string {
+ if s.ConsoleUrl == "" || s.ConsoleToken == "" {
+ return s.ConsoleUrl
+ }
+ u, err := url.Parse(s.ConsoleUrl)
+ if u == nil || err != nil {
+ return s.ConsoleUrl
+ }
+ values := u.Query()
+ if values.Get("password") != "" {
+ return s.ConsoleUrl
+ }
+ values.Set("password", s.ConsoleToken)
+ u.RawQuery = values.Encode()
+ return u.String()
+}
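
A hedged sketch tying the server calls together: create a server from illustrative identifiers, then activate and return its console URL. The image, type, zone and group IDs below are placeholders, not values from this patch:

package examples

import (
	brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
)

// createWorkerWithConsole builds a server and returns a console URL that
// already carries the console token as the password query parameter.
func createWorkerWithConsole(client *brightbox.Client) (string, error) {
	name := "example worker"
	opts := &brightbox.ServerOptions{
		Image:        "img-aaaaa",
		Name:         &name,
		ServerType:   "typ-aaaaa",
		Zone:         "zon-aaaaa",
		ServerGroups: []string{"grp-aaaaa"},
	}
	server, err := client.CreateServer(opts)
	if err != nil {
		return "", err
	}
	activated, err := client.ActivateConsoleForServer(server.Id)
	if err != nil {
		return "", err
	}
	return activated.FullConsoleUrl(), nil
}
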
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/status/constants.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/status/constants.go
new file mode 100644
index 000000000000..09090b063b3d
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/status/constants.go
@@ -0,0 +1,49 @@
+package status
+
+// Status constants. Can be used to compare to api status fields
+const (
+ Available = "available"
+ Creating = "creating"
+ Deleted = "deleted"
+ Deleting = "deleting"
+ Deprecated = "deprecated"
+ Failing = "failing"
+ Failed = "failed"
+ Pending = "pending"
+ Unavailable = "unavailable"
+)
+
+// Account additional status constants. Compare with Account status fields.
+const (
+ Closed = "closed"
+ Overdue = "overdue"
+ Suspended = "suspended"
+ Terminated = "terminated"
+ Warning = "warning"
+)
+
+// Cloud IP additional status constants. Compare with Cloud IP status fields.
+const (
+ Mapped = "mapped"
+ Reserved = "reserved"
+ Unmapped = "unmapped"
+)
+
+// Collaboration additional status constants. Compare with Collaboration status fields.
+const (
+ Accepted = "accepted"
+ Cancelled = "cancelled"
+ Ended = "ended"
+ Rejected = "rejected"
+)
+
+// Server Type additional status constants. Compare with Server Type status fields.
+const (
+ Experimental = "experimental"
+)
+
+// Server additional status constants. Compare with Server status fields.
+const (
+ Active = "active"
+ Inactive = "inactive"
+)
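
A minimal sketch of how these constants are meant to be compared against API status fields, using the Server type from the package above:

package examples

import (
	brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/status"
)

// isServerGone reports whether a server has been deleted or is being deleted.
func isServerGone(srv *brightbox.Server) bool {
	return srv.Status == status.Deleted || srv.Status == status.Deleting
}
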
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/users.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/users.go
new file mode 100644
index 000000000000..a124fd59ec33
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/users.go
@@ -0,0 +1,14 @@
+package gobrightbox
+
+// User represents a Brightbox User
+// https://api.gb1.brightbox.com/1.0/#user
+type User struct {
+ Id string
+ Name string
+ EmailAddress string `json:"email_address"`
+ EmailVerified bool `json:"email_verified"`
+ SshKey string `json:"ssh_key"`
+ MessagingPref bool `json:"messaging_pref"`
+ Accounts []*Account
+ DefaultAccount *Account `json:"default_account"`
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/zones.go b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/zones.go
new file mode 100644
index 000000000000..16fe7bfb782f
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/zones.go
@@ -0,0 +1,41 @@
+package gobrightbox
+
+import (
+ "fmt"
+)
+
+// Zone represents a Zone
+type Zone struct {
+ Id string
+ Handle string
+}
+
+// Zones retrieves a list of all zones
+func (c *Client) Zones() ([]Zone, error) {
+ var zones []Zone
+ _, err := c.MakeApiRequest("GET", "/1.0/zones", nil, &zones)
+ if err != nil {
+ return nil, err
+ }
+ return zones, err
+}
+
+// Zone retrieves a detailed view of one zone
+func (c *Client) Zone(identifier string) (*Zone, error) {
+ zone := new(Zone)
+ _, err := c.MakeApiRequest("GET", "/1.0/zones/"+identifier, nil, zone)
+ if err != nil {
+ return nil, err
+ }
+ return zone, err
+}
+
+// ZoneByHandle retrieves the zone with the given handle
+func (c *Client) ZoneByHandle(handle string) (*Zone, error) {
+ zones, err := c.Zones()
+ if err != nil {
+ return nil, err
+ }
+ for _, zone := range zones {
+ if zone.Handle == handle {
+ return &zone, nil
+ }
+ }
+ return nil, fmt.Errorf("Zone with handle '%s' doesn't exist", handle)
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/.gitignore b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/.gitignore
new file mode 100644
index 000000000000..17df183458e8
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/.gitignore
@@ -0,0 +1,16 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+*.swp
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/LICENSE b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/LICENSE
new file mode 100644
index 000000000000..a143a0bbd338
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Brightbox Systems Ltd
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/README.md b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/README.md
new file mode 100644
index 000000000000..df53a2a417a7
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/README.md
@@ -0,0 +1,2 @@
+# k8ssdk
+Brightbox API SDK for kubernetes applications
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/brightbox_auth.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/brightbox_auth.go
new file mode 100644
index 000000000000..0bc38d57726f
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/brightbox_auth.go
@@ -0,0 +1,138 @@
+// Copyright 2020 Brightbox Systems Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8ssdk
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/clientcredentials"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cached"
+ klog "k8s.io/klog/v2"
+)
+
+const (
+ defaultClientID = "app-dkmch"
+ defaultClientSecret = "uogoelzgt0nwawb"
+ clientEnvVar = "BRIGHTBOX_CLIENT"
+ clientSecretEnvVar = "BRIGHTBOX_CLIENT_SECRET"
+ usernameEnvVar = "BRIGHTBOX_USER_NAME"
+ passwordEnvVar = "BRIGHTBOX_PASSWORD"
+ accountEnvVar = "BRIGHTBOX_ACCOUNT"
+ apiURLEnvVar = "BRIGHTBOX_API_URL"
+
+ defaultTimeoutSeconds = 10
+
+ ValidAcmeDomainStatus = "valid"
+)
+
+var infrastructureScope = []string{"infrastructure"}
+
+type authdetails struct {
+ APIClient string
+ APISecret string
+ UserName string
+ password string
+ Account string
+ APIURL string
+}
+
+// obtainCloudClient creates a new Brightbox client using details from
+// the environment
+func obtainCloudClient() (CloudAccess, error) {
+ klog.V(4).Infof("obtainCloudClient")
+ config := &authdetails{
+ APIClient: getenvWithDefault(clientEnvVar,
+ defaultClientID),
+ APISecret: getenvWithDefault(clientSecretEnvVar,
+ defaultClientSecret),
+ UserName: os.Getenv(usernameEnvVar),
+ password: os.Getenv(passwordEnvVar),
+ Account: os.Getenv(accountEnvVar),
+ APIURL: os.Getenv(apiURLEnvVar),
+ }
+ err := config.validateConfig()
+ if err != nil {
+ return nil, err
+ }
+ return config.authenticatedClient()
+}
+
+// Validate account config entries
+func (authd *authdetails) validateConfig() error {
+ klog.V(4).Infof("validateConfig")
+ if authd.APIClient == defaultClientID &&
+ authd.APISecret == defaultClientSecret {
+ if authd.Account == "" {
+ return fmt.Errorf("must specify Account with User Credentials")
+ }
+ } else {
+ if authd.UserName != "" || authd.password != "" {
+ return fmt.Errorf("User Credentials not used with API Client")
+ }
+ }
+ return nil
+}
+
+// Authenticate the details and return a client
+func (authd *authdetails) authenticatedClient() (CloudAccess, error) {
+ ctx := context.Background()
+ switch {
+ case authd.UserName != "" || authd.password != "":
+ return authd.tokenisedAuth(ctx)
+ default:
+ return authd.apiClientAuth(ctx)
+ }
+}
+
+func (authd *authdetails) tokenURL() string {
+ return authd.APIURL + "/token"
+}
+
+func (authd *authdetails) tokenisedAuth(ctx context.Context) (CloudAccess, error) {
+ conf := oauth2.Config{
+ ClientID: authd.APIClient,
+ ClientSecret: authd.APISecret,
+ Scopes: infrastructureScope,
+ Endpoint: oauth2.Endpoint{
+ TokenURL: authd.tokenURL(),
+ AuthStyle: oauth2.AuthStyleInHeader,
+ },
+ }
+ klog.V(4).Infof("Obtaining authentication for user %s", authd.UserName)
+ klog.V(4).Infof("Speaking to %s", authd.tokenURL())
+ token, err := conf.PasswordCredentialsToken(ctx, authd.UserName, authd.password)
+ if err != nil {
+ return nil, err
+ }
+ klog.V(4).Infof("Refreshing current token as required")
+ oauthConnection := conf.Client(ctx, token)
+ return cached.NewClient(authd.APIURL, authd.Account, oauthConnection)
+}
+
+func (authd *authdetails) apiClientAuth(ctx context.Context) (CloudAccess, error) {
+ conf := clientcredentials.Config{
+ ClientID: authd.APIClient,
+ ClientSecret: authd.APISecret,
+ Scopes: infrastructureScope,
+ TokenURL: authd.tokenURL(),
+ }
+ klog.V(4).Infof("Obtaining API client authorisation for client %s", authd.APIClient)
+ klog.V(4).Infof("Speaking to %s", authd.tokenURL())
+ oauthConnection := conf.Client(ctx)
+ return cached.NewClient(authd.APIURL, authd.Account, oauthConnection)
+}
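
The credential mode is chosen purely from the environment: a dedicated API client goes through the client-credentials grant, while user credentials use the password grant and, with the default client registration, must also name an account. A sketch of the two configurations via the exported Cloud client (all values and identifiers are placeholders):

package examples

import (
	"os"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
)

// apiClientAccess authenticates with a dedicated API client; no username,
// password or account is needed in this mode.
func apiClientAccess() (k8ssdk.CloudAccess, error) {
	os.Setenv("BRIGHTBOX_API_URL", "https://api.gb1.brightbox.com")
	os.Setenv("BRIGHTBOX_CLIENT", "cli-aaaaa")
	os.Setenv("BRIGHTBOX_CLIENT_SECRET", "placeholder-secret")
	return (&k8ssdk.Cloud{}).CloudClient()
}

// userAccess authenticates with user credentials instead; validateConfig then
// insists on BRIGHTBOX_ACCOUNT being set as well.
func userAccess() (k8ssdk.CloudAccess, error) {
	os.Setenv("BRIGHTBOX_API_URL", "https://api.gb1.brightbox.com")
	os.Setenv("BRIGHTBOX_USER_NAME", "user@example.com")
	os.Setenv("BRIGHTBOX_PASSWORD", "placeholder-password")
	os.Setenv("BRIGHTBOX_ACCOUNT", "acc-aaaaa")
	return (&k8ssdk.Cloud{}).CloudClient()
}
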
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/brightbox_interface.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/brightbox_interface.go
new file mode 100644
index 000000000000..4bd7e38f1c95
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/brightbox_interface.go
@@ -0,0 +1,557 @@
+// Copyright 2020 Brightbox Systems Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8ssdk
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+
+ brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox/status"
+ klog "k8s.io/klog/v2"
+)
+
+// GetServer retrieves a Brightbox Cloud Server
+func (c *Cloud) GetServer(ctx context.Context, id string, notFoundError error) (*brightbox.Server, error) {
+ klog.V(4).Infof("getServer (%q)", id)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ srv, err := client.Server(id)
+ if err != nil {
+ if isNotFound(err) {
+ return nil, notFoundError
+ }
+ return nil, err
+ }
+ return srv, nil
+}
+
+func isNotFound(e error) bool {
+ switch v := e.(type) {
+ case brightbox.ApiError:
+ return v.StatusCode == http.StatusNotFound
+ default:
+ return false
+ }
+}
+
+// CreateServer creates a Brightbox Cloud Server
+func (c *Cloud) CreateServer(newDetails *brightbox.ServerOptions) (*brightbox.Server, error) {
+ klog.V(4).Infof("CreateServer (%q)", *newDetails.Name)
+ klog.V(6).Infof("%+v", newDetails)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.CreateServer(newDetails)
+}
+
+func isAlive(lb *brightbox.LoadBalancer) bool {
+ return lb.Status == status.Active || lb.Status == status.Creating
+}
+func trimmed(name string) string {
+ return strings.TrimSpace(
+ strings.TrimSuffix(
+ strings.TrimSpace(name),
+ "#type:container",
+ ),
+ )
+}
+
+// GetLoadBalancerByName finds a Load Balancer from its name
+func (c *Cloud) GetLoadBalancerByName(name string) (*brightbox.LoadBalancer, error) {
+ klog.V(4).Infof("GetLoadBalancerByName (%q)", name)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ lbList, err := client.LoadBalancers()
+ if err != nil {
+ return nil, err
+ }
+ for i := range lbList {
+ if isAlive(&lbList[i]) && trimmed(name) == trimmed(lbList[i].Name) {
+ return &lbList[i], nil
+ }
+ }
+ return nil, nil
+}
+
+// GetLoadBalancerByID finds a Load Balancer from its ID
+func (c *Cloud) GetLoadBalancerByID(id string) (*brightbox.LoadBalancer, error) {
+ klog.V(4).Infof("GetLoadBalancerById (%q)", id)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.LoadBalancer(id)
+}
+
+// CreateLoadBalancer creates a LoadBalancer
+func (c *Cloud) CreateLoadBalancer(newDetails *brightbox.LoadBalancerOptions) (*brightbox.LoadBalancer, error) {
+ klog.V(4).Infof("CreateLoadBalancer (%q)", *newDetails.Name)
+ klog.V(6).Infof("%+v", newDetails)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.CreateLoadBalancer(newDetails)
+}
+
+// UpdateLoadBalancer updates a LoadBalancer
+func (c *Cloud) UpdateLoadBalancer(newDetails *brightbox.LoadBalancerOptions) (*brightbox.LoadBalancer, error) {
+ klog.V(4).Infof("UpdateLoadBalancer (%q, %q)", newDetails.Id, *newDetails.Name)
+ klog.V(6).Infof("%+v", newDetails)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.UpdateLoadBalancer(newDetails)
+}
+
+// GetFirewallPolicyByName get a FirewallPolicy from its name
+func (c *Cloud) GetFirewallPolicyByName(name string) (*brightbox.FirewallPolicy, error) {
+ klog.V(4).Infof("getFirewallPolicyByName (%q)", name)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ firewallPolicyList, err := client.FirewallPolicies()
+ if err != nil {
+ return nil, err
+ }
+ var result *brightbox.FirewallPolicy
+ for i := range firewallPolicyList {
+ if name == firewallPolicyList[i].Name {
+ result = &firewallPolicyList[i]
+ break
+ }
+ }
+ return result, nil
+}
+
+// GetServerTypes obtains the list of Server Types on the account
+func (c *Cloud) GetServerTypes() ([]brightbox.ServerType, error) {
+ klog.V(4).Info("GetServerTypes")
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.ServerTypes()
+}
+
+// GetServerType fetches a Server Type from its ID
+func (c *Cloud) GetServerType(identifier string) (*brightbox.ServerType, error) {
+ klog.V(4).Infof("GetServerType %q", identifier)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.ServerType(identifier)
+}
+
+// GetConfigMaps obtains the list of Config Maps on the account
+func (c *Cloud) GetConfigMaps() ([]brightbox.ConfigMap, error) {
+ klog.V(4).Info("GetConfigMaps")
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.ConfigMaps()
+}
+
+// GetConfigMap fetches a Config Map from its ID
+func (c *Cloud) GetConfigMap(identifier string) (*brightbox.ConfigMap, error) {
+ klog.V(4).Infof("GetConfigMap %q", identifier)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.ConfigMap(identifier)
+}
+
+// GetServerGroups obtains the list of Server Groups on the account
+func (c *Cloud) GetServerGroups() ([]brightbox.ServerGroup, error) {
+ klog.V(4).Info("GetServerGroups")
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.ServerGroups()
+}
+
+// GetServerGroup fetches a Server Group from its ID
+func (c *Cloud) GetServerGroup(identifier string) (*brightbox.ServerGroup, error) {
+ klog.V(4).Infof("GetServerGroup %q", identifier)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.ServerGroup(identifier)
+}
+
+// GetServerGroupByName fetches a Server Group from its name
+func (c *Cloud) GetServerGroupByName(name string) (*brightbox.ServerGroup, error) {
+ klog.V(4).Infof("GetServerGroupByName (%q)", name)
+ serverGroupList, err := c.GetServerGroups()
+ if err != nil {
+ return nil, err
+ }
+ var result *brightbox.ServerGroup
+ for i := range serverGroupList {
+ if name == serverGroupList[i].Name {
+ result = &serverGroupList[i]
+ break
+ }
+ }
+ return result, nil
+}
+
+// CreateServerGroup creates a Server Group
+func (c *Cloud) CreateServerGroup(name string) (*brightbox.ServerGroup, error) {
+ klog.V(4).Infof("CreateServerGroup (%q)", name)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.CreateServerGroup(&brightbox.ServerGroupOptions{Name: &name})
+}
+
+// CreateFirewallPolicy creates a Firewall Policy
+func (c *Cloud) CreateFirewallPolicy(group *brightbox.ServerGroup) (*brightbox.FirewallPolicy, error) {
+ klog.V(4).Infof("createFirewallPolicy (%q)", group.Name)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.CreateFirewallPolicy(&brightbox.FirewallPolicyOptions{Name: &group.Name, ServerGroup: &group.Id})
+}
+
+// CreateFirewallRule creates a Firewall Rule
+func (c *Cloud) CreateFirewallRule(newDetails *brightbox.FirewallRuleOptions) (*brightbox.FirewallRule, error) {
+ klog.V(4).Infof("createFirewallRule (%q)", *newDetails.Description)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.CreateFirewallRule(newDetails)
+}
+
+// UpdateFirewallRule updates a Firewall Rule
+func (c *Cloud) UpdateFirewallRule(newDetails *brightbox.FirewallRuleOptions) (*brightbox.FirewallRule, error) {
+ klog.V(4).Infof("updateFirewallRule (%q, %q)", newDetails.Id, *newDetails.Description)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.UpdateFirewallRule(newDetails)
+}
+
+// EnsureMappedCloudIP checks to make sure the Cloud IP is mapped to the Load Balancer.
+// This function is idempotent.
+func (c *Cloud) EnsureMappedCloudIP(lb *brightbox.LoadBalancer, cip *brightbox.CloudIP) error {
+ klog.V(4).Infof("EnsureMappedCloudIP (%q, %q)", lb.Id, cip.Id)
+ if alreadyMapped(cip, lb.Id) {
+ return nil
+ } else if cip.Status == status.Mapped {
+ return fmt.Errorf("Unexplained mapping of %q (%q)", cip.Id, cip.PublicIP)
+ }
+ client, err := c.CloudClient()
+ if err != nil {
+ return err
+ }
+ return client.MapCloudIP(cip.Id, lb.Id)
+}
+
+func alreadyMapped(cip *brightbox.CloudIP, loadBalancerID string) bool {
+ return cip.LoadBalancer != nil && cip.LoadBalancer.Id == loadBalancerID
+}
+
+// AllocateCloudIP allocates a new Cloud IP and gives it the name specified
+func (c *Cloud) AllocateCloudIP(name string) (*brightbox.CloudIP, error) {
+ klog.V(4).Infof("AllocateCloudIP %q", name)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ opts := &brightbox.CloudIPOptions{
+ Name: &name,
+ }
+ return client.CreateCloudIP(opts)
+}
+
+// GetCloudIPs obtains the list of allocated Cloud IPs
+func (c *Cloud) GetCloudIPs() ([]brightbox.CloudIP, error) {
+ klog.V(4).Infof("GetCloudIPs")
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.CloudIPs()
+}
+
+// getCloudIP fetches a Cloud IP by its ID
+func (c *Cloud) getCloudIP(id string) (*brightbox.CloudIP, error) {
+ klog.V(4).Infof("getCloudIP (%q)", id)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ return client.CloudIP(id)
+}
+
+// Destroy things
+
+// DestroyLoadBalancer removes a Load Balancer
+func (c *Cloud) DestroyLoadBalancer(id string) error {
+ klog.V(4).Infof("DestroyLoadBalancer %q", id)
+ client, err := c.CloudClient()
+ if err != nil {
+ return err
+ }
+ return client.DestroyLoadBalancer(id)
+}
+
+// DestroyServer removes a Server
+func (c *Cloud) DestroyServer(id string) error {
+ klog.V(4).Infof("DestroyServer %q", id)
+ client, err := c.CloudClient()
+ if err != nil {
+ return err
+ }
+ return client.DestroyServer(id)
+}
+
+// DestroyServerGroup removes a Server Group
+func (c *Cloud) DestroyServerGroup(id string) error {
+ klog.V(4).Infof("DestroyServerGroup %q", id)
+ client, err := c.CloudClient()
+ if err != nil {
+ return err
+ }
+ return client.DestroyServerGroup(id)
+}
+
+// DestroyFirewallPolicy removes a Firewall Policy
+func (c *Cloud) DestroyFirewallPolicy(id string) error {
+ klog.V(4).Infof("DestroyFirewallPolicy %q", id)
+ client, err := c.CloudClient()
+ if err != nil {
+ return err
+ }
+ return client.DestroyFirewallPolicy(id)
+}
+
+// DestroyCloudIP removes a Cloud IP allocation
+func (c *Cloud) DestroyCloudIP(id string) error {
+ klog.V(4).Infof("DestroyCloudIP (%q)", id)
+ client, err := c.CloudClient()
+ if err != nil {
+ return err
+ }
+ return client.DestroyCloudIP(id)
+}
+
+// unmapCloudIP removes a mapping to a Cloud IP
+func (c *Cloud) unmapCloudIP(id string) error {
+ klog.V(4).Infof("unmapCloudIP (%q)", id)
+ client, err := c.CloudClient()
+ if err != nil {
+ return err
+ }
+ return client.UnMapCloudIP(id)
+}
+
+// DestroyCloudIPs destroys any Cloud IPs in the supplied list whose name matches 'name'
+func (c *Cloud) DestroyCloudIPs(cloudIPList []brightbox.CloudIP, name string) error {
+ klog.V(4).Infof("DestroyCloudIPs (%q)", name)
+ for _, v := range cloudIPList {
+ if v.Name == name {
+ if err := c.DestroyCloudIP(v.Id); err != nil {
+ klog.V(4).Infof("Error destroying CloudIP %q", v.Id)
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// EnsureOldCloudIPsDeposed unmaps any Cloud IPs in the supplied list that
+// match 'name' but are not the current Cloud IP
+func (c *Cloud) EnsureOldCloudIPsDeposed(cloudIPList []brightbox.CloudIP, currentIPID string, name string) error {
+ klog.V(4).Infof("EnsureOldCloudIPsDeposed (%q, %q)", currentIPID, name)
+ for _, v := range cloudIPList {
+ if v.Name == name && v.Id != currentIPID {
+ if err := c.unmapCloudIP(v.Id); err != nil {
+ klog.V(4).Infof("Error unmapping CloudIP %q", v.Id)
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func mapServersToServerIds(servers []brightbox.Server) []string {
+ result := make([]string, len(servers))
+ for i := range servers {
+ result[i] = servers[i].Id
+ }
+ return result
+}
+
+// SyncServerGroup ensures a Brightbox Server Group contains the supplied
+// list of Servers and nothing else
+func (c *Cloud) SyncServerGroup(group *brightbox.ServerGroup, newServerIds []string) (*brightbox.ServerGroup, error) {
+ oldServerIds := mapServersToServerIds(group.Servers)
+ klog.V(4).Infof("SyncServerGroup (%v, %v, %v)", group.Id, oldServerIds, newServerIds)
+ client, err := c.CloudClient()
+ if err != nil {
+ return nil, err
+ }
+ serverIdsToInsert, serverIdsToDelete := getSyncLists(oldServerIds, newServerIds)
+ result := group
+ if len(serverIdsToInsert) > 0 {
+ klog.V(4).Infof("Adding Servers %v", serverIdsToInsert)
+ result, err = client.AddServersToServerGroup(group.Id, serverIdsToInsert)
+ }
+ if err == nil && len(serverIdsToDelete) > 0 {
+ klog.V(4).Infof("Removing Servers %v", serverIdsToDelete)
+ result, err = client.RemoveServersFromServerGroup(group.Id, serverIdsToDelete)
+ }
+ return result, err
+}
+
+// IsUpdateLoadBalancerRequired checks whether a set of LoadBalancerOptions
+// warrants an API update call.
+func IsUpdateLoadBalancerRequired(lb *brightbox.LoadBalancer, newDetails brightbox.LoadBalancerOptions) bool {
+ klog.V(6).Infof("Update LoadBalancer Required (%v, %v)", *newDetails.Name, lb.Name)
+ return (newDetails.Name != nil && *newDetails.Name != lb.Name) ||
+ (newDetails.Healthcheck != nil && isUpdateLoadBalancerHealthcheckRequired(newDetails.Healthcheck, &lb.Healthcheck)) ||
+ isUpdateLoadBalancerNodeRequired(newDetails.Nodes, lb.Nodes) ||
+ isUpdateLoadBalancerListenerRequired(newDetails.Listeners, lb.Listeners) ||
+ isUpdateLoadBalancerDomainsRequired(newDetails.Domains, lb.Acme)
+}
+
+func isUpdateLoadBalancerHealthcheckRequired(newHealthCheck *brightbox.LoadBalancerHealthcheck, oldHealthCheck *brightbox.LoadBalancerHealthcheck) bool {
+ klog.V(6).Infof("Update LoadBalancer Healthcheck Required (%#v, %#v)", *newHealthCheck, *oldHealthCheck)
+ return (newHealthCheck.Type != oldHealthCheck.Type) ||
+ (newHealthCheck.Port != oldHealthCheck.Port) ||
+ (newHealthCheck.Request != oldHealthCheck.Request)
+}
+
+func isUpdateLoadBalancerNodeRequired(a []brightbox.LoadBalancerNode, b []brightbox.Server) bool {
+ klog.V(6).Infof("Update LoadBalancer Node Required (%v, %v)", a, b)
+ // If one is nil, the other must also be nil.
+ if (a == nil) != (b == nil) {
+ return true
+ }
+ if len(a) != len(b) {
+ return true
+ }
+ for i := range a {
+ if a[i].Node != b[i].Id {
+ return true
+ }
+ }
+ return false
+}
+
+func isUpdateLoadBalancerListenerRequired(a []brightbox.LoadBalancerListener, b []brightbox.LoadBalancerListener) bool {
+ klog.V(6).Infof("Update LoadBalancer Listener Required (%v, %v)", a, b)
+ // If one is nil, the other must also be nil.
+ if (a == nil) != (b == nil) {
+ return true
+ }
+ if len(a) != len(b) {
+ return true
+ }
+ for i := range a {
+ if (a[i].Protocol != b[i].Protocol) ||
+ (a[i].In != b[i].In) ||
+ (a[i].Out != b[i].Out) ||
+ (a[i].Timeout != 0 && b[i].Timeout != 0 && a[i].Timeout != b[i].Timeout) ||
+ (a[i].ProxyProtocol != b[i].ProxyProtocol) {
+ return true
+ }
+ }
+ return false
+}
+
+func isUpdateLoadBalancerDomainsRequired(a *[]string, acme *brightbox.LoadBalancerAcme) bool {
+ klog.V(6).Infof("Update LoadBalancer Domains Required (%v)", a)
+ if acme == nil {
+ return a != nil
+ }
+ if a == nil {
+ return false
+ }
+ b := make([]string, len(acme.Domains))
+ for i, domain := range acme.Domains {
+ b[i] = domain.Identifier
+ }
+ return !sameStringSlice(*a, b)
+}
+
+// ErrorIfNotErased returns an appropriate error if the Load Balancer has not been erased
+func ErrorIfNotErased(lb *brightbox.LoadBalancer) error {
+ switch {
+ case lb == nil:
+ return nil
+ case len(lb.CloudIPs) > 0:
+ return fmt.Errorf("CloudIPs still mapped to load balancer %q", lb.Id)
+ case !isAlive(lb):
+ return nil
+ }
+ return fmt.Errorf("Unknown reason why %q has not deleted", lb.Id)
+}
+
+// ErrorIfNotComplete returns an appropriate error if the Load Balancer has not yet built
+func ErrorIfNotComplete(lb *brightbox.LoadBalancer, cipID, name string) error {
+ switch {
+ case lb == nil:
+ return fmt.Errorf("Load Balancer for %q is missing", name)
+ case !isAlive(lb):
+ return fmt.Errorf("Load Balancer %q still building", lb.Id)
+ case !containsCIP(lb.CloudIPs, cipID):
+ return fmt.Errorf("Mapping of CloudIP %q to %q not complete", cipID, lb.Id)
+ }
+ return ErrorIfAcmeNotComplete(lb.Acme)
+}
+
+// Look for a CIP Id in a list of cloudIPs
+func containsCIP(cloudIPList []brightbox.CloudIP, cipID string) bool {
+ for _, v := range cloudIPList {
+ if v.Id == cipID {
+ return true
+ }
+ }
+ return false
+}
+
+// ErrorIfAcmeNotComplete returns an appropriate error if ACME has not yet validated
+func ErrorIfAcmeNotComplete(acme *brightbox.LoadBalancerAcme) error {
+ if acme != nil {
+ for _, domain := range acme.Domains {
+ if domain.Status != ValidAcmeDomainStatus {
+ return fmt.Errorf("Domain %q has not yet been validated for SSL use (%q:%q)", domain.Identifier, domain.Status, domain.LastMessage)
+ }
+ }
+ }
+ return nil
+}
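
As a usage illustration, a sketch of the reconcile pattern these helpers support: ensure a named group exists and contains exactly the supplied servers (the group name is a placeholder):

package examples

import (
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
)

// reconcileGroup creates the group if missing, then lets SyncServerGroup add
// the missing members and remove the surplus ones in one pass.
func reconcileGroup(cloud *k8ssdk.Cloud, serverIDs []string) error {
	group, err := cloud.GetServerGroupByName("k8s-workers")
	if err != nil {
		return err
	}
	if group == nil {
		group, err = cloud.CreateServerGroup("k8s-workers")
		if err != nil {
			return err
		}
	}
	_, err = cloud.SyncServerGroup(group, serverIDs)
	return err
}
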
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cached/cached.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cached/cached.go
new file mode 100644
index 000000000000..d3366b8cab55
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cached/cached.go
@@ -0,0 +1,111 @@
+// Copyright 2020 Brightbox Systems Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cached
+
+import (
+ "net/http"
+ "time"
+
+ cache "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/go-cache"
+ brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
+
+ klog "k8s.io/klog/v2"
+)
+
+const (
+ expirationTime = 5 * time.Second
+ purgeTime = 30 * time.Second
+)
+
+// Client is a cached brightbox Client
+type Client struct {
+ clientCache *cache.Cache
+ brightbox.Client
+}
+
+// NewClient creates and returns a cached Client
+func NewClient(url string, account string, httpClient *http.Client) (*Client, error) {
+ cl, err := brightbox.NewClient(url, account, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{
+ clientCache: cache.New(expirationTime, purgeTime),
+ Client: *cl,
+ }, err
+}
+
+// Server fetches a server by id
+func (c *Client) Server(identifier string) (*brightbox.Server, error) {
+ if cachedServer, found := c.clientCache.Get(identifier); found {
+ klog.V(4).Infof("Cache hit %q", identifier)
+ return cachedServer.(*brightbox.Server), nil
+ }
+ server, err := c.Client.Server(identifier)
+ if err != nil {
+ return nil, err
+ }
+ klog.V(4).Infof("Cacheing %q", identifier)
+ c.clientCache.Set(identifier, server, cache.DefaultExpiration)
+ return server, nil
+}
+
+// ServerGroup fetches a server group by id
+func (c *Client) ServerGroup(identifier string) (*brightbox.ServerGroup, error) {
+ if cachedServerGroup, found := c.clientCache.Get(identifier); found {
+ klog.V(4).Infof("Cache hit %q", identifier)
+ return cachedServerGroup.(*brightbox.ServerGroup), nil
+ }
+ serverGroup, err := c.Client.ServerGroup(identifier)
+ if err != nil {
+ return nil, err
+ }
+ klog.V(4).Infof("Cacheing %q", identifier)
+ c.clientCache.Set(identifier, serverGroup, cache.DefaultExpiration)
+ return serverGroup, nil
+}
+
+// ConfigMap fetches a config map by id
+func (c *Client) ConfigMap(identifier string) (*brightbox.ConfigMap, error) {
+ if cachedConfigMap, found := c.clientCache.Get(identifier); found {
+ klog.V(4).Infof("Cache hit %q", identifier)
+ return cachedConfigMap.(*brightbox.ConfigMap), nil
+ }
+ configMap, err := c.Client.ConfigMap(identifier)
+ if err != nil {
+ return nil, err
+ }
+ klog.V(4).Infof("Cacheing %q", identifier)
+ c.clientCache.Set(identifier, configMap, cache.DefaultExpiration)
+ return configMap, nil
+}
+
+// DestroyServer removes a server by id
+func (c *Client) DestroyServer(identifier string) error {
+ err := c.Client.DestroyServer(identifier)
+ if err == nil {
+ c.clientCache.Delete(identifier)
+ }
+ return err
+}
+
+// DestroyServerGroup removes a server group by id
+func (c *Client) DestroyServerGroup(identifier string) error {
+ err := c.Client.DestroyServerGroup(identifier)
+ if err == nil {
+ c.clientCache.Delete(identifier)
+ }
+ return err
+}
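
A sketch of the caching behaviour, assuming an already-authenticated *http.Client (the URL, account and server identifier are placeholders): repeated reads of the same object within the five-second expiry are answered from the cache, while Destroy calls evict the entry immediately.

package examples

import (
	"net/http"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cached"
)

// lookupTwice issues one real API call; the second Server read is a cache hit.
func lookupTwice(httpClient *http.Client) error {
	client, err := cached.NewClient("https://api.gb1.brightbox.com", "acc-aaaaa", httpClient)
	if err != nil {
		return err
	}
	if _, err := client.Server("srv-aaaaa"); err != nil { // cache miss, hits the API
		return err
	}
	_, err = client.Server("srv-aaaaa") // served from the 5s cache
	return err
}
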
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/clients.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/clients.go
new file mode 100644
index 000000000000..296da3bb0d15
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/clients.go
@@ -0,0 +1,59 @@
+// Copyright 2018 Brightbox Systems Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8ssdk
+
+import (
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+)
+
+// EC2Metadata is an abstraction over the AWS metadata service.
+type EC2Metadata interface {
+ // Query the EC2 metadata service (used to discover instance-id etc)
+ GetMetadata(path string) (string, error)
+}
+
+// Cloud allows access to the Brightbox Cloud and/or any EC2 compatible metadata.
+type Cloud struct {
+ client CloudAccess
+ metadataClientCache EC2Metadata
+}
+
+// MetadataClient returns the EC2 Metadata client, or creates a new client
+// from the default AWS config if one doesn't exist.
+func (c *Cloud) MetadataClient() (EC2Metadata, error) {
+ if c.metadataClientCache == nil {
+ cfg, err := session.NewSession()
+ if err != nil {
+ return nil, err
+ }
+ c.metadataClientCache = ec2metadata.New(cfg)
+ }
+
+ return c.metadataClientCache, nil
+}
+
+// CloudClient returns the Brightbox Cloud client, or creates a new client from the current environment if one doesn't exist.
+func (c *Cloud) CloudClient() (CloudAccess, error) {
+ if c.client == nil {
+ client, err := obtainCloudClient()
+ if err != nil {
+ return nil, err
+ }
+ c.client = client
+ }
+
+ return c.client, nil
+}
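
Both clients are created lazily on first use and memoised on the Cloud struct, so a zero-value Cloud is all a caller needs. A small sketch using the standard "instance-id" metadata path:

package examples

import (
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
)

// localServerID reads this server's identifier from the EC2-compatible
// metadata service.
func localServerID(cloud *k8ssdk.Cloud) (string, error) {
	metadata, err := cloud.MetadataClient()
	if err != nil {
		return "", err
	}
	return metadata.GetMetadata("instance-id")
}
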
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cloud_access.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cloud_access.go
new file mode 100644
index 000000000000..d1ad5d55056b
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/cloud_access.go
@@ -0,0 +1,106 @@
+// Copyright 2020 Brightbox Systems Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8ssdk
+
+import brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
+
+// CloudAccess is an abstraction over the Brightbox API to allow testing
+type CloudAccess interface {
+ // Server fetches a server
+ Server(identifier string) (*brightbox.Server, error)
+
+ // CreateServer creates a new server
+ CreateServer(newServer *brightbox.ServerOptions) (*brightbox.Server, error)
+
+ // LoadBalancers fetches a list of Load Balancers
+ LoadBalancers() ([]brightbox.LoadBalancer, error)
+
+ // LoadBalancer retrieves a detailed view of one load balancer
+ LoadBalancer(identifier string) (*brightbox.LoadBalancer, error)
+
+ // CreateLoadBalancer creates a new load balancer
+ CreateLoadBalancer(newDetails *brightbox.LoadBalancerOptions) (*brightbox.LoadBalancer, error)
+
+ // UpdateLoadBalancer updates an existing load balancer
+ UpdateLoadBalancer(newDetails *brightbox.LoadBalancerOptions) (*brightbox.LoadBalancer, error)
+
+ // CloudIPs retrieves a list of all cloud IPs
+ CloudIPs() ([]brightbox.CloudIP, error)
+
+ // CloudIP retrieves a detailed view of one cloud ip
+ CloudIP(identifier string) (*brightbox.CloudIP, error)
+
+ // MapCloudIP issues a request to map the cloud ip to the destination
+ MapCloudIP(identifier string, destination string) error
+
+ // UnMapCloudIP issues a request to unmap the cloud ip
+ UnMapCloudIP(identifier string) error
+
+ // CreateCloudIP creates a new Cloud IP
+ CreateCloudIP(newCloudIP *brightbox.CloudIPOptions) (*brightbox.CloudIP, error)
+
+ // AddServersToServerGroup adds servers to an existing server group
+ AddServersToServerGroup(identifier string, serverIds []string) (*brightbox.ServerGroup, error)
+
+ // RemoveServersFromServerGroup removes servers from an existing server group
+ RemoveServersFromServerGroup(identifier string, serverIds []string) (*brightbox.ServerGroup, error)
+
+ // ServerGroups retrieves a list of all server groups
+ ServerGroups() ([]brightbox.ServerGroup, error)
+
+ // ServerGroup fetches a server group
+ ServerGroup(identifier string) (*brightbox.ServerGroup, error)
+
+ // CreateServerGroup creates a new server group
+ CreateServerGroup(newServerGroup *brightbox.ServerGroupOptions) (*brightbox.ServerGroup, error)
+
+ // CreateFirewallPolicy creates a new firewall policy
+ CreateFirewallPolicy(policyOptions *brightbox.FirewallPolicyOptions) (*brightbox.FirewallPolicy, error)
+
+ // CreateFirewallRule creates a new firewall rule
+ CreateFirewallRule(ruleOptions *brightbox.FirewallRuleOptions) (*brightbox.FirewallRule, error)
+
+ // UpdateFirewallRule updates an existing firewall rule
+ UpdateFirewallRule(ruleOptions *brightbox.FirewallRuleOptions) (*brightbox.FirewallRule, error)
+
+ // FirewallPolicies retrieves a list of all firewall policies
+ FirewallPolicies() ([]brightbox.FirewallPolicy, error)
+
+ // DestroyServer destroys an existing server
+ DestroyServer(identifier string) error
+
+ // DestroyServerGroup destroys an existing server group
+ DestroyServerGroup(identifier string) error
+
+ // DestroyFirewallPolicy issues a request to destroy the firewall policy
+ DestroyFirewallPolicy(identifier string) error
+
+ // DestroyLoadBalancer issues a request to destroy the load balancer
+ DestroyLoadBalancer(identifier string) error
+
+ // DestroyCloudIP issues a request to destroy the cloud ip
+ DestroyCloudIP(identifier string) error
+
+ // ConfigMaps retrieves a list of all config maps
+ ConfigMaps() ([]brightbox.ConfigMap, error)
+
+ // ConfigMap retrieves a detailed view on one config map
+ ConfigMap(identifier string) (*brightbox.ConfigMap, error)
+
+ // ServerTypes retrieves a list of all server types
+ ServerTypes() ([]brightbox.ServerType, error)
+
+ // ServerType retrieves a detailed view on one server type
+ ServerType(identifier string) (*brightbox.ServerType, error)
+}
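
The interface exists so the Cloud helpers can be exercised against the generated testify mock in the mocks package that follows. A sketch of a test, assuming it lives in package k8ssdk so the unexported client field can be seeded (the group identifier is a placeholder):

package k8ssdk

import (
	"testing"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks"
)

// TestDestroyServerGroupDelegates checks the call is passed straight through
// to the injected CloudAccess implementation.
func TestDestroyServerGroupDelegates(t *testing.T) {
	mockClient := new(mocks.CloudAccess)
	mockClient.On("DestroyServerGroup", "grp-aaaaa").Return(nil)

	cloud := &Cloud{client: mockClient}
	if err := cloud.DestroyServerGroup("grp-aaaaa"); err != nil {
		t.Fatal(err)
	}
	mockClient.AssertExpectations(t)
}
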
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/CloudAccess.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/CloudAccess.go
new file mode 100644
index 000000000000..04e09aeb4f35
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/CloudAccess.go
@@ -0,0 +1,630 @@
+// Copyright 2020 Brightbox Systems Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mocks
+
+import (
+ brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// CloudAccess is an autogenerated mock type for the CloudAccess type
+type CloudAccess struct {
+ mock.Mock
+}
+
+// AddServersToServerGroup provides a mock function with given fields: identifier, serverIds
+func (_m *CloudAccess) AddServersToServerGroup(identifier string, serverIds []string) (*brightbox.ServerGroup, error) {
+ ret := _m.Called(identifier, serverIds)
+
+ var r0 *brightbox.ServerGroup
+ if rf, ok := ret.Get(0).(func(string, []string) *brightbox.ServerGroup); ok {
+ r0 = rf(identifier, serverIds)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.ServerGroup)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string, []string) error); ok {
+ r1 = rf(identifier, serverIds)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CloudIP provides a mock function with given fields: identifier
+func (_m *CloudAccess) CloudIP(identifier string) (*brightbox.CloudIP, error) {
+ ret := _m.Called(identifier)
+
+ var r0 *brightbox.CloudIP
+ if rf, ok := ret.Get(0).(func(string) *brightbox.CloudIP); ok {
+ r0 = rf(identifier)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.CloudIP)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(identifier)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CloudIPs provides a mock function with given fields:
+func (_m *CloudAccess) CloudIPs() ([]brightbox.CloudIP, error) {
+ ret := _m.Called()
+
+ var r0 []brightbox.CloudIP
+ if rf, ok := ret.Get(0).(func() []brightbox.CloudIP); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]brightbox.CloudIP)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ConfigMap provides a mock function with given fields: identifier
+func (_m *CloudAccess) ConfigMap(identifier string) (*brightbox.ConfigMap, error) {
+ ret := _m.Called(identifier)
+
+ var r0 *brightbox.ConfigMap
+ if rf, ok := ret.Get(0).(func(string) *brightbox.ConfigMap); ok {
+ r0 = rf(identifier)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.ConfigMap)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(identifier)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ConfigMaps provides a mock function with given fields:
+func (_m *CloudAccess) ConfigMaps() ([]brightbox.ConfigMap, error) {
+ ret := _m.Called()
+
+ var r0 []brightbox.ConfigMap
+ if rf, ok := ret.Get(0).(func() []brightbox.ConfigMap); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]brightbox.ConfigMap)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CreateCloudIP provides a mock function with given fields: newCloudIP
+func (_m *CloudAccess) CreateCloudIP(newCloudIP *brightbox.CloudIPOptions) (*brightbox.CloudIP, error) {
+ ret := _m.Called(newCloudIP)
+
+ var r0 *brightbox.CloudIP
+ if rf, ok := ret.Get(0).(func(*brightbox.CloudIPOptions) *brightbox.CloudIP); ok {
+ r0 = rf(newCloudIP)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.CloudIP)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(*brightbox.CloudIPOptions) error); ok {
+ r1 = rf(newCloudIP)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CreateFirewallPolicy provides a mock function with given fields: policyOptions
+func (_m *CloudAccess) CreateFirewallPolicy(policyOptions *brightbox.FirewallPolicyOptions) (*brightbox.FirewallPolicy, error) {
+ ret := _m.Called(policyOptions)
+
+ var r0 *brightbox.FirewallPolicy
+ if rf, ok := ret.Get(0).(func(*brightbox.FirewallPolicyOptions) *brightbox.FirewallPolicy); ok {
+ r0 = rf(policyOptions)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.FirewallPolicy)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(*brightbox.FirewallPolicyOptions) error); ok {
+ r1 = rf(policyOptions)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CreateFirewallRule provides a mock function with given fields: ruleOptions
+func (_m *CloudAccess) CreateFirewallRule(ruleOptions *brightbox.FirewallRuleOptions) (*brightbox.FirewallRule, error) {
+ ret := _m.Called(ruleOptions)
+
+ var r0 *brightbox.FirewallRule
+ if rf, ok := ret.Get(0).(func(*brightbox.FirewallRuleOptions) *brightbox.FirewallRule); ok {
+ r0 = rf(ruleOptions)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.FirewallRule)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(*brightbox.FirewallRuleOptions) error); ok {
+ r1 = rf(ruleOptions)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CreateLoadBalancer provides a mock function with given fields: newDetails
+func (_m *CloudAccess) CreateLoadBalancer(newDetails *brightbox.LoadBalancerOptions) (*brightbox.LoadBalancer, error) {
+ ret := _m.Called(newDetails)
+
+ var r0 *brightbox.LoadBalancer
+ if rf, ok := ret.Get(0).(func(*brightbox.LoadBalancerOptions) *brightbox.LoadBalancer); ok {
+ r0 = rf(newDetails)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.LoadBalancer)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(*brightbox.LoadBalancerOptions) error); ok {
+ r1 = rf(newDetails)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CreateServer provides a mock function with given fields: newServer
+func (_m *CloudAccess) CreateServer(newServer *brightbox.ServerOptions) (*brightbox.Server, error) {
+ ret := _m.Called(newServer)
+
+ var r0 *brightbox.Server
+ if rf, ok := ret.Get(0).(func(*brightbox.ServerOptions) *brightbox.Server); ok {
+ r0 = rf(newServer)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.Server)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(*brightbox.ServerOptions) error); ok {
+ r1 = rf(newServer)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// CreateServerGroup provides a mock function with given fields: newServerGroup
+func (_m *CloudAccess) CreateServerGroup(newServerGroup *brightbox.ServerGroupOptions) (*brightbox.ServerGroup, error) {
+ ret := _m.Called(newServerGroup)
+
+ var r0 *brightbox.ServerGroup
+ if rf, ok := ret.Get(0).(func(*brightbox.ServerGroupOptions) *brightbox.ServerGroup); ok {
+ r0 = rf(newServerGroup)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.ServerGroup)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(*brightbox.ServerGroupOptions) error); ok {
+ r1 = rf(newServerGroup)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// DestroyCloudIP provides a mock function with given fields: identifier
+func (_m *CloudAccess) DestroyCloudIP(identifier string) error {
+ ret := _m.Called(identifier)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string) error); ok {
+ r0 = rf(identifier)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// DestroyFirewallPolicy provides a mock function with given fields: identifier
+func (_m *CloudAccess) DestroyFirewallPolicy(identifier string) error {
+ ret := _m.Called(identifier)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string) error); ok {
+ r0 = rf(identifier)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// DestroyLoadBalancer provides a mock function with given fields: identifier
+func (_m *CloudAccess) DestroyLoadBalancer(identifier string) error {
+ ret := _m.Called(identifier)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string) error); ok {
+ r0 = rf(identifier)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// DestroyServer provides a mock function with given fields: identifier
+func (_m *CloudAccess) DestroyServer(identifier string) error {
+ ret := _m.Called(identifier)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string) error); ok {
+ r0 = rf(identifier)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// DestroyServerGroup provides a mock function with given fields: identifier
+func (_m *CloudAccess) DestroyServerGroup(identifier string) error {
+ ret := _m.Called(identifier)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string) error); ok {
+ r0 = rf(identifier)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// FirewallPolicies provides a mock function with given fields:
+func (_m *CloudAccess) FirewallPolicies() ([]brightbox.FirewallPolicy, error) {
+ ret := _m.Called()
+
+ var r0 []brightbox.FirewallPolicy
+ if rf, ok := ret.Get(0).(func() []brightbox.FirewallPolicy); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]brightbox.FirewallPolicy)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// LoadBalancer provides a mock function with given fields: identifier
+func (_m *CloudAccess) LoadBalancer(identifier string) (*brightbox.LoadBalancer, error) {
+ ret := _m.Called(identifier)
+
+ var r0 *brightbox.LoadBalancer
+ if rf, ok := ret.Get(0).(func(string) *brightbox.LoadBalancer); ok {
+ r0 = rf(identifier)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.LoadBalancer)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(identifier)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// LoadBalancers provides a mock function with given fields:
+func (_m *CloudAccess) LoadBalancers() ([]brightbox.LoadBalancer, error) {
+ ret := _m.Called()
+
+ var r0 []brightbox.LoadBalancer
+ if rf, ok := ret.Get(0).(func() []brightbox.LoadBalancer); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]brightbox.LoadBalancer)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// MapCloudIP provides a mock function with given fields: identifier, destination
+func (_m *CloudAccess) MapCloudIP(identifier string, destination string) error {
+ ret := _m.Called(identifier, destination)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string, string) error); ok {
+ r0 = rf(identifier, destination)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// RemoveServersFromServerGroup provides a mock function with given fields: identifier, serverIds
+func (_m *CloudAccess) RemoveServersFromServerGroup(identifier string, serverIds []string) (*brightbox.ServerGroup, error) {
+ ret := _m.Called(identifier, serverIds)
+
+ var r0 *brightbox.ServerGroup
+ if rf, ok := ret.Get(0).(func(string, []string) *brightbox.ServerGroup); ok {
+ r0 = rf(identifier, serverIds)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.ServerGroup)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string, []string) error); ok {
+ r1 = rf(identifier, serverIds)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Server provides a mock function with given fields: identifier
+func (_m *CloudAccess) Server(identifier string) (*brightbox.Server, error) {
+ ret := _m.Called(identifier)
+
+ var r0 *brightbox.Server
+ if rf, ok := ret.Get(0).(func(string) *brightbox.Server); ok {
+ r0 = rf(identifier)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.Server)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(identifier)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ServerGroup provides a mock function with given fields: identifier
+func (_m *CloudAccess) ServerGroup(identifier string) (*brightbox.ServerGroup, error) {
+ ret := _m.Called(identifier)
+
+ var r0 *brightbox.ServerGroup
+ if rf, ok := ret.Get(0).(func(string) *brightbox.ServerGroup); ok {
+ r0 = rf(identifier)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.ServerGroup)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(identifier)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ServerGroups provides a mock function with given fields:
+func (_m *CloudAccess) ServerGroups() ([]brightbox.ServerGroup, error) {
+ ret := _m.Called()
+
+ var r0 []brightbox.ServerGroup
+ if rf, ok := ret.Get(0).(func() []brightbox.ServerGroup); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]brightbox.ServerGroup)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ServerType provides a mock function with given fields: identifier
+func (_m *CloudAccess) ServerType(identifier string) (*brightbox.ServerType, error) {
+ ret := _m.Called(identifier)
+
+ var r0 *brightbox.ServerType
+ if rf, ok := ret.Get(0).(func(string) *brightbox.ServerType); ok {
+ r0 = rf(identifier)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.ServerType)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(identifier)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ServerTypes provides a mock function with given fields:
+func (_m *CloudAccess) ServerTypes() ([]brightbox.ServerType, error) {
+ ret := _m.Called()
+
+ var r0 []brightbox.ServerType
+ if rf, ok := ret.Get(0).(func() []brightbox.ServerType); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]brightbox.ServerType)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// UnMapCloudIP provides a mock function with given fields: identifier
+func (_m *CloudAccess) UnMapCloudIP(identifier string) error {
+ ret := _m.Called(identifier)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string) error); ok {
+ r0 = rf(identifier)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// UpdateFirewallRule provides a mock function with given fields: ruleOptions
+func (_m *CloudAccess) UpdateFirewallRule(ruleOptions *brightbox.FirewallRuleOptions) (*brightbox.FirewallRule, error) {
+ ret := _m.Called(ruleOptions)
+
+ var r0 *brightbox.FirewallRule
+ if rf, ok := ret.Get(0).(func(*brightbox.FirewallRuleOptions) *brightbox.FirewallRule); ok {
+ r0 = rf(ruleOptions)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.FirewallRule)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(*brightbox.FirewallRuleOptions) error); ok {
+ r1 = rf(ruleOptions)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// UpdateLoadBalancer provides a mock function with given fields: newDetails
+func (_m *CloudAccess) UpdateLoadBalancer(newDetails *brightbox.LoadBalancerOptions) (*brightbox.LoadBalancer, error) {
+ ret := _m.Called(newDetails)
+
+ var r0 *brightbox.LoadBalancer
+ if rf, ok := ret.Get(0).(func(*brightbox.LoadBalancerOptions) *brightbox.LoadBalancer); ok {
+ r0 = rf(newDetails)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*brightbox.LoadBalancer)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(*brightbox.LoadBalancerOptions) error); ok {
+ r1 = rf(newDetails)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
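The CloudAccess mock above is generated by mockery and follows the usual testify pattern: expectations are registered with On/Return and verified with AssertExpectations. As a minimal sketch only, a test might stub the Server lookup like this (the test name and the "srv-testy" identifier are illustrative, not taken from the real test suite):

```go
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks"
)

func TestServerStub(t *testing.T) {
	client := new(mocks.CloudAccess)
	// Expect exactly one Server call and return a canned object.
	client.On("Server", "srv-testy").
		Return(&brightbox.Server{}, nil).
		Once()

	srv, err := client.Server("srv-testy")

	assert.NoError(t, err)
	assert.NotNil(t, srv)
	client.AssertExpectations(t)
}
```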
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/EC2Metadata.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/EC2Metadata.go
new file mode 100644
index 000000000000..ef2f4464f9cb
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/EC2Metadata.go
@@ -0,0 +1,43 @@
+// Copyright 2020 Brightbox Systems Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mocks
+
+import mock "github.com/stretchr/testify/mock"
+
+// EC2Metadata is an autogenerated mock type for the EC2Metadata type
+type EC2Metadata struct {
+ mock.Mock
+}
+
+// GetMetadata provides a mock function with given fields: path
+func (_m *EC2Metadata) GetMetadata(path string) (string, error) {
+ ret := _m.Called(path)
+
+ var r0 string
+ if rf, ok := ret.Get(0).(func(string) string); ok {
+ r0 = rf(path)
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(path)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/faker.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/faker.go
new file mode 100644
index 000000000000..c7431401d690
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks/faker.go
@@ -0,0 +1,26 @@
+// Copyright 2019 Brightbox Systems Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mocks
+
+import (
+ mock "github.com/stretchr/testify/mock"
+ brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
+)
+
+// ServerListReducer returns a hook for testify's Run() that removes the
+// first entry from the target group's server list each time the mocked
+// call is invoked.
+func ServerListReducer(target *brightbox.ServerGroup) func(mock.Arguments) {
+ return func(mock.Arguments) {
+ target.Servers = target.Servers[1:]
+ }
+}
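ServerListReducer is meant to be wired into a mock expectation's Run hook so that repeated calls shrink the faked server group, simulating servers being deleted one at a time. A hedged sketch of that wiring, assuming the gobrightbox field names Id and Servers and made-up identifiers:

```go
package mocks_test

import (
	"testing"

	mock "github.com/stretchr/testify/mock"
	brightbox "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/gobrightbox"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks"
)

func TestGroupShrinksOnDestroy(t *testing.T) {
	group := &brightbox.ServerGroup{
		Id:      "grp-testy",
		Servers: []brightbox.Server{{Id: "srv-aaaaa"}, {Id: "srv-bbbbb"}},
	}
	client := new(mocks.CloudAccess)
	// Every DestroyServer call drops the first member of the faked group,
	// so later ServerGroup lookups reflect the deletions.
	client.On("DestroyServer", mock.AnythingOfType("string")).
		Return(nil).
		Run(mocks.ServerListReducer(group))
	client.On("ServerGroup", "grp-testy").Return(group, nil)

	client.DestroyServer("srv-aaaaa")
	refreshed, _ := client.ServerGroup("grp-testy")
	if len(refreshed.Servers) != 1 {
		t.Errorf("expected 1 server left, got %d", len(refreshed.Servers))
	}
}
```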
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/testing.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/testing.go
new file mode 100644
index 000000000000..836955234302
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/testing.go
@@ -0,0 +1,106 @@
+// Copyright 2020 Brightbox Systems Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8ssdk
+
+import (
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+)
+
+func ResetAuthEnvironment() {
+ vars := []string{
+ clientEnvVar,
+ clientSecretEnvVar,
+ usernameEnvVar,
+ passwordEnvVar,
+ accountEnvVar,
+ apiURLEnvVar,
+ }
+ for _, envvar := range vars {
+ os.Unsetenv(envvar)
+ }
+}
+
+func SetAuthEnvClientID() {
+ os.Setenv(clientSecretEnvVar, "not default")
+}
+
+func SetAuthEnvUsername() {
+ os.Setenv(usernameEnvVar, "itsy@bitzy.com")
+}
+
+func SetAuthEnvPassword() {
+ os.Setenv(passwordEnvVar, "madeuppassword")
+}
+
+func SetAuthEnvAPIURL(value string) {
+ os.Setenv(apiURLEnvVar, value)
+}
+
+func SetAuthEnvAccount() {
+ os.Setenv(accountEnvVar, "acc-testy")
+}
+
+func ClearAuthEnvUsername() {
+ os.Unsetenv(usernameEnvVar)
+}
+
+// GetAuthEnvTokenHandler resets the authentication environment, starts a
+// fake OAuth token endpoint and points the API URL environment variable at
+// it. Callers should Close the returned server when the test finishes.
+func GetAuthEnvTokenHandler(t *testing.T) *httptest.Server {
+ ResetAuthEnvironment()
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer r.Body.Close()
+ expected := "/token"
+ if r.URL.String() != expected {
+ t.Errorf("URL = %q; want %q", r.URL, expected)
+ }
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Failed reading request body: %s.", err)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ expected = "application/x-www-form-urlencoded"
+ if headerContentType != expected {
+ t.Errorf("Content-Type header = %q; want %q", headerContentType, expected)
+ }
+ headerAuth := r.Header.Get("Authorization")
+ expected = "Basic YXBwLWRrbWNoOnVvZ29lbHpndDBud2F3Yg=="
+ if headerAuth != expected {
+ t.Errorf("Authorization header = %q; want %q", headerAuth, expected)
+ }
+ switch string(body) {
+ case "grant_type=password&password=madeuppassword&scope=infrastructure&username=itsy%40bitzy.com":
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+ case "grant_type=password&password=&scope=infrastructure&username=itsy%40bitzy.com":
+ w.WriteHeader(http.StatusUnauthorized)
+ default:
+ t.Errorf("Unexpected res.Body = %q", string(body))
+ w.WriteHeader(http.StatusUnauthorized)
+ }
+ }))
+ SetAuthEnvAPIURL(ts.URL)
+ SetAuthEnvAccount()
+ return ts
+}
+
+// MakeTestClient returns a Cloud wired to the supplied test doubles rather
+// than the live Brightbox API.
+func MakeTestClient(testClient CloudAccess, testMetadata EC2Metadata) *Cloud {
+ return &Cloud{
+ client: testClient,
+ metadataClientCache: testMetadata,
+ }
+}
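These helpers let the k8ssdk tests run without real credentials: GetAuthEnvTokenHandler stands up a fake OAuth token endpoint and exports the matching environment variables, while MakeTestClient builds a Cloud backed by the mocks instead of the live API. A rough sketch of how a test might combine them (the expectation and the "srv-testy" return value are illustrative assumptions):

```go
package k8ssdk_test

import (
	"testing"

	mock "github.com/stretchr/testify/mock"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/mocks"
)

func TestWithFakeCloud(t *testing.T) {
	// Fake token endpoint plus username/password credentials in the environment.
	ts := k8ssdk.GetAuthEnvTokenHandler(t)
	defer k8ssdk.ResetAuthEnvironment()
	defer ts.Close()
	k8ssdk.SetAuthEnvUsername()
	k8ssdk.SetAuthEnvPassword()

	// A Cloud backed entirely by mocks rather than the live Brightbox API.
	client := new(mocks.CloudAccess)
	metadata := new(mocks.EC2Metadata)
	metadata.On("GetMetadata", mock.AnythingOfType("string")).
		Return("srv-testy", nil)
	cloud := k8ssdk.MakeTestClient(client, metadata)

	_ = cloud // exercise the code under test against cloud here
}
```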
diff --git a/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/util.go b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/util.go
new file mode 100644
index 000000000000..884083c166a6
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/k8ssdk/util.go
@@ -0,0 +1,114 @@
+// Copyright 2018 Brightbox Systems Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package k8ssdk
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+)
+
+const (
+	// ProviderName is the name of the Brightbox cloud provider
+	ProviderName = "brightbox"
+	// ProviderPrefix is the scheme prepended to server IDs to form provider IDs
+	ProviderPrefix = ProviderName + "://"
+)
+
+// MapProviderIDToServerID parses the provider ID string and returns the
+// embedded server ID. There should be no need for error checking here, since
+// the input string format is constrained by the k8s process.
+func MapProviderIDToServerID(providerID string) string {
+ if strings.HasPrefix(providerID, ProviderPrefix) {
+ return strings.TrimPrefix(providerID, ProviderPrefix)
+ }
+ return providerID
+}
+
+// MapServerIDToProviderID adds the provider prefix to the server ID
+func MapServerIDToProviderID(serverID string) string {
+ return ProviderPrefix + serverID
+}
+
+// MapZoneHandleToRegion parses the zone handle and returns the embedded
+// region ID. Zone handles are of the form ${region-name}-${ix}, so we look
+// for the last '-' and trim just before it.
+func MapZoneHandleToRegion(zoneHandle string) (string, error) {
+ ix := strings.LastIndex(zoneHandle, "-")
+ if ix == -1 {
+ return "", fmt.Errorf("unexpected zone: %s", zoneHandle)
+ }
+ return zoneHandle[:ix], nil
+}
+
+// getenvWithDefault retrieves the value of the environment variable named
+// by the key. If the variable is not present, it returns the default value
+// instead.
+func getenvWithDefault(key string, defaultValue string) string {
+ if val, exists := os.LookupEnv(key); exists {
+ return val
+ }
+ return defaultValue
+}
+
+// getSyncLists returns the lists of inserts and deletes that change oldList into newList
+func getSyncLists(oldList []string, newList []string) ([]string, []string) {
+ sort.Strings(oldList)
+ sort.Strings(newList)
+ var x, y int
+ var insList, delList []string
+ for x < len(oldList) || y < len(newList) {
+ switch {
+ case y >= len(newList):
+ delList = append(delList, oldList[x])
+ x++
+ case x >= len(oldList):
+ insList = append(insList, newList[y])
+ y++
+ case oldList[x] < newList[y]:
+ delList = append(delList, oldList[x])
+ x++
+ case oldList[x] > newList[y]:
+ insList = append(insList, newList[y])
+ y++
+ default:
+ y++
+ x++
+ }
+ }
+ return insList, delList
+}
+
+func sameStringSlice(x, y []string) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ // create a map of string -> int
+ diff := make(map[string]int, len(x))
+ for _, _x := range x {
+ // 0 value for int is 0, so just increment a counter for the string
+ diff[_x]++
+ }
+ for _, _y := range y {
+ // If the string _y is not in diff bail out early
+ if _, ok := diff[_y]; !ok {
+ return false
+ }
+ diff[_y]--
+ if diff[_y] == 0 {
+ delete(diff, _y)
+ }
+ }
+ return len(diff) == 0
+}
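The provider-ID helpers are simple string transforms. An Example-style sketch of the round trip and the zone-to-region mapping, with an illustrative server identifier and the usual Brightbox zone handle form:

```go
package k8ssdk_test

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/k8ssdk"
)

func ExampleMapProviderIDToServerID() {
	fmt.Println(k8ssdk.MapProviderIDToServerID("brightbox://srv-testy"))
	fmt.Println(k8ssdk.MapServerIDToProviderID("srv-testy"))
	region, _ := k8ssdk.MapZoneHandleToRegion("gb1-a")
	fmt.Println(region)
	// Output:
	// srv-testy
	// brightbox://srv-testy
	// gb1
}
```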
diff --git a/cluster-autoscaler/cloudprovider/brightbox/linkheader/.gitignore b/cluster-autoscaler/cloudprovider/brightbox/linkheader/.gitignore
new file mode 100644
index 000000000000..0a00ddebb8ac
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/linkheader/.gitignore
@@ -0,0 +1,2 @@
+cpu.out
+linkheader.test
diff --git a/cluster-autoscaler/cloudprovider/brightbox/linkheader/.travis.yml b/cluster-autoscaler/cloudprovider/brightbox/linkheader/.travis.yml
new file mode 100644
index 000000000000..cfda08659a3d
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/linkheader/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+
+go:
+ - 1.6
+ - 1.7
+ - tip
diff --git a/cluster-autoscaler/cloudprovider/brightbox/linkheader/CONTRIBUTING.mkd b/cluster-autoscaler/cloudprovider/brightbox/linkheader/CONTRIBUTING.mkd
new file mode 100644
index 000000000000..0339bec5537c
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/linkheader/CONTRIBUTING.mkd
@@ -0,0 +1,10 @@
+# Contributing
+
+* Raise an issue if appropriate
+* Fork the repo
+* Bootstrap the dev dependencies (run `./script/bootstrap`)
+* Make your changes
+* Use [gofmt](https://golang.org/cmd/gofmt/)
+* Make sure the tests pass (run `./script/test`)
+* Make sure the linters pass (run `./script/lint`)
+* Issue a pull request
diff --git a/cluster-autoscaler/cloudprovider/brightbox/linkheader/LICENSE b/cluster-autoscaler/cloudprovider/brightbox/linkheader/LICENSE
new file mode 100644
index 000000000000..55192df5648b
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/linkheader/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 Tom Hudson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/cluster-autoscaler/cloudprovider/brightbox/linkheader/README.mkd b/cluster-autoscaler/cloudprovider/brightbox/linkheader/README.mkd
new file mode 100644
index 000000000000..2a949cac2f72
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/linkheader/README.mkd
@@ -0,0 +1,35 @@
+# Golang Link Header Parser
+
+Library for parsing HTTP Link headers. Requires Go 1.6 or higher.
+
+Docs can be found on [the GoDoc page](https://godoc.org/github.com/tomnomnom/linkheader).
+
+[![Build Status](https://travis-ci.org/tomnomnom/linkheader.svg)](https://travis-ci.org/tomnomnom/linkheader)
+
+## Basic Example
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/tomnomnom/linkheader"
+)
+
+func main() {
+	header := "<https://api.github.com/user/58276/repos?page=2>; rel=\"next\"," +
+		"<https://api.github.com/user/58276/repos?page=2>; rel=\"last\""
+ links := linkheader.Parse(header)
+
+ for _, link := range links {
+ fmt.Printf("URL: %s; Rel: %s\n", link.URL, link.Rel)
+ }
+}
+
+// Output:
+// URL: https://api.github.com/user/58276/repos?page=2; Rel: next
+// URL: https://api.github.com/user/58276/repos?page=2; Rel: last
+```
+
+
diff --git a/cluster-autoscaler/cloudprovider/brightbox/linkheader/main.go b/cluster-autoscaler/cloudprovider/brightbox/linkheader/main.go
new file mode 100644
index 000000000000..6b81321b8456
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/brightbox/linkheader/main.go
@@ -0,0 +1,151 @@
+// Package linkheader provides functions for parsing HTTP Link headers
+package linkheader
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Link is a single URL and related parameters
+type Link struct {
+ URL string
+ Rel string
+ Params map[string]string
+}
+
+// HasParam returns if a Link has a particular parameter or not
+func (l Link) HasParam(key string) bool {
+ for p := range l.Params {
+ if p == key {
+ return true
+ }
+ }
+ return false
+}
+
+// Param returns the value of a parameter if it exists
+func (l Link) Param(key string) string {
+ for k, v := range l.Params {
+ if key == k {
+ return v
+ }
+ }
+ return ""
+}
+
+// String returns the string representation of a link
+func (l Link) String() string {
+
+ p := make([]string, 0, len(l.Params))
+ for k, v := range l.Params {
+ p = append(p, fmt.Sprintf("%s=\"%s\"", k, v))
+ }
+ if l.Rel != "" {
+ p = append(p, fmt.Sprintf("%s=\"%s\"", "rel", l.Rel))
+ }
+ return fmt.Sprintf("<%s>; %s", l.URL, strings.Join(p, "; "))
+}
+
+// Links is a slice of Link structs
+type Links []Link
+
+// FilterByRel filters a group of Links by the provided Rel attribute
+func (l Links) FilterByRel(r string) Links {
+ links := make(Links, 0)
+ for _, link := range l {
+ if link.Rel == r {
+ links = append(links, link)
+ }
+ }
+ return links
+}
+
+// String returns the string representation of multiple Links
+// for use in HTTP responses etc
+func (l Links) String() string {
+ if l == nil {
+ return fmt.Sprint(nil)
+ }
+
+ var strs []string
+ for _, link := range l {
+ strs = append(strs, link.String())
+ }
+ return strings.Join(strs, ", ")
+}
+
+// Parse parses a raw Link header in the form:
+//   <url>; rel="foo", <url>; rel="bar"; wat="dis"
+// returning a slice of Link structs
+func Parse(raw string) Links {
+ var links Links
+
+	// One chunk: <url>; rel="foo"
+ for _, chunk := range strings.Split(raw, ",") {
+
+ link := Link{URL: "", Rel: "", Params: make(map[string]string)}
+
+ // Figure out what each piece of the chunk is
+ for _, piece := range strings.Split(chunk, ";") {
+
+ piece = strings.Trim(piece, " ")
+ if piece == "" {
+ continue
+ }
+
+ // URL
+ if piece[0] == '<' && piece[len(piece)-1] == '>' {
+ link.URL = strings.Trim(piece, "<>")
+ continue
+ }
+
+ // Params
+ key, val := parseParam(piece)
+ if key == "" {
+ continue
+ }
+
+ // Special case for rel
+ if strings.ToLower(key) == "rel" {
+ link.Rel = val
+ } else {
+ link.Params[key] = val
+ }
+ }
+
+ if link.URL != "" {
+ links = append(links, link)
+ }
+ }
+
+ return links
+}
+
+// ParseMultiple is like Parse, but accepts a slice of headers
+// rather than just one header string
+func ParseMultiple(headers []string) Links {
+ links := make(Links, 0)
+ for _, header := range headers {
+ links = append(links, Parse(header)...)
+ }
+ return links
+}
+
+// parseParam takes a raw param in the form key="val" and
+// returns the key and value as separate strings
+func parseParam(raw string) (key, val string) {
+
+ parts := strings.SplitN(raw, "=", 2)
+ if len(parts) == 1 {
+ return parts[0], ""
+ }
+ if len(parts) != 2 {
+ return "", ""
+ }
+
+ key = parts[0]
+ val = strings.Trim(parts[1], "\"")
+
+ return key, val
+
+}
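The vendored linkheader package parses the Link headers used for paginated API responses. A short sketch of Parse plus FilterByRel, using a made-up header value:

```go
package main

import (
	"fmt"

	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox/linkheader"
)

func main() {
	header := `<https://api.example.com/servers?page=2>; rel="next", ` +
		`<https://api.example.com/servers?page=5>; rel="last"`
	links := linkheader.Parse(header)

	// Pick out just the "next" relation to drive pagination.
	for _, link := range links.FilterByRel("next") {
		fmt.Println(link.URL) // https://api.example.com/servers?page=2
	}
}
```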
diff --git a/cluster-autoscaler/cloudprovider/builder/builder_all.go b/cluster-autoscaler/cloudprovider/builder/builder_all.go
index a10526145425..7970b91c4dcf 100644
--- a/cluster-autoscaler/cloudprovider/builder/builder_all.go
+++ b/cluster-autoscaler/cloudprovider/builder/builder_all.go
@@ -1,4 +1,4 @@
-// +build !gce,!aws,!azure,!kubemark,!alicloud,!magnum,!digitalocean,!clusterapi,!huaweicloud,!ionoscloud,!linode,!hetzner,!bizflycloud
+// +build !gce,!aws,!azure,!kubemark,!alicloud,!magnum,!digitalocean,!clusterapi,!huaweicloud,!ionoscloud,!linode,!hetzner,!bizflycloud,!brightbox
/*
Copyright 2018 The Kubernetes Authors.
@@ -25,6 +25,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/azure"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/baiducloud"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/bizflycloud"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/cloudstack"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/clusterapi"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/digitalocean"
@@ -58,6 +59,7 @@ var AvailableCloudProviders = []string{
cloudprovider.IonoscloudProviderName,
cloudprovider.LinodeProviderName,
cloudprovider.BizflyCloudProviderName,
+ cloudprovider.BrightboxProviderName,
}
// DefaultCloudProvider is GCE.
@@ -79,6 +81,8 @@ func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGro
return cloudstack.BuildCloudStack(opts, do, rl)
case cloudprovider.BaiducloudProviderName:
return baiducloud.BuildBaiducloud(opts, do, rl)
+ case cloudprovider.BrightboxProviderName:
+ return brightbox.BuildBrightbox(opts, do, rl)
case cloudprovider.DigitalOceanProviderName:
return digitalocean.BuildDigitalOcean(opts, do, rl)
case cloudprovider.ExoscaleProviderName:
diff --git a/cluster-autoscaler/cloudprovider/builder/builder_brightbox.go b/cluster-autoscaler/cloudprovider/builder/builder_brightbox.go
new file mode 100644
index 000000000000..c227fba0e5f6
--- /dev/null
+++ b/cluster-autoscaler/cloudprovider/builder/builder_brightbox.go
@@ -0,0 +1,42 @@
+// +build brightbox
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
+ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/brightbox"
+ "k8s.io/autoscaler/cluster-autoscaler/config"
+)
+
+// AvailableCloudProviders supported by the brightbox cloud provider builder.
+var AvailableCloudProviders = []string{
+ cloudprovider.BrightboxProviderName,
+}
+
+// DefaultCloudProvider is Brightbox
+const DefaultCloudProvider = cloudprovider.BrightboxProviderName
+
+func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider {
+ switch opts.CloudProviderName {
+ case cloudprovider.BrightboxProviderName:
+ return brightbox.BuildBrightbox(opts, do, rl)
+ }
+
+ return nil
+}
diff --git a/cluster-autoscaler/cloudprovider/cloud_provider.go b/cluster-autoscaler/cloudprovider/cloud_provider.go
index 41ec7e914903..de9bcf81b718 100644
--- a/cluster-autoscaler/cloudprovider/cloud_provider.go
+++ b/cluster-autoscaler/cloudprovider/cloud_provider.go
@@ -38,6 +38,8 @@ const (
BaiducloudProviderName = "baiducloud"
// BizflyCloudProviderName gets the provider name of bizflycloud
BizflyCloudProviderName = "bizflycloud"
+ // BrightboxProviderName gets the provider name of brightbox
+ BrightboxProviderName = "brightbox"
// CloudStackProviderName gets the provider name of cloudstack
CloudStackProviderName = "cloudstack"
// ClusterAPIProviderName gets the provider name of clusterapi
diff --git a/cluster-autoscaler/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/cluster-autoscaler/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go
new file mode 100644
index 000000000000..7a0b9ed1029e
--- /dev/null
+++ b/cluster-autoscaler/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go
@@ -0,0 +1,120 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package clientcredentials implements the OAuth2.0 "client credentials" token flow,
+// also known as the "two-legged OAuth 2.0".
+//
+// This should be used when the client is acting on its own behalf or when the client
+// is the resource owner. It may also be used when requesting access to protected
+// resources based on an authorization previously arranged with the authorization
+// server.
+//
+// See https://tools.ietf.org/html/rfc6749#section-4.4
+package clientcredentials // import "golang.org/x/oauth2/clientcredentials"
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+)
+
+// Config describes a 2-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+type Config struct {
+ // ClientID is the application's ID.
+ ClientID string
+
+ // ClientSecret is the application's secret.
+ ClientSecret string
+
+ // TokenURL is the resource server's token endpoint
+ // URL. This is a constant specific to each server.
+ TokenURL string
+
+ // Scope specifies optional requested permissions.
+ Scopes []string
+
+ // EndpointParams specifies additional parameters for requests to the token endpoint.
+ EndpointParams url.Values
+
+ // AuthStyle optionally specifies how the endpoint wants the
+ // client ID & client secret sent. The zero value means to
+ // auto-detect.
+ AuthStyle oauth2.AuthStyle
+}
+
+// Token uses client credentials to retrieve a token.
+//
+// The provided context optionally controls which HTTP client is used. See the oauth2.HTTPClient variable.
+func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {
+ return c.TokenSource(ctx).Token()
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary.
+//
+// The provided context optionally controls which HTTP client
+// is returned. See the oauth2.HTTPClient variable.
+//
+// The returned Client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+ return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context and the
+// client ID and client secret.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+ source := &tokenSource{
+ ctx: ctx,
+ conf: c,
+ }
+ return oauth2.ReuseTokenSource(nil, source)
+}
+
+type tokenSource struct {
+ ctx context.Context
+ conf *Config
+}
+
+// Token refreshes the token by using a new client credentials request.
+// tokens received this way do not include a refresh token
+func (c *tokenSource) Token() (*oauth2.Token, error) {
+ v := url.Values{
+ "grant_type": {"client_credentials"},
+ }
+ if len(c.conf.Scopes) > 0 {
+ v.Set("scope", strings.Join(c.conf.Scopes, " "))
+ }
+ for k, p := range c.conf.EndpointParams {
+ // Allow grant_type to be overridden to allow interoperability with
+ // non-compliant implementations.
+ if _, ok := v[k]; ok && k != "grant_type" {
+ return nil, fmt.Errorf("oauth2: cannot overwrite parameter %q", k)
+ }
+ v[k] = p
+ }
+
+ tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v, internal.AuthStyle(c.conf.AuthStyle))
+ if err != nil {
+ if rErr, ok := err.(*internal.RetrieveError); ok {
+ return nil, (*oauth2.RetrieveError)(rErr)
+ }
+ return nil, err
+ }
+ t := &oauth2.Token{
+ AccessToken: tk.AccessToken,
+ TokenType: tk.TokenType,
+ RefreshToken: tk.RefreshToken,
+ Expiry: tk.Expiry,
+ }
+ return t.WithExtra(tk.Raw), nil
+}
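The vendored clientcredentials package is presumably here to back the API-client (two-legged) authentication path. As a rough sketch only, a token fetch would look like this; the client ID/secret are the well-known demo pair already visible in the test fixture above, and the TokenURL assumes the standard gb1 API endpoint, so substitute real values in practice:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	conf := &clientcredentials.Config{
		ClientID:     "app-dkmch",
		ClientSecret: "uogoelzgt0nwawb",
		TokenURL:     "https://api.gb1.brightbox.com/token",
		Scopes:       []string{"infrastructure"},
	}
	token, err := conf.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(token.AccessToken)

	// conf.Client(ctx) returns an *http.Client that refreshes the token as needed.
}
```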
diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt
index b3ba353ef397..5c0ae87cad3c 100644
--- a/cluster-autoscaler/vendor/modules.txt
+++ b/cluster-autoscaler/vendor/modules.txt
@@ -708,6 +708,7 @@ golang.org/x/net/websocket
## explicit
golang.org/x/oauth2
golang.org/x/oauth2/authhandler
+golang.org/x/oauth2/clientcredentials
golang.org/x/oauth2/google
golang.org/x/oauth2/google/internal/externalaccount
golang.org/x/oauth2/internal