From ab8fcb04848fe351301e788d9e79ac86b8952b74 Mon Sep 17 00:00:00 2001 From: Yusuke KUOKA Date: Wed, 30 Jan 2019 15:52:59 +0900 Subject: [PATCH] Use pointers for ng.(DesiredCapacity|MinSize|MaxSize) Resolves #385 #395 --- pkg/apis/eksctl.io/v1alpha4/types.go | 8 ++-- pkg/cfn/builder/api_test.go | 2 +- pkg/cfn/builder/nodegroup.go | 62 ++++++++++++++----------- pkg/cfn/manager/nodegroup.go | 6 +-- pkg/cfn/manager/nodegroup_test.go | 3 +- pkg/ctl/cmdutils/nodegroup.go | 25 ++++++++-- pkg/ctl/create/cluster.go | 2 +- pkg/ctl/create/nodegroup.go | 2 +- pkg/ctl/scale/nodegroup.go | 10 +++- pkg/ctl/utils/wait_nodes.go | 8 +++- pkg/eks/nodegroup.go | 4 +- pkg/utils/kubeconfig/kubeconfig_test.go | 6 +-- 12 files changed, 89 insertions(+), 49 deletions(-) diff --git a/pkg/apis/eksctl.io/v1alpha4/types.go b/pkg/apis/eksctl.io/v1alpha4/types.go index 93d3270be40..bef678af235 100644 --- a/pkg/apis/eksctl.io/v1alpha4/types.go +++ b/pkg/apis/eksctl.io/v1alpha4/types.go @@ -288,7 +288,7 @@ func (c *ClusterConfig) NewNodeGroup() *NodeGroup { WithLocal: NewBoolTrue(), WithShared: NewBoolTrue(), }, - DesiredCapacity: DefaultNodeCount, + DesiredCapacity: nil, InstanceType: DefaultNodeType, VolumeSize: 0, VolumeType: DefaultNodeVolumeType, @@ -327,11 +327,11 @@ type NodeGroup struct { SecurityGroups *NodeGroupSGs `json:"securityGroups,omitempty"` // +optional - DesiredCapacity int `json:"desiredCapacity"` + DesiredCapacity *int `json:"desiredCapacity"` // +optional - MinSize int `json:"minSize,omitempty"` + MinSize *int `json:"minSize,omitempty"` // +optional - MaxSize int `json:"maxSize,omitempty"` + MaxSize *int `json:"maxSize,omitempty"` // +optional VolumeSize int `json:"volumeSize"` diff --git a/pkg/cfn/builder/api_test.go b/pkg/cfn/builder/api_test.go index fc90b753482..4f71281b18e 100644 --- a/pkg/cfn/builder/api_test.go +++ b/pkg/cfn/builder/api_test.go @@ -308,7 +308,7 @@ var _ = Describe("CloudFormation template builder API", func() { WithShared: api.NewBoolTrue(), 
AttachIDs: []string{}, }, - DesiredCapacity: 2, + DesiredCapacity: nil, VolumeSize: 2, VolumeType: api.NodeVolumeTypeIO1, IAM: &api.NodeGroupIAM{ diff --git a/pkg/cfn/builder/nodegroup.go b/pkg/cfn/builder/nodegroup.go index 2e0562c0bae..c2940cae0a7 100644 --- a/pkg/cfn/builder/nodegroup.go +++ b/pkg/cfn/builder/nodegroup.go @@ -59,25 +59,28 @@ func (n *NodeGroupResourceSet) AddAllResources() error { } n.userData = gfn.NewString(userData) - switch { - case n.spec.MinSize == 0 && n.spec.MaxSize == 0: - n.spec.MinSize = n.spec.DesiredCapacity - n.spec.MaxSize = n.spec.DesiredCapacity - case n.spec.MinSize > 0 && n.spec.MaxSize > 0: - if n.spec.DesiredCapacity == api.DefaultNodeCount { - msgPrefix := fmt.Sprintf("as --nodes-min=%d and --nodes-max=%d were given", n.spec.MinSize, n.spec.MaxSize) - if n.spec.DesiredCapacity < n.spec.MinSize { - n.spec.DesiredCapacity = n.spec.MaxSize - logger.Info("%s, --nodes=%d was set automatically as default value (--node=%d) was outside the set renge", - msgPrefix, n.spec.DesiredCapacity, api.DefaultNodeCount) - } else { - logger.Info("%s, default value of --nodes=%d was kept as it is within the set range", - msgPrefix, n.spec.DesiredCapacity) - } + // Ensure MinSize is set, as it is required by the ASG cfn resource + if n.spec.MinSize == nil { + if n.spec.DesiredCapacity == nil { + count := api.DefaultNodeCount + n.spec.MinSize = &count + logger.Info("--nodes-min=%d was set automatically", count) + } else { + n.spec.MinSize = n.spec.DesiredCapacity } - if n.spec.DesiredCapacity > n.spec.MaxSize { - return fmt.Errorf("cannot use --nodes-max=%d and --nodes=%d at the same time", n.spec.MaxSize, n.spec.DesiredCapacity) + } else if n.spec.DesiredCapacity != nil && *n.spec.DesiredCapacity < *n.spec.MinSize { + return fmt.Errorf("cannot use --nodes-min=%d and --nodes=%d at the same time", *n.spec.MinSize, *n.spec.DesiredCapacity) + } + + // Ensure MaxSize is set, as it is required by the ASG cfn resource + if n.spec.MaxSize == nil { + if 
n.spec.DesiredCapacity == nil { + n.spec.MaxSize = n.spec.MinSize } + } else if n.spec.DesiredCapacity != nil && *n.spec.DesiredCapacity > *n.spec.MaxSize { + return fmt.Errorf("cannot use --nodes-max=%d and --nodes=%d at the same time", *n.spec.MaxSize, *n.spec.DesiredCapacity) + } else if *n.spec.MaxSize < *n.spec.MinSize { + return fmt.Errorf("cannot use --nodes-min=%d and --nodes-max=%d at the same time", *n.spec.MinSize, *n.spec.MaxSize) } n.addResourcesForIAM() @@ -179,16 +182,23 @@ func (n *NodeGroupResourceSet) addResourcesForNodeGroup() error { }, ) } + ngProps := map[string]interface{}{ + "LaunchConfigurationName": refLC, + "VPCZoneIdentifier": vpcZoneIdentifier, + "Tags": tags, + } + if n.spec.DesiredCapacity != nil { + ngProps["DesiredCapacity"] = fmt.Sprintf("%d", *n.spec.DesiredCapacity) + } + if n.spec.MinSize != nil { + ngProps["MinSize"] = fmt.Sprintf("%d", *n.spec.MinSize) + } + if n.spec.MaxSize != nil { + ngProps["MaxSize"] = fmt.Sprintf("%d", *n.spec.MaxSize) + } n.newResource("NodeGroup", &awsCloudFormationResource{ - Type: "AWS::AutoScaling::AutoScalingGroup", - Properties: map[string]interface{}{ - "LaunchConfigurationName": refLC, - "DesiredCapacity": fmt.Sprintf("%d", n.spec.DesiredCapacity), - "MinSize": fmt.Sprintf("%d", n.spec.MinSize), - "MaxSize": fmt.Sprintf("%d", n.spec.MaxSize), - "VPCZoneIdentifier": vpcZoneIdentifier, - "Tags": tags, - }, + Type: "AWS::AutoScaling::AutoScalingGroup", + Properties: ngProps, UpdatePolicy: map[string]map[string]string{ "AutoScalingRollingUpdate": { "MinInstancesInService": "1", diff --git a/pkg/cfn/manager/nodegroup.go b/pkg/cfn/manager/nodegroup.go index a041d912855..b8f396dd05e 100644 --- a/pkg/cfn/manager/nodegroup.go +++ b/pkg/cfn/manager/nodegroup.go @@ -157,7 +157,7 @@ func (c *StackCollection) ScaleNodeGroup(ng *api.NodeGroup) error { currentMaxSize := gjson.Get(template, maxSizePath) currentMinSize := gjson.Get(template, minSizePath) - if int64(ng.DesiredCapacity) == currentCapacity.Int() { 
+ if ng.DesiredCapacity != nil && int64(*ng.DesiredCapacity) == currentCapacity.Int() { logger.Info("desired capacity of nodegroup %q in cluster %q is already %d", ng.Name, clusterName, ng.DesiredCapacity) return nil } @@ -171,7 +171,7 @@ func (c *StackCollection) ScaleNodeGroup(ng *api.NodeGroup) error { descriptionBuffer.WriteString(fmt.Sprintf("desired capacity from %s to %d", currentCapacity.Str, ng.DesiredCapacity)) // If the desired number of nodes is less than the min then update the min - if int64(ng.DesiredCapacity) < currentMinSize.Int() { + if int64(*ng.DesiredCapacity) < currentMinSize.Int() { newMinSize := fmt.Sprintf("%d", ng.DesiredCapacity) template, err = sjson.Set(template, minSizePath, newMinSize) if err != nil { @@ -180,7 +180,7 @@ func (c *StackCollection) ScaleNodeGroup(ng *api.NodeGroup) error { descriptionBuffer.WriteString(fmt.Sprintf(", min size from %s to %d", currentMinSize.Str, ng.DesiredCapacity)) } // If the desired number of nodes is greater than the max then update the max - if int64(ng.DesiredCapacity) > currentMaxSize.Int() { + if int64(*ng.DesiredCapacity) > currentMaxSize.Int() { newMaxSize := fmt.Sprintf("%d", ng.DesiredCapacity) template, err = sjson.Set(template, maxSizePath, newMaxSize) if err != nil { diff --git a/pkg/cfn/manager/nodegroup_test.go b/pkg/cfn/manager/nodegroup_test.go index aef3483448d..325d750ebfa 100644 --- a/pkg/cfn/manager/nodegroup_test.go +++ b/pkg/cfn/manager/nodegroup_test.go @@ -76,7 +76,8 @@ var _ = Describe("StackCollection NodeGroup", func() { It("should be a no-op if attempting to scale to the existing desired capacity", func() { ng.Name = "12345" - ng.DesiredCapacity = 2 + cap := 2 + ng.DesiredCapacity = &cap err := sc.ScaleNodeGroup(ng) diff --git a/pkg/ctl/cmdutils/nodegroup.go b/pkg/ctl/cmdutils/nodegroup.go index ca6e597bff6..344c60e81db 100644 --- a/pkg/ctl/cmdutils/nodegroup.go +++ b/pkg/ctl/cmdutils/nodegroup.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + "github.com/spf13/cobra" 
"github.com/spf13/pflag" "github.com/weaveworks/eksctl/pkg/ami" @@ -16,13 +17,29 @@ const ( ) // AddCommonCreateNodeGroupFlags adds common flags for creating a node group -func AddCommonCreateNodeGroupFlags(fs *pflag.FlagSet, p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup) { +func AddCommonCreateNodeGroupFlags(cmd *cobra.Command, fs *pflag.FlagSet, p *api.ProviderConfig, cfg *api.ClusterConfig, ng *api.NodeGroup) { + var desiredCapacity int + var minSize int + var maxSize int + fs.StringVarP(&ng.InstanceType, "node-type", "t", defaultNodeType, "node instance type") - fs.IntVarP(&ng.DesiredCapacity, "nodes", "N", api.DefaultNodeCount, "total number of nodes (for a static ASG)") + fs.IntVarP(&desiredCapacity, "nodes", "N", api.DefaultNodeCount, "total number of nodes (for a static ASG)") // TODO: https://github.com/weaveworks/eksctl/issues/28 - fs.IntVarP(&ng.MinSize, "nodes-min", "m", 0, "minimum nodes in ASG") - fs.IntVarP(&ng.MaxSize, "nodes-max", "M", 0, "maximum nodes in ASG") + fs.IntVarP(&minSize, "nodes-min", "m", api.DefaultNodeCount, "minimum nodes in ASG") + fs.IntVarP(&maxSize, "nodes-max", "M", api.DefaultNodeCount, "maximum nodes in ASG") + + cmd.PreRun = func(cmd *cobra.Command, args []string) { + if f := cmd.Flag("nodes"); f.Changed { + ng.DesiredCapacity = &desiredCapacity + } + if f := cmd.Flag("nodes-min"); f.Changed { + ng.MinSize = &minSize + } + if f := cmd.Flag("nodes-max"); f.Changed { + ng.MaxSize = &maxSize + } + } fs.IntVar(&ng.VolumeSize, "node-volume-size", ng.VolumeSize, "node volume size in GB") fs.StringVar(&ng.VolumeType, "node-volume-type", ng.VolumeType, fmt.Sprintf("node volume type (valid options: %s)", strings.Join(api.SupportedNodeVolumeTypes(), ", "))) diff --git a/pkg/ctl/create/cluster.go b/pkg/ctl/create/cluster.go index 412473548c8..7de2b13b0dc 100644 --- a/pkg/ctl/create/cluster.go +++ b/pkg/ctl/create/cluster.go @@ -73,7 +73,7 @@ func createClusterCmd(g *cmdutils.Grouping) *cobra.Command { 
group.InFlagSet("Initial nodegroup", func(fs *pflag.FlagSet) { fs.StringVar(&ng.Name, "nodegroup-name", "", fmt.Sprintf("name of the nodegroup (generated if unspecified, e.g. %q)", exampleNodeGroupName)) fs.BoolVar(&withoutNodeGroup, "without-nodegroup", false, "if set, initial nodegroup will not be created") - cmdutils.AddCommonCreateNodeGroupFlags(fs, p, cfg, ng) + cmdutils.AddCommonCreateNodeGroupFlags(cmd, fs, p, cfg, ng) }) group.InFlagSet("Cluster and nodegroup add-ons", func(fs *pflag.FlagSet) { diff --git a/pkg/ctl/create/nodegroup.go b/pkg/ctl/create/nodegroup.go index a2b888bbd5b..4983fd7e42c 100644 --- a/pkg/ctl/create/nodegroup.go +++ b/pkg/ctl/create/nodegroup.go @@ -44,7 +44,7 @@ func createNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("New nodegroup", func(fs *pflag.FlagSet) { fs.StringVarP(&ng.Name, "name", "n", "", fmt.Sprintf("name of the new nodegroup (generated if unspecified, e.g. %q)", exampleNodeGroupName)) - cmdutils.AddCommonCreateNodeGroupFlags(fs, p, cfg, ng) + cmdutils.AddCommonCreateNodeGroupFlags(cmd, fs, p, cfg, ng) }) group.InFlagSet("IAM addons", func(fs *pflag.FlagSet) { diff --git a/pkg/ctl/scale/nodegroup.go b/pkg/ctl/scale/nodegroup.go index 9d359ce1b72..2b67c11acb8 100644 --- a/pkg/ctl/scale/nodegroup.go +++ b/pkg/ctl/scale/nodegroup.go @@ -35,7 +35,13 @@ func scaleNodeGroupCmd(g *cmdutils.Grouping) *cobra.Command { fs.StringVar(&cfg.Metadata.Name, "cluster", "", "EKS cluster name") fs.StringVarP(&ng.Name, "name", "n", "", "Name of the nodegroup to scale") - fs.IntVarP(&ng.DesiredCapacity, "nodes", "N", -1, "total number of nodes (scale to this number)") + var desiredCapacity int + fs.IntVarP(&desiredCapacity, "nodes", "N", -1, "total number of nodes (scale to this number)") + cmd.PreRun = func(cmd *cobra.Command, args []string) { + if f := cmd.Flag("nodes"); f.Changed { + ng.DesiredCapacity = &desiredCapacity + } + } cmdutils.AddRegionFlag(fs, p) }) @@ -70,7 +76,7 @@ func doScaleNodeGroup(p 
*api.ProviderConfig, cfg *api.ClusterConfig, ng *api.Nod return fmt.Errorf("--name must be set") } - if ng.DesiredCapacity < 0 { + if ng.DesiredCapacity == nil || *ng.DesiredCapacity < 0 { return fmt.Errorf("number of nodes must be 0 or greater. Use the --nodes/-N flag") } diff --git a/pkg/ctl/utils/wait_nodes.go b/pkg/ctl/utils/wait_nodes.go index bd10c90a4fa..06e443e5741 100644 --- a/pkg/ctl/utils/wait_nodes.go +++ b/pkg/ctl/utils/wait_nodes.go @@ -36,7 +36,13 @@ func waitNodesCmd(g *cmdutils.Grouping) *cobra.Command { group.InFlagSet("General", func(fs *pflag.FlagSet) { fs.StringVar(&waitNodesKubeconfigPath, "kubeconfig", "kubeconfig", "path to read kubeconfig") - fs.IntVarP(&ng.MinSize, "nodes-min", "m", api.DefaultNodeCount, "minimum number of nodes to wait for") + var minSize int + fs.IntVarP(&minSize, "nodes-min", "m", api.DefaultNodeCount, "minimum number of nodes to wait for") + cmd.PreRun = func(cmd *cobra.Command, args []string) { + if f := cmd.Flag("nodes-min"); f.Changed { + ng.MinSize = &minSize + } + } fs.DurationVar(&p.WaitTimeout, "timeout", api.DefaultWaitTimeout, "how long to wait") }) diff --git a/pkg/eks/nodegroup.go b/pkg/eks/nodegroup.go index cf26af28f3c..07b8a7670ed 100644 --- a/pkg/eks/nodegroup.go +++ b/pkg/eks/nodegroup.go @@ -81,7 +81,7 @@ func getNodes(clientSet *clientset.Clientset, ng *api.NodeGroup) (int, error) { // WaitForNodes waits till the nodes are ready func (c *ClusterProvider) WaitForNodes(clientSet *clientset.Clientset, ng *api.NodeGroup) error { - if ng.MinSize == 0 { + if ng.MinSize == nil || *ng.MinSize == 0 { return nil } timer := time.After(c.Provider.WaitTimeout()) @@ -97,7 +97,7 @@ func (c *ClusterProvider) WaitForNodes(clientSet *clientset.Clientset, ng *api.N } logger.Info("waiting for at least %d node(s) to become ready in %q", ng.MinSize, ng.Name) - for !timeout && counter <= ng.MinSize { + for !timeout && counter <= *ng.MinSize { select { case event := <-watcher.ResultChan(): logger.Debug("event = %#v", event) diff --git 
a/pkg/utils/kubeconfig/kubeconfig_test.go b/pkg/utils/kubeconfig/kubeconfig_test.go index 5e12e3c8c47..fd10ffbc09b 100644 --- a/pkg/utils/kubeconfig/kubeconfig_test.go +++ b/pkg/utils/kubeconfig/kubeconfig_test.go @@ -146,9 +146,9 @@ var _ = Describe("Kubeconfig", func() { SSHPublicKeyPath: "~/.ssh/id_rsa.pub", SSHPublicKey: []uint8(nil), SSHPublicKeyName: "", - DesiredCapacity: 2, - MinSize: 0, - MaxSize: 0, + DesiredCapacity: nil, + MinSize: nil, + MaxSize: nil, MaxPodsPerNode: 0, IAM: &eksctlapi.NodeGroupIAM{ AttachPolicyARNs: []string(nil),