OCM-2641, OCM-17: Specify worker disk size for machine pools #244

Merged 3 commits on Oct 29, 2023
1 change: 1 addition & 0 deletions docs/resources/cluster_rosa_classic.md
@@ -88,6 +88,7 @@ resource "rhcs_cluster_rosa_classic" "rosa_sts_cluster" {
- `upgrade_acknowledgements_for` (String) Indicates acknowledgement of agreements required to upgrade the cluster version between minor versions (e.g. a value of "4.12" indicates acknowledgement of any agreements required to upgrade to OpenShift 4.12.z from 4.11 or before).
- `version` (String) Desired version of OpenShift for the cluster, for example '4.11.0'. If version is greater than the currently running version, an upgrade will be scheduled.
- `wait_for_create_complete` (Boolean) Wait until the cluster is either in a ready state or in an error state. The waiter has a timeout of 60 minutes. The default value is `false`.
- `worker_disk_size` (Number) Compute node root disk size, in GiB. (only valid during cluster creation)

### Read-Only

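For context, here is a minimal usage sketch of the new attribute. It is not part of this diff; the name, region, and account ID are placeholders, and the required `sts` block and other arguments are elided.

```hcl
resource "rhcs_cluster_rosa_classic" "example" {
  name           = "my-cluster"   # placeholder
  cloud_region   = "us-west-1"    # placeholder
  aws_account_id = "123456789012" # placeholder

  # Root disk size for the default (worker) machine pool, in GiB.
  # Only honored at cluster creation; omit it to let the OCM backend
  # apply the machine flavor's default size.
  worker_disk_size = 300

  # ... sts block and other required arguments elided ...
}
```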
1 change: 1 addition & 0 deletions docs/resources/machine_pool.md
@@ -35,6 +35,7 @@ resource "rhcs_machine_pool" "machine_pool" {

- `autoscaling_enabled` (Boolean) Enables autoscaling. If `true`, this variable requires you to set a maximum and minimum replicas range using the `max_replicas` and `min_replicas` variables.
- `availability_zone` (String) Select the availability zone in which to create a single AZ machine pool for a multi-AZ cluster.
- `disk_size` (Number) Root disk size, in GiB.
- `labels` (Map of String) Labels for the machine pool. Format should be a comma-separated list of 'key = value'. This list will overwrite any modifications made to node labels on an ongoing basis.
- `max_replicas` (Number) The maximum number of replicas for autoscaling functionality.
- `max_spot_price` (Number) Max Spot price.
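Likewise, a sketch of a machine pool using the new attribute; the cluster ID and sizing values are placeholders. Note that `disk_size` forces replacement of the pool if changed, per the `RequiresReplace` plan modifier added further down in this PR.

```hcl
resource "rhcs_machine_pool" "example" {
  cluster      = "1n2j3k4l5m6n7o8p9q0" # placeholder cluster ID
  name         = "my-pool"
  machine_type = "r5.xlarge"
  replicas     = 3

  # Root disk size in GiB; changing it after creation replaces the pool.
  disk_size = 256
}
```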
10 changes: 9 additions & 1 deletion internal/ocm/resource/cluster.go
@@ -36,14 +36,22 @@ func (c *Cluster) Build() (object *cmv1.Cluster, err error) {

func (c *Cluster) CreateNodes(autoScalingEnabled bool, replicas *int64, minReplicas *int64,
maxReplicas *int64, computeMachineType *string, labels map[string]string,
availabilityZones []string, multiAZ bool) error {
availabilityZones []string, multiAZ bool, workerDiskSize *int64) error {
nodes := cmv1.NewClusterNodes()
if computeMachineType != nil {
nodes.ComputeMachineType(
cmv1.NewMachineType().ID(*computeMachineType),
)
}

if workerDiskSize != nil {
nodes.ComputeRootVolume(
cmv1.NewRootVolume().AWS(
cmv1.NewAWSVolume().Size(int(*workerDiskSize)),
Member: Do we need to set a default value here, or just use the default value in the OCM backend? What is the behavior in the CLI?

Reply: In the CLI, if the size is not set or is identical to the flavor's default size, we do nothing and let the backend handle the defaults.

),
)
}

if labels != nil {
nodes.ComputeLabels(labels)
}
46 changes: 30 additions & 16 deletions internal/ocm/resource/cluster_test.go
@@ -15,22 +15,22 @@ var _ = Describe("Cluster", func() {
})
Context("CreateNodes validation", func() {
It("Autoscaling disabled minReplicas set - failure", func() {
err := cluster.CreateNodes(false, nil, pointer(int64(2)), nil, nil, nil, nil, false)
err := cluster.CreateNodes(false, nil, pointer(int64(2)), nil, nil, nil, nil, false, nil)
Member: Please add one test with a non-empty diskSize value.

Author: done

Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Autoscaling must be enabled in order to set min and max replicas"))
})
It("Autoscaling disabled maxReplicas set - failure", func() {
err := cluster.CreateNodes(false, nil, nil, pointer(int64(2)), nil, nil, nil, false)
err := cluster.CreateNodes(false, nil, nil, pointer(int64(2)), nil, nil, nil, false, nil)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Autoscaling must be enabled in order to set min and max replicas"))
})
It("Autoscaling disabled replicas smaller than 2 - failure", func() {
err := cluster.CreateNodes(false, pointer(int64(1)), nil, nil, nil, nil, nil, false)
err := cluster.CreateNodes(false, pointer(int64(1)), nil, nil, nil, nil, nil, false, nil)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Cluster requires at least 2 compute nodes"))
})
It("Autoscaling disabled default replicas - success", func() {
err := cluster.CreateNodes(false, nil, nil, nil, nil, nil, nil, false)
err := cluster.CreateNodes(false, nil, nil, nil, nil, nil, nil, false, nil)
Expect(err).NotTo(HaveOccurred())
ocmCluster, err := cluster.Build()
Expect(err).NotTo(HaveOccurred())
@@ -44,7 +44,7 @@
Expect(autoscaleCompute).To(BeNil())
})
It("Autoscaling disabled 3 replicas - success", func() {
err := cluster.CreateNodes(false, pointer(int64(3)), nil, nil, nil, nil, nil, false)
err := cluster.CreateNodes(false, pointer(int64(3)), nil, nil, nil, nil, nil, false, nil)
Expect(err).NotTo(HaveOccurred())
ocmCluster, err := cluster.Build()
Expect(err).NotTo(HaveOccurred())
@@ -58,12 +58,12 @@
Expect(autoscaleCompute).To(BeNil())
})
It("Autoscaling enabled replicas set - failure", func() {
err := cluster.CreateNodes(true, pointer(int64(2)), nil, nil, nil, nil, nil, false)
err := cluster.CreateNodes(true, pointer(int64(2)), nil, nil, nil, nil, nil, false, nil)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("When autoscaling is enabled, replicas should not be configured"))
})
It("Autoscaling enabled default minReplicas & maxReplicas - success", func() {
err := cluster.CreateNodes(true, nil, nil, nil, nil, nil, nil, false)
err := cluster.CreateNodes(true, nil, nil, nil, nil, nil, nil, false, nil)
Expect(err).NotTo(HaveOccurred())
ocmCluster, err := cluster.Build()
Expect(err).NotTo(HaveOccurred())
@@ -79,12 +79,12 @@
Expect(autoscaleCompute.MaxReplicas()).To(Equal(2))
})
It("Autoscaling enabled default maxReplicas smaller than minReplicas - failure", func() {
err := cluster.CreateNodes(true, nil, pointer(int64(4)), pointer(int64(3)), nil, nil, nil, false)
err := cluster.CreateNodes(true, nil, pointer(int64(4)), pointer(int64(3)), nil, nil, nil, false, nil)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("max-replicas must be greater or equal to min-replicas"))
})
It("Autoscaling enabled set minReplicas & maxReplicas - success", func() {
err := cluster.CreateNodes(true, nil, pointer(int64(2)), pointer(int64(4)), nil, nil, nil, false)
err := cluster.CreateNodes(true, nil, pointer(int64(2)), pointer(int64(4)), nil, nil, nil, false, nil)
Expect(err).NotTo(HaveOccurred())
ocmCluster, err := cluster.Build()
Expect(err).NotTo(HaveOccurred())
@@ -100,7 +100,7 @@
Expect(autoscaleCompute.MaxReplicas()).To(Equal(4))
})
It("Autoscaling disabled set ComputeMachineType - success", func() {
err := cluster.CreateNodes(false, nil, nil, nil, pointer("asdf"), nil, nil, false)
err := cluster.CreateNodes(false, nil, nil, nil, pointer("asdf"), nil, nil, false, nil)
Expect(err).NotTo(HaveOccurred())
ocmCluster, err := cluster.Build()
Expect(err).NotTo(HaveOccurred())
@@ -116,7 +116,7 @@
Expect(autoscaleCompute).To(BeNil())
})
It("Autoscaling disabled set compute labels - success", func() {
err := cluster.CreateNodes(false, nil, nil, nil, nil, map[string]string{"key1": "val1"}, nil, false)
err := cluster.CreateNodes(false, nil, nil, nil, nil, map[string]string{"key1": "val1"}, nil, false, nil)
Expect(err).NotTo(HaveOccurred())
ocmCluster, err := cluster.Build()
Expect(err).NotTo(HaveOccurred())
@@ -132,7 +132,7 @@
Expect(autoscaleCompute).To(BeNil())
})
It("Autoscaling disabled multiAZ false set one availability zone - success", func() {
err := cluster.CreateNodes(false, nil, nil, nil, nil, nil, []string{"us-east-1a"}, false)
err := cluster.CreateNodes(false, nil, nil, nil, nil, nil, []string{"us-east-1a"}, false, nil)
Expect(err).NotTo(HaveOccurred())
ocmCluster, err := cluster.Build()
Expect(err).NotTo(HaveOccurred())
@@ -147,17 +147,17 @@
Expect(autoscaleCompute).To(BeNil())
})
It("Autoscaling disabled multiAZ false set three availability zones - failure", func() {
err := cluster.CreateNodes(false, nil, nil, nil, nil, nil, []string{"us-east-1a", "us-east-1b", "us-east-1c"}, false)
err := cluster.CreateNodes(false, nil, nil, nil, nil, nil, []string{"us-east-1a", "us-east-1b", "us-east-1c"}, false, nil)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("The number of availability zones for a single AZ cluster should be 1, instead received: 3"))
})
It("Autoscaling disabled multiAZ true set three availability zones and two replicas - failure", func() {
err := cluster.CreateNodes(false, pointer(int64(2)), nil, nil, nil, nil, []string{"us-east-1a", "us-east-1b", "us-east-1c"}, true)
err := cluster.CreateNodes(false, pointer(int64(2)), nil, nil, nil, nil, []string{"us-east-1a", "us-east-1b", "us-east-1c"}, true, nil)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Multi AZ cluster requires at least 3 compute nodes"))
})
It("Autoscaling disabled multiAZ true set three availability zones and three replicas - success", func() {
err := cluster.CreateNodes(false, pointer(int64(3)), nil, nil, nil, nil, []string{"us-east-1a", "us-east-1b", "us-east-1c"}, true)
err := cluster.CreateNodes(false, pointer(int64(3)), nil, nil, nil, nil, []string{"us-east-1a", "us-east-1b", "us-east-1c"}, true, nil)
Expect(err).NotTo(HaveOccurred())
ocmCluster, err := cluster.Build()
Expect(err).NotTo(HaveOccurred())
@@ -172,10 +172,24 @@
Expect(autoscaleCompute).To(BeNil())
})
It("Autoscaling disabled multiAZ true set one zone - failure", func() {
err := cluster.CreateNodes(false, nil, nil, nil, nil, nil, []string{"us-east-1a", "us-east-1b", "us-east-1c"}, true)
err := cluster.CreateNodes(false, nil, nil, nil, nil, nil, []string{"us-east-1a", "us-east-1b", "us-east-1c"}, true, nil)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Multi AZ cluster requires at least 3 compute nodes"))
})
It("Custom disk size", func() {
diskSize := int64(543)
err := cluster.CreateNodes(false, pointer(int64(3)), nil, nil, nil, nil, nil, false, &diskSize)
Expect(err).NotTo(HaveOccurred())
ocmCluster, err := cluster.Build()
Expect(err).NotTo(HaveOccurred())
ocmClusterNode := ocmCluster.Nodes()
Expect(ocmClusterNode).NotTo(BeNil())
rootVolume := ocmClusterNode.ComputeRootVolume()
Expect(rootVolume).NotTo(BeNil())
awsVolume := rootVolume.AWS()
Expect(awsVolume).NotTo(BeNil())
Expect(awsVolume.Size()).To(Equal(int(diskSize)))
})
})
Context("CreateAWSBuilder validation", func() {
It("PrivateLink true subnets IDs empty - failure", func() {
7 changes: 6 additions & 1 deletion provider/clusterrosaclassic/cluster_rosa_classic_resource.go
@@ -252,6 +252,10 @@ func (r *ClusterRosaClassicResource) Schema(ctx context.Context, req resource.Sc
stringplanmodifier.RequiresReplace(),
},
},
"worker_disk_size": schema.Int64Attribute{
Member: We should throw an error if the user tries to update it.

Contributor: I think we can add a plan modifier that prevents this value from being changed.

Author: This option needs to be treated identically to what's being done in #228, since this setting configures the default machine pool. Since the framework upgrade, we have removed all of the ValueCannotBeChanged checks. Should I look into re-introducing that? And what about #228? Should that be changed as well? If so, I'd prefer to move these checks to another PR.

Member: I think it should also be changed in #228.

Description: "Compute node root disk size, in GiB. (only valid during cluster creation)",
Optional: true,
},
"default_mp_labels": schema.MapAttribute{
Description: "This value is the default/initial machine pool labels. Format should be a comma-separated list of '{\"key1\"=\"value1\", \"key2\"=\"value2\"}'. " +
"This list overwrites any modifications made to node labels on an ongoing basis. ",
@@ -586,9 +590,10 @@ func createClassicClusterObject(ctx context.Context,
if err != nil {
return nil, err
}
workerDiskSize := common.OptionalInt64(state.WorkerDiskSize)

if err = ocmClusterResource.CreateNodes(autoScalingEnabled, replicas, minReplicas, maxReplicas,
computeMachineType, labels, availabilityZones, multiAZ); err != nil {
computeMachineType, labels, availabilityZones, multiAZ, workerDiskSize); err != nil {
return nil, err
}

1 change: 1 addition & 0 deletions provider/clusterrosaclassic/cluster_rosa_classic_state.go
@@ -36,6 +36,7 @@ type ClusterRosaClassicState struct {
ChannelGroup types.String `tfsdk:"channel_group"`
CloudRegion types.String `tfsdk:"cloud_region"`
ComputeMachineType types.String `tfsdk:"compute_machine_type"`
WorkerDiskSize types.Int64 `tfsdk:"worker_disk_size"`
DefaultMPLabels types.Map `tfsdk:"default_mp_labels"`
Replicas types.Int64 `tfsdk:"replicas"`
ConsoleURL types.String `tfsdk:"console_url"`
27 changes: 27 additions & 0 deletions provider/machinepool/machine_pool_resource.go
@@ -32,6 +32,7 @@ import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/float64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
"github.com/hashicorp/terraform-plugin-framework/schema/validator"
@@ -187,6 +188,15 @@ func (r *MachinePoolResource) Schema(ctx context.Context, req resource.SchemaReq
stringplanmodifier.UseStateForUnknown(),
},
},
"disk_size": schema.Int64Attribute{
Description: "Root disk size, in GiB.",
Optional: true,
Computed: true,
PlanModifiers: []planmodifier.Int64{
int64planmodifier.RequiresReplace(),
int64planmodifier.UseStateForUnknown(),
},
},
},
}
}
@@ -255,6 +265,14 @@ func (r *MachinePoolResource) Create(ctx context.Context, req resource.CreateReq
builder := cmv1.NewMachinePool().ID(state.ID.ValueString()).InstanceType(state.MachineType.ValueString())
builder.ID(state.Name.ValueString())

if workerDiskSize := common.OptionalInt64(state.DiskSize); workerDiskSize != nil {
builder.RootVolume(
cmv1.NewRootVolume().AWS(
cmv1.NewAWSVolume().Size(int(*workerDiskSize)),
),
)
}

if err := setSpotInstances(state, builder); err != nil {
resp.Diagnostics.AddError(
"Cannot build machine pool",
@@ -791,6 +809,15 @@ func (r *MachinePoolResource) populateState(object *cmv1.MachinePool, state *Mac
} else {
state.SubnetID = types.StringValue("")
}

state.DiskSize = types.Int64Null()
if rv, ok := object.GetRootVolume(); ok {
if aws, ok := rv.GetAWS(); ok {
if workerDiskSize, ok := aws.GetSize(); ok {
state.DiskSize = types.Int64Value(int64(workerDiskSize))
}
}
}
}

func shouldPatchTaints(a, b []Taints) bool {
1 change: 1 addition & 0 deletions provider/machinepool/machine_pool_state.go
@@ -36,6 +36,7 @@ type MachinePoolState struct {
MultiAvailabilityZone types.Bool `tfsdk:"multi_availability_zone"`
AvailabilityZone types.String `tfsdk:"availability_zone"`
SubnetID types.String `tfsdk:"subnet_id"`
DiskSize types.Int64 `tfsdk:"disk_size"`
}

type Taints struct {
78 changes: 78 additions & 0 deletions subsystem/cluster_resource_rosa_test.go
@@ -829,6 +829,84 @@ var _ = Describe("rhcs_cluster_rosa_classic - create", func() {
Expect(resource).To(MatchJQ(".attributes.id", "1234")) // reconciled cluster has id of 1234
})

It("Creates basic cluster with custom worker disk size", func() {
// Prepare the server:
server.AppendHandlers(
CombineHandlers(
VerifyRequest(http.MethodGet, "/api/clusters_mgmt/v1/versions"),
RespondWithJSON(http.StatusOK, versionListPage1),
),
CombineHandlers(
VerifyRequest(http.MethodPost, "/api/clusters_mgmt/v1/clusters"),
VerifyJQ(`.name`, "my-cluster"),
VerifyJQ(`.cloud_provider.id`, "aws"),
VerifyJQ(`.region.id`, "us-west-1"),
VerifyJQ(`.product.id`, "rosa"),
VerifyJQ(`.properties.rosa_tf_version`, build.Version),
VerifyJQ(`.properties.rosa_tf_commit`, build.Commit),
VerifyJQ(`.nodes.compute_root_volume.aws.size`, 400.0),
RespondWithPatchedJSON(http.StatusCreated, template, `[
{
"op": "add",
"path": "/aws",
"value": {
"ec2_metadata_http_tokens": "optional",
"sts" : {
"oidc_endpoint_url": "https://127.0.0.2",
"thumbprint": "111111",
"role_arn": "",
"support_role_arn": "",
"instance_iam_roles" : {
"master_role_arn" : "",
"worker_role_arn" : ""
},
"operator_role_prefix" : "test"
}
}
},
{
"op": "add",
"path": "/nodes",
"value": {
"compute": 3,
"availability_zones": ["az"],
"compute_machine_type": {
"id": "r5.xlarge"
},
"compute_root_volume": {
"aws": {
"size": 400
}
}
}
}]`),
),
)

// Run the apply command:
terraform.Source(`
resource "rhcs_cluster_rosa_classic" "my_cluster" {
name = "my-cluster"
cloud_region = "us-west-1"
aws_account_id = "123"
sts = {
operator_role_prefix = "test"
role_arn = "",
support_role_arn = "",
instance_iam_roles = {
master_role_arn = "",
worker_role_arn = "",
}
}
worker_disk_size = 400
}
`)
Expect(terraform.Apply()).To(BeZero())
resource := terraform.Resource("rhcs_cluster_rosa_classic", "my_cluster")
Expect(resource).To(MatchJQ(".attributes.current_version", "openshift-4.8.0"))
Expect(resource).To(MatchJQ(".attributes.worker_disk_size", 400.0))
})

It("Creates basic cluster with properties", func() {
prop_key := "my_prop_key"
prop_val := "my_prop_val"
Expand Down