diff --git a/docs/resources/cluster_rosa_classic.md b/docs/resources/cluster_rosa_classic.md
index 7558a1aa..78efaee4 100644
--- a/docs/resources/cluster_rosa_classic.md
+++ b/docs/resources/cluster_rosa_classic.md
@@ -77,6 +77,7 @@ resource "rhcs_cluster_rosa_classic" "rosa_sts_cluster" {
 - `host_prefix` (Number) Length of the prefix of the subnet assigned to each node. After the creation of the resource, it is not possible to update the attribute value.
 - `kms_key_arn` (String) Used to encrypt root volume of compute node pools. The key ARN is the Amazon Resource Name (ARN) of a AWS Key Management Service (KMS) Key. It is a unique, fully qualified identifier for the AWS KMS Key. A key ARN includes the AWS account, Region, and the key ID(optional). After the creation of the resource, it is not possible to update the attribute value.
 - `machine_cidr` (String) Block of IP addresses for nodes. After the creation of the resource, it is not possible to update the attribute value.
+- `max_cluster_wait_timeout_in_minutes` (Number) Maximum duration in minutes to wait for the cluster to be in a ready state. Must be a positive number; defaults to 60.
 - `max_replicas` (Number) Maximum replicas of worker nodes in a machine pool. This attribute specifically applies to the Worker Machine Pool and becomes irrelevant once the resource is created. Any modifications to the initial Machine Pool should be made through the Terraform imported Machine Pool resource. For more details, refer to [Worker Machine Pool in ROSA Cluster](../guides/worker-machine-pool.md)
 - `min_replicas` (Number) Minimum replicas of worker nodes in a machine pool. This attribute specifically applies to the Worker Machine Pool and becomes irrelevant once the resource is created. Any modifications to the initial Machine Pool should be made through the Terraform imported Machine Pool resource. For more details, refer to [Worker Machine Pool in ROSA Cluster](../guides/worker-machine-pool.md)
 - `multi_az` (Boolean) Indicates if the cluster should be deployed to multiple availability zones. Default value is 'false'. This attribute specifically applies to the Worker Machine Pool and becomes irrelevant once the resource is created. Any modifications to the initial Machine Pool should be made through the Terraform imported Machine Pool resource. For more details, refer to [Worker Machine Pool in ROSA Cluster](../guides/worker-machine-pool.md)
diff --git a/docs/resources/cluster_rosa_hcp.md b/docs/resources/cluster_rosa_hcp.md
index 119c327f..a87507a5 100644
--- a/docs/resources/cluster_rosa_hcp.md
+++ b/docs/resources/cluster_rosa_hcp.md
@@ -75,6 +75,8 @@ resource "rhcs_cluster_rosa_hcp" "rosa_sts_cluster" {
 - `host_prefix` (Number) Length of the prefix of the subnet assigned to each node. After the creation of the resource, it is not possible to update the attribute value.
 - `kms_key_arn` (String) Used to encrypt root volume of compute node pools. The key ARN is the Amazon Resource Name (ARN) of a AWS Key Management Service (KMS) Key. It is a unique, fully qualified identifier for the AWS KMS Key. A key ARN includes the AWS account, Region, and the key ID(optional). After the creation of the resource, it is not possible to update the attribute value.
 - `machine_cidr` (String) Block of IP addresses for nodes. After the creation of the resource, it is not possible to update the attribute value.
+- `max_hcp_cluster_wait_timeout_in_minutes` (Number) Maximum duration in minutes to wait for an HCP cluster to be in a ready state. Must be a positive number; defaults to 45.
+- `max_machinepool_wait_timeout_in_minutes` (Number) Maximum duration in minutes to wait for machine pools to be in a ready state. Must be a positive number; defaults to 60.
 - `pod_cidr` (String) Block of IP addresses for pods. After the creation of the resource, it is not possible to update the attribute value.
 - `private` (Boolean) Provides private connectivity from your cluster's VPC to Red Hat SRE, without exposing traffic to the public internet. After the creation of the resource, it is not possible to update the attribute value.
 - `properties` (Map of String) User defined properties. It is essential to include property 'role_creator_arn' with the value of the user creating the cluster. Example: properties = {rosa_creator_arn = data.aws_caller_identity.current.arn}
diff --git a/provider/clusterrosa/classic/cluster_rosa_classic_resource.go b/provider/clusterrosa/classic/cluster_rosa_classic_resource.go
index 27213a39..7dc906c5 100644
--- a/provider/clusterrosa/classic/cluster_rosa_classic_resource.go
+++ b/provider/clusterrosa/classic/cluster_rosa_classic_resource.go
@@ -457,6 +457,10 @@ func (r *ClusterRosaClassicResource) Schema(ctx context.Context, req resource.Sc
 				Description: "Wait until the cluster is either in a ready state or in an error state. The waiter has a timeout of 60 minutes, with the default value set to false",
 				Optional:    true,
 			},
+			"max_cluster_wait_timeout_in_minutes": schema.Int64Attribute{
+				Description: "Maximum duration in minutes to wait for the cluster to be in a ready state. Must be a positive number; defaults to 60.",
+				Optional:    true,
+			},
 		},
 	}
 }
@@ -883,7 +887,16 @@ func (r *ClusterRosaClassicResource) Create(ctx context.Context, request resourc
 	}
 
 	if common.HasValue(state.WaitForCreateComplete) && state.WaitForCreateComplete.ValueBool() {
-		object, err = r.ClusterWait.WaitForClusterToBeReady(ctx, object.ID(), rosa.DefaultWaitTimeoutInMinutes)
+		timeOut := common.OptionalInt64(state.MaxClusterWaitTimeoutInMinutes)
+		timeOut, err = common.ValidateTimeout(timeOut, rosa.MaxClusterWaitTimeoutInMinutes)
+		if err != nil {
+			response.Diagnostics.AddError(
+				"Invalid max_cluster_wait_timeout_in_minutes value",
+				fmt.Sprintf("Can't poll cluster state: %v", err),
+			)
+			return
+		}
+		object, err = r.ClusterWait.WaitForClusterToBeReady(ctx, object.ID(), *timeOut)
 		if err != nil {
 			response.Diagnostics.AddError(
 				"Waiting for cluster creation finished with error",
@@ -1372,7 +1385,7 @@ func (r *ClusterRosaClassicResource) Delete(ctx context.Context, request resourc
 	if common.HasValue(state.DisableWaitingInDestroy) && state.DisableWaitingInDestroy.ValueBool() {
 		tflog.Info(ctx, "Waiting for destroy to be completed, is disabled")
 	} else {
-		timeout := rosa.DefaultWaitTimeoutInMinutes
+		timeout := rosa.MaxClusterWaitTimeoutInMinutes
 		if common.HasValue(state.DestroyTimeout) {
 			if state.DestroyTimeout.ValueInt64() <= 0 {
 				response.Diagnostics.AddWarning(rosa.NonPositiveTimeoutSummary, fmt.Sprintf(rosa.NonPositiveTimeoutFormat, state.ID.ValueString()))
diff --git a/provider/clusterrosa/classic/cluster_rosa_classic_state.go b/provider/clusterrosa/classic/cluster_rosa_classic_state.go
index e7b18fbc..94c5e272 100644
--- a/provider/clusterrosa/classic/cluster_rosa_classic_state.go
+++ b/provider/clusterrosa/classic/cluster_rosa_classic_state.go
@@ -76,7 +76,8 @@ type ClusterRosaClassicState struct {
 	UpgradeAcksFor types.String `tfsdk:"upgrade_acknowledgements_for"`
 
-	DisableWaitingInDestroy types.Bool  `tfsdk:"disable_waiting_in_destroy"`
-	DestroyTimeout          types.Int64 `tfsdk:"destroy_timeout"`
-	WaitForCreateComplete   types.Bool  `tfsdk:"wait_for_create_complete"`
+	DisableWaitingInDestroy        types.Bool  `tfsdk:"disable_waiting_in_destroy"`
+	DestroyTimeout                 types.Int64 `tfsdk:"destroy_timeout"`
+	WaitForCreateComplete          types.Bool  `tfsdk:"wait_for_create_complete"`
+	MaxClusterWaitTimeoutInMinutes types.Int64 `tfsdk:"max_cluster_wait_timeout_in_minutes"`
 }
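The Create path above feeds the optional attribute through `common.OptionalInt64` before validating it. That helper already exists in the provider and is not part of this diff; a plausible shape is sketched below, for context only, so the nil-means-unset convention relied on by `ValidateTimeout` is clear:

```go
package common

import "github.com/hashicorp/terraform-plugin-framework/types"

// OptionalInt64 converts a Terraform Int64 attribute into a *int64,
// returning nil when the practitioner left the attribute unset.
// (Sketch of the pre-existing helper; the real implementation may differ.)
func OptionalInt64(v types.Int64) *int64 {
	if v.IsNull() || v.IsUnknown() {
		return nil
	}
	val := v.ValueInt64()
	return &val
}
```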
diff --git a/provider/clusterrosa/common/consts.go b/provider/clusterrosa/common/consts.go
index 518e1de9..4ee8979b 100644
--- a/provider/clusterrosa/common/consts.go
+++ b/provider/clusterrosa/common/consts.go
@@ -15,11 +15,12 @@ const (
 	PropertyRosaTfCommit   = tagsPrefix + "tf_commit"
 	PropertyRosaCreatorArn = tagsPrefix + "creator_arn"
 
-	DefaultWaitTimeoutForHCPControlPlaneInMinutes = int64(45)
-	DefaultWaitTimeoutInMinutes                   = int64(60)
-	DefaultPollingIntervalInMinutes               = 2
-	NonPositiveTimeoutSummary                     = "Can't poll cluster state with a non-positive timeout"
-	NonPositiveTimeoutFormat                      = "Can't poll state of cluster with identifier '%s', the timeout that was set is not a positive number"
+	MaxHCPClusterWaitTimeoutInMinutes  = int64(45)
+	MaxClusterWaitTimeoutInMinutes     = int64(60)
+	MaxMachinePoolWaitTimeoutInMinutes = int64(60)
+	DefaultPollingIntervalInMinutes    = 2
+	NonPositiveTimeoutSummary          = "Can't poll cluster state with a non-positive timeout"
+	NonPositiveTimeoutFormat           = "Can't poll state of cluster with identifier '%s', the timeout that was set is not a positive number"
 
 	MaxClusterNameLength         = 54
 	MaxClusterDomainPrefixLength = 15
diff --git a/provider/clusterrosa/hcp/resource.go b/provider/clusterrosa/hcp/resource.go
index 554f4804..d65f7569 100644
--- a/provider/clusterrosa/hcp/resource.go
+++ b/provider/clusterrosa/hcp/resource.go
@@ -331,6 +331,14 @@ func (r *ClusterRosaHcpResource) Schema(ctx context.Context, req resource.Schema
 				Description: "Wait until the cluster standard compute pools are created. The waiter has a timeout of 60 minutes, with the default value set to false. This can only be provided when also waiting for create completion.",
 				Optional:    true,
 			},
+			"max_hcp_cluster_wait_timeout_in_minutes": schema.Int64Attribute{
+				Description: "Maximum duration in minutes to wait for an HCP cluster to be in a ready state. Must be a positive number; defaults to 45.",
+				Optional:    true,
+			},
+			"max_machinepool_wait_timeout_in_minutes": schema.Int64Attribute{
+				Description: "Maximum duration in minutes to wait for machine pools to be in a ready state. Must be a positive number; defaults to 60.",
+				Optional:    true,
+			},
 			"create_admin_user": schema.BoolAttribute{
 				Description: "Indicates if create cluster admin user. Set it true to create cluster admin user with default username `cluster-admin` " +
 					"and generated password. It will be ignored if `admin_credentials` is set." + common.ValueCannotBeChangedStringDescription,
@@ -418,7 +426,7 @@ func (r *ClusterRosaHcpResource) Configure(ctx context.Context, req resource.Con
 	if !ok {
 		resp.Diagnostics.AddError(
 			"Unexpected Resource Configure Type",
-			fmt.Sprintf("Expected *sdk.Connaction, got: %T. Please report this issue to the provider developers.", req.ProviderData),
+			fmt.Sprintf("Expected *sdk.Connection, got: %T. Please report this issue to the provider developers.", req.ProviderData),
 		)
 		return
 	}
@@ -814,7 +822,16 @@ func (r *ClusterRosaHcpResource) Create(ctx context.Context, request resource.Cr
 
 	if shouldWaitCreationComplete {
 		tflog.Info(ctx, "Waiting for cluster to get ready")
-		object, err = r.ClusterWait.WaitForClusterToBeReady(ctx, object.ID(), rosa.DefaultWaitTimeoutForHCPControlPlaneInMinutes)
+		timeOut := common.OptionalInt64(state.MaxHCPClusterWaitTimeoutInMinutes)
+		timeOut, err = common.ValidateTimeout(timeOut, rosa.MaxHCPClusterWaitTimeoutInMinutes)
+		if err != nil {
+			response.Diagnostics.AddError(
+				"Invalid max_hcp_cluster_wait_timeout_in_minutes value",
+				fmt.Sprintf("Can't poll cluster state: %v", err),
+			)
+			return
+		}
+		object, err = r.ClusterWait.WaitForClusterToBeReady(ctx, object.ID(), *timeOut)
 		if err != nil {
 			response.Diagnostics.AddError(
 				"Waiting for cluster creation finished with error",
@@ -828,7 +845,16 @@ func (r *ClusterRosaHcpResource) Create(ctx context.Context, request resource.Cr
 	}
 	if shouldWaitComputeNodesComplete {
 		tflog.Info(ctx, "Waiting for standard compute nodes to get ready")
-		object, err = r.ClusterWait.WaitForStdComputeNodesToBeReady(ctx, object.ID(), rosa.DefaultWaitTimeoutInMinutes)
+		timeOut := common.OptionalInt64(state.MaxMachinePoolWaitTimeoutInMinutes)
+		timeOut, err = common.ValidateTimeout(timeOut, rosa.MaxMachinePoolWaitTimeoutInMinutes)
+		if err != nil {
+			response.Diagnostics.AddError(
+				"Invalid max_machinepool_wait_timeout_in_minutes value",
+				fmt.Sprintf("Can't poll machine pool state: %v", err),
+			)
+			return
+		}
+		object, err = r.ClusterWait.WaitForStdComputeNodesToBeReady(ctx, object.ID(), *timeOut)
 		if err != nil {
 			response.Diagnostics.AddError(
 				"Waiting for std compute nodes completion finished with error",
@@ -1333,7 +1359,7 @@ func (r *ClusterRosaHcpResource) Delete(ctx context.Context, request resource.De
 	if common.HasValue(state.DisableWaitingInDestroy) && state.DisableWaitingInDestroy.ValueBool() {
 		tflog.Info(ctx, "Waiting for destroy to be completed, is disabled")
 	} else {
-		timeout := rosa.DefaultWaitTimeoutInMinutes
+		timeout := rosa.MaxClusterWaitTimeoutInMinutes
 		if common.HasValue(state.DestroyTimeout) {
 			if state.DestroyTimeout.ValueInt64() <= 0 {
 				response.Diagnostics.AddWarning(rosa.NonPositiveTimeoutSummary, fmt.Sprintf(rosa.NonPositiveTimeoutFormat, state.ID.ValueString()))
diff --git a/provider/clusterrosa/hcp/state.go b/provider/clusterrosa/hcp/state.go
index c4d7a3a2..b398a47b 100644
--- a/provider/clusterrosa/hcp/state.go
+++ b/provider/clusterrosa/hcp/state.go
@@ -55,10 +55,12 @@ type ClusterRosaHcpState struct {
 	UpgradeAcksFor types.String `tfsdk:"upgrade_acknowledgements_for"`
 
 	// Meta fields - not related to cluster spec
-	DisableWaitingInDestroy        types.Bool  `tfsdk:"disable_waiting_in_destroy"`
-	DestroyTimeout                 types.Int64 `tfsdk:"destroy_timeout"`
-	WaitForCreateComplete          types.Bool  `tfsdk:"wait_for_create_complete"`
-	WaitForStdComputeNodesComplete types.Bool  `tfsdk:"wait_for_std_compute_nodes_complete"`
+	DisableWaitingInDestroy            types.Bool  `tfsdk:"disable_waiting_in_destroy"`
+	DestroyTimeout                     types.Int64 `tfsdk:"destroy_timeout"`
+	WaitForCreateComplete              types.Bool  `tfsdk:"wait_for_create_complete"`
+	WaitForStdComputeNodesComplete     types.Bool  `tfsdk:"wait_for_std_compute_nodes_complete"`
+	MaxHCPClusterWaitTimeoutInMinutes  types.Int64 `tfsdk:"max_hcp_cluster_wait_timeout_in_minutes"`
+	MaxMachinePoolWaitTimeoutInMinutes types.Int64 `tfsdk:"max_machinepool_wait_timeout_in_minutes"`
 
 	// Admin user fields
 	CreateAdminUser types.Bool `tfsdk:"create_admin_user"`
diff --git a/provider/common/cluster_waiter.go b/provider/common/cluster_waiter.go
index 0efd1f9d..4ec51127 100644
--- a/provider/common/cluster_waiter.go
+++ b/provider/common/cluster_waiter.go
@@ -173,3 +173,15 @@ func pollClusterState(clusterId string, ctx context.Context, timeout int64, clus
 
 	return object, nil
 }
+
+// ValidateTimeout normalizes a user-supplied wait timeout: a nil value falls
+// back to defaultTimeout, and non-positive values are rejected.
+func ValidateTimeout(timeOut *int64, defaultTimeout int64) (*int64, error) {
+	if timeOut == nil {
+		return &defaultTimeout, nil
+	}
+	if *timeOut <= 0 {
+		return nil, fmt.Errorf("timeout must be greater than 0 minutes")
+	}
+	return timeOut, nil
+}
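Since `ValidateTimeout` is new shared logic used by both the classic and HCP resources, a table-driven test could pin down its contract. A minimal sketch (hypothetical file `provider/common/cluster_waiter_test.go`, not part of this diff):

```go
package common

import "testing"

func TestValidateTimeout(t *testing.T) {
	fortyFive := int64(45)
	zero := int64(0)

	cases := []struct {
		name    string
		in      *int64
		want    int64
		wantErr bool
	}{
		{name: "nil falls back to the default", in: nil, want: 60},
		{name: "positive values pass through", in: &fortyFive, want: 45},
		{name: "non-positive values are rejected", in: &zero, wantErr: true},
	}
	for _, tc := range cases {
		got, err := ValidateTimeout(tc.in, 60)
		if tc.wantErr {
			if err == nil {
				t.Errorf("%s: expected an error, got %d", tc.name, *got)
			}
			continue
		}
		if err != nil {
			t.Errorf("%s: unexpected error: %v", tc.name, err)
			continue
		}
		if *got != tc.want {
			t.Errorf("%s: got %d, want %d", tc.name, *got, tc.want)
		}
	}
}
```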
diff --git a/provider/identityprovider/github.go b/provider/identityprovider/github.go
index 4838f6cb..f11d386e 100644
--- a/provider/identityprovider/github.go
+++ b/provider/identityprovider/github.go
@@ -89,19 +89,19 @@ func githubHostnameValidator() validator.String {
 		hostname := req.ConfigValue
 		hostnameStr := hostname.ValueString()
 		if hostnameStr == "" {
-				return
+			return
 		}
 		if hostnameStr == "github.com" || strings.HasSuffix(hostnameStr, ".github.com") {
 			resp.Diagnostics.AddAttributeError(req.Path, "invalid hostname",
-					fmt.Sprintf("'%s' hostname cannot be equal to [*.]github.com", hostnameStr),
-				)
-				return
+				fmt.Sprintf("'%s' hostname cannot be equal to [*.]github.com", hostnameStr),
+			)
+			return
 		}
 		if !(len(validation.IsDNS1123Subdomain(hostnameStr)) == 0 || netutils.ParseIPSloppy(hostnameStr) != nil) {
 			resp.Diagnostics.AddAttributeError(req.Path, "invalid hostname",
-					fmt.Sprintf("'%s' hostname must be a valid DNS subdomain or IP address", hostnameStr),
-				)
-				return
+				fmt.Sprintf("'%s' hostname must be a valid DNS subdomain or IP address", hostnameStr),
+			)
+			return
 		}
 	})
 }
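The `github.go` hunk above is an indentation-only cleanup, but the validator it touches is easy to misread. A standalone sketch of the acceptance rule, using the same `k8s.io` helpers the file imports (`hostnameAllowed` is a hypothetical name introduced here for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/validation"
	netutils "k8s.io/utils/net"
)

// hostnameAllowed mirrors the three checks in githubHostnameValidator:
// an empty value passes, [*.]github.com is rejected, and anything else
// must be a valid DNS-1123 subdomain or an IP address.
func hostnameAllowed(h string) bool {
	if h == "" {
		return true
	}
	if h == "github.com" || strings.HasSuffix(h, ".github.com") {
		return false
	}
	return len(validation.IsDNS1123Subdomain(h)) == 0 || netutils.ParseIPSloppy(h) != nil
}

func main() {
	for _, h := range []string{"ghe.example.com", "10.0.0.1", "github.com", "bad_host"} {
		fmt.Printf("%-18s %v\n", h, hostnameAllowed(h))
	}
	// Output:
	// ghe.example.com    true
	// 10.0.0.1           true
	// github.com         false
	// bad_host           false
}
```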