diff --git a/go.mod b/go.mod
index 73797a2a..37e6e20a 100644
--- a/go.mod
+++ b/go.mod
@@ -45,6 +45,7 @@ require (
   github.com/fatih/color v1.13.0 // indirect
   github.com/go-logr/logr v1.3.0 // indirect
   github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+  github.com/goinggo/mapstructure v0.0.0-20140717182941-194205d9b4a9
   github.com/golang-jwt/jwt/v4 v4.4.1 // indirect
   github.com/golang/glog v1.1.2 // indirect
   github.com/golang/protobuf v1.5.3 // indirect
diff --git a/go.sum b/go.sum
index c0c8f8f2..4078cad3 100644
--- a/go.sum
+++ b/go.sum
@@ -161,6 +161,8 @@ github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3a
 github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
 github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/goinggo/mapstructure v0.0.0-20140717182941-194205d9b4a9 h1:wqckanyE9qc/XnvnybC6SHOb8Nyd62QXAZOzA8twFig=
+github.com/goinggo/mapstructure v0.0.0-20140717182941-194205d9b4a9/go.mod h1:64ikIrMv84B+raz7akXOqbF7cK3/OQQ/6cClY10oy7A=
 github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ=
 github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -514,6 +516,7 @@ github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThC
 github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
 github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4=
 github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
 github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
diff --git a/tests/ci/labels.go b/tests/ci/labels.go
index 3ee8e857..8983b51f 100644
--- a/tests/ci/labels.go
+++ b/tests/ci/labels.go
@@ -15,6 +15,8 @@ var Day1Prepare = Label("day1-prepare")
 var Day1Negative = Label("day1-negative")
 var Day1Post = Label("day1-post")
 var Day2 = Label("day2")
+var Upgrade = Label("upgrade")
+var Update = Label("update")
 
 // day3 : the test cases will destroy default resource
 var Day3 = Label("day3")
diff --git a/tests/ci/profile_handler.go b/tests/ci/profile_handler.go
index 212bab50..eaf04cfe 100644
--- a/tests/ci/profile_handler.go
+++ b/tests/ci/profile_handler.go
@@ -188,6 +188,13 @@ func PrepareVersion(connection *client.Connection, versionTag string, channelGro
   }
   return vResult
 }
+
+func GetMajorVersion(rawVersion string) string {
+  versionRegex := regexp.MustCompile(`^[0-9]+\.[0-9]+`)
+  vResult := versionRegex.FindAllStringSubmatch(rawVersion, 1)[0][0]
+  return vResult
+}
+
 func PrepareProxy() {}
 
 func PrepareKMSKey(profile *Profile, kmsName string, accountRolePrefix string, accountRolePath string) (string, error) {
@@ -303,7 +310,8 @@ func GenerateClusterCreationArgsByProfile(token string, profile *Profile) (clust
   }
 
   if profile.STS {
-    accountRolesOutput, err := PrepareAccountRoles(token, clusterArgs.ClusterName, profile.UnifiedAccRolesPath, clusterArgs.AWSRegion, profile.MajorVersion, profile.ChannelGroup, CON.AccountRolesDir)
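Note on the new GetMajorVersion helper above: it trims a full OpenShift version down to its major.minor prefix so account roles can be prepared for the stream actually being installed, instead of relying on the profile's static major_version. A minimal sketch of the expected behaviour, assuming a test placed next to profile_handler.go in package ci with "testing" imported; the test itself is illustrative and not part of this change, and note that the helper indexes the first regex match directly, so an input without a leading major.minor prefix would panic:

func TestGetMajorVersion(t *testing.T) {
  // Each raw version should be reduced to its major.minor prefix.
  cases := map[string]string{
    "4.14.9":      "4.14",
    "4.15.0-rc.2": "4.15",
    "4.14":        "4.14",
  }
  for raw, want := range cases {
    if got := GetMajorVersion(raw); got != want {
      t.Errorf("GetMajorVersion(%q) = %q, want %q", raw, got, want)
    }
  }
}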
+    majorVersion := GetMajorVersion(profile.Version)
+    accountRolesOutput, err := PrepareAccountRoles(token, clusterArgs.ClusterName, profile.UnifiedAccRolesPath, clusterArgs.AWSRegion, majorVersion, profile.ChannelGroup, CON.AccountRolesDir)
     Expect(err).ToNot(HaveOccurred())
     clusterArgs.AccountRolePrefix = accountRolesOutput.AccountRolePrefix
     clusterArgs.UnifiedAccRolesPath = profile.UnifiedAccRolesPath
diff --git a/tests/ci/profiles/tf_cluster_profile.yml b/tests/ci/profiles/tf_cluster_profile.yml
index 27fd3030..ddc852b6 100644
--- a/tests/ci/profiles/tf_cluster_profile.yml
+++ b/tests/ci/profiles/tf_cluster_profile.yml
@@ -61,8 +61,8 @@ profiles:
     additional_sg_number: 4
     worker_disk_size: 200
     unified_acc_role_path: "/unified/"
-# rosa-sts-up :: creating a managed cluster for upgrade purpose
-- as: rosa-sts-up
+# rosa-up-y :: creating a cluster for y-stream upgrade purpose
+- as: rosa-up-y
   cluster:
     multi_az: false
     product_id: "rosa"
@@ -79,14 +79,46 @@ profiles:
     fips: false
     autoscale: false
     byok: true
-    version: "z-1"
+    version: "y-1"
     major_version: "4.14"
     compute_machine_type: "m5.xlarge"
     proxy: false
     labeling: false
+    tagging: true
+    channel_group: stable
+    zones: ""
+    imdsv2: ""
+    oidc_config: "managed"
+    admin_enabled: true
+    unified_acc_role_path: "/uni-fied/"
+# rosa-up-z :: creating a cluster for z-stream upgrade purpose
+- as: rosa-up-z
+  cluster:
+    multi_az: true
+    product_id: "rosa"
+    hypershift: false
+    cloud_provider: "aws"
+    region: "ap-northeast-1"
+    ccs: true
+    sts: true
+    byovpc: false
+    private_link: false
+    private: false
+    etcd_encryption: true
+    kms_key_arn: false
+    fips: false
+    autoscale: false
+    byok: true
+    version: "z-1"
+    major_version: "4.14"
+    compute_machine_type: "m5.xlarge"
+    proxy: false
+    labeling: true
     tagging: false
-    channel_group: candidate
+    channel_group: stable
     zones: ""
     imdsv2: ""
     oidc_config: "managed"
-    admin_enabled: true
\ No newline at end of file
+    admin_enabled: true
+    unified_acc_role_path: ""
+    
\ No newline at end of file
diff --git a/tests/e2e/cluster_creation_test.go b/tests/e2e/cluster_creation_test.go
index 23363524..bf181e4a 100644
--- a/tests/e2e/cluster_creation_test.go
+++ b/tests/e2e/cluster_creation_test.go
@@ -1,14 +1,10 @@
 package e2e
 
 import (
-  "fmt"
-  "time"
-
   . "github.com/onsi/ginkgo/v2"
   . "github.com/onsi/gomega"
   CI "github.com/terraform-redhat/terraform-provider-rhcs/tests/ci"
   CON "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/constants"
-  . "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/log"
   "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/openshift"
 )
 
@@ -23,16 +19,10 @@ var _ = Describe("RHCS Provider Test", func() {
       Expect(err).ToNot(HaveOccurred())
       Expect(clusterID).ToNot(BeEmpty())
       //TODO: implement waiter for the private cluster once bastion is implemented
-      if CON.GetEnvWithDefault(CON.WaitOperators, "false") == "true" {
+      if CON.GetEnvWithDefault(CON.WaitOperators, "false") == "true" && !profile.Private {
         // WaitClusterOperatorsToReadyStatus will wait for cluster operators ready
         timeout := 60
-        timeoutMin := time.Duration(timeout)
-        console, err := openshift.NewConsole(clusterID, CI.RHCSConnection)
-        if err != nil {
-          Logger.Warnf("Got error %s when config the openshift console. Return without waiting for operators ready", err.Error())
-          return
-        }
-        _, err = openshift.RetryCMDRun(fmt.Sprintf("oc wait clusteroperators --all --for=condition=Progressing=false --kubeconfig %s --timeout %dm", console.KubePath, timeout), timeoutMin)
+        err = openshift.WaitForOperatorsToBeReady(CI.RHCSConnection, clusterID, timeout)
         Expect(err).ToNot(HaveOccurred())
       }
     })
diff --git a/tests/e2e/cluster_upgrade_test.go b/tests/e2e/cluster_upgrade_test.go
new file mode 100644
index 00000000..168bd134
--- /dev/null
+++ b/tests/e2e/cluster_upgrade_test.go
@@ -0,0 +1,156 @@
+package e2e
+
+import (
+  "fmt"
+  "strconv"
+  "strings"
+
+  . "github.com/onsi/ginkgo/v2"
+  . "github.com/onsi/gomega"
+  CI "github.com/terraform-redhat/terraform-provider-rhcs/tests/ci"
+  CMS "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/cms"
+  CON "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/constants"
+  EXE "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/exec"
+  "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/openshift"
+)
+
+var _ = Describe("RHCS Provider Test", func() {
+  Describe("Upgrade cluster tests", func() {
+
+    var targetV string
+    var clusterID string
+    BeforeEach(OncePerOrdered, func() {
+      clusterID, err = CI.PrepareRHCSClusterByProfileENV()
+      Expect(err).ToNot(HaveOccurred())
+
+    })
+    Context("Author:amalykhi-Critical-OCP-63153 @OCP-63153 @amalykhi", func() {
+      It("Z-stream upgrade a ROSA STS cluster with RHCS provider", CI.Upgrade,
+        func() {
+          if profile.Version != "z-1" {
+            Skip("The test is configured only for Z-stream upgrade")
+          }
+          clusterResp, err := CMS.RetrieveClusterDetail(CI.RHCSConnection, clusterID)
+          targetV, err = CMS.GetVersionUpgradeTarget(clusterResp.Body().Version().RawID(),
+            CON.Z, clusterResp.Body().Version().AvailableUpgrades())
+          Expect(err).ToNot(HaveOccurred())
+
+          clusterService, err := EXE.NewClusterService(profile.ManifestsDIR)
+          Expect(err).ToNot(HaveOccurred())
+
+          By("Validate invalid OCP version - downgrade")
+          currentVersion := string(clusterResp.Body().Version().RawID())
+          splittedVersion := strings.Split(currentVersion, ".")
+          zStreamV, err := strconv.Atoi(splittedVersion[2])
+          Expect(err).ToNot(HaveOccurred())
+
+          downgradedVersion := fmt.Sprintf("%s.%s.%s", splittedVersion[0], splittedVersion[1], fmt.Sprint(zStreamV-1))
+
+          clusterArgs := &EXE.ClusterCreationArgs{
+            OpenshiftVersion: downgradedVersion,
+          }
+          err = clusterService.Apply(clusterArgs, false, false)
+          Expect(err).To(HaveOccurred())
+          Expect(err.Error()).To(ContainSubstring("cluster version is already above the\nrequested version"))
+
+          By("Run the cluster update")
+          clusterArgs = &EXE.ClusterCreationArgs{
+            OpenshiftVersion: targetV,
+          }
+          err = clusterService.Apply(clusterArgs, false, false)
+          Expect(err).ToNot(HaveOccurred())
+
+          By("Wait the upgrade finished")
+          err = openshift.WaitClusterUpgradeFinished(CI.RHCSConnection, clusterID)
+          Expect(err).ToNot(HaveOccurred(), "Cluster upgrade %s failed with the error %v", clusterID, err)
+
+          By("Check the cluster status and OCP version")
+          clusterResp, err = CMS.RetrieveClusterDetail(CI.RHCSConnection, clusterID)
+          Expect(err).ToNot(HaveOccurred())
+          Expect(string(clusterResp.Body().State())).To(Equal(CON.Ready))
+          Expect(string(clusterResp.Body().Version().RawID())).To(Equal(targetV))
+
+          if CON.GetEnvWithDefault(CON.WaitOperators, "false") == "true" && !profile.Private {
+            // WaitClusterOperatorsToReadyStatus will wait for cluster operators ready
+            timeout := 60
+            err = openshift.WaitForOperatorsToBeReady(CI.RHCSConnection, clusterID, timeout)
+            Expect(err).ToNot(HaveOccurred())
+          }
+        })
+    })
+    Context("Author:amalykhi-Critical-OCP-63152 @OCP-63152 @amalykhi", func() {
+      It("Y-stream Upgrade ROSA STS cluster with RHCS provider", CI.Upgrade,
+        func() {
+          if profile.Version != "y-1" {
+            Skip("The test is configured only for Y-stream upgrade")
+          }
+
+          clusterResp, err := CMS.RetrieveClusterDetail(CI.RHCSConnection, clusterID)
+
+          targetV, err = CMS.GetVersionUpgradeTarget(clusterResp.Body().Version().RawID(),
+            CON.Y, clusterResp.Body().Version().AvailableUpgrades())
+          Expect(err).ToNot(HaveOccurred())
+
+          By("Upgrade account-roles")
+          majorVersion := CI.GetMajorVersion(targetV)
+          _, err = CI.PrepareAccountRoles(token, clusterResp.Body().Name(), profile.UnifiedAccRolesPath, profile.Region, majorVersion, profile.ChannelGroup, CON.AccountRolesDir)
+          Expect(err).ToNot(HaveOccurred())
+
+          clusterService, err := EXE.NewClusterService(profile.ManifestsDIR)
+          Expect(err).ToNot(HaveOccurred())
+
+          By("Validate invalid OCP version field - downgrade")
+          currentVersion := string(clusterResp.Body().Version().RawID())
+          splittedVersion := strings.Split(currentVersion, ".")
+          yStreamV, err := strconv.Atoi(splittedVersion[1])
+          Expect(err).ToNot(HaveOccurred())
+
+          downgradedVersion := fmt.Sprintf("%s.%s.%s", splittedVersion[0], fmt.Sprint(yStreamV-1), splittedVersion[2])
+
+          clusterArgs := &EXE.ClusterCreationArgs{
+            OpenshiftVersion: downgradedVersion,
+          }
+          err = clusterService.Apply(clusterArgs, false, false)
+          Expect(err).To(HaveOccurred())
+          Expect(err.Error()).To(ContainSubstring("cluster version is already above the\nrequested version"))
+
+          By("Validate the cluster Upgrade upgrade_acknowledge field")
+
+          clusterArgs = &EXE.ClusterCreationArgs{
+            OpenshiftVersion: targetV,
+          }
+
+          err = clusterService.Apply(clusterArgs, false, false)
+          Expect(err).To(HaveOccurred())
+          Expect(err.Error()).To(ContainSubstring("Missing required acknowledgements to schedule upgrade"))
+
+          By("Apply the cluster Upgrade")
+
+          clusterArgs = &EXE.ClusterCreationArgs{
+            OpenshiftVersion:           targetV,
+            UpgradeAcknowledgementsFor: majorVersion,
+          }
+
+          err = clusterService.Apply(clusterArgs, false, false)
+          Expect(err).ToNot(HaveOccurred())
+
+          By("Wait the upgrade finished")
+          err = openshift.WaitClusterUpgradeFinished(CI.RHCSConnection, clusterID)
+          Expect(err).ToNot(HaveOccurred(), "Cluster %s failed with the error %v", clusterID, err)
+
+          By("Check the cluster status and OCP version")
+          clusterResp, err = CMS.RetrieveClusterDetail(CI.RHCSConnection, clusterID)
+          Expect(err).ToNot(HaveOccurred())
+          Expect(string(clusterResp.Body().State())).To(Equal(CON.Ready))
+          Expect(string(clusterResp.Body().Version().RawID())).To(Equal(targetV))
+
+          if CON.GetEnvWithDefault(CON.WaitOperators, "false") == "true" && !profile.Private {
+            // WaitClusterOperatorsToReadyStatus will wait for cluster operators ready
+            timeout := 60
+            err = openshift.WaitForOperatorsToBeReady(CI.RHCSConnection, clusterID, timeout)
+            Expect(err).ToNot(HaveOccurred())
+          }
+        })
+    })
+  })
+})
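Both new tests derive an intentionally lower version from the cluster's current RawID to verify the provider rejects a downgrade before exercising the real upgrade. A standalone sketch of that string manipulation (the literal version below is an example only; the tests read it from clusterResp.Body().Version().RawID()):

package main

import (
  "fmt"
  "strconv"
  "strings"
)

func main() {
  currentVersion := "4.14.9" // example value only

  parts := strings.Split(currentVersion, ".")
  z, _ := strconv.Atoi(parts[2])

  // Same construction as the Z-stream case: keep major.minor, step the patch level down.
  downgraded := fmt.Sprintf("%s.%s.%s", parts[0], parts[1], fmt.Sprint(z-1))
  fmt.Println(downgraded) // prints 4.14.8
}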
diff --git a/tests/e2e/general_day_two_test.go b/tests/e2e/general_day_two_test.go
index 882499af..8cd532cd 100644
--- a/tests/e2e/general_day_two_test.go
+++ b/tests/e2e/general_day_two_test.go
@@ -1,8 +1,6 @@
 package e2e
 
 import (
-  "path/filepath"
-
   . "github.com/onsi/ginkgo/v2"
   . "github.com/onsi/gomega"
   ci "github.com/terraform-redhat/terraform-provider-rhcs/tests/ci"
@@ -88,8 +86,7 @@ var _ = Describe("TF day2 scenrios", func() {
       Expect(err).ShouldNot(HaveOccurred())
 
       // Read terraform.tfvars file and get its content as a map
-      terraformTFVarsPath := filepath.Join(profile.ManifestsDIR, "terraform.tfvars")
-      terraformTFVarsContent := exe.ReadTerraformTFVars(terraformTFVarsPath)
+      terraformTFVarsContent := exe.ReadTerraformTFVars(profile.ManifestsDIR)
       Expect(err).ShouldNot(HaveOccurred())
 
       // Capture the original custom properties
"github.com/onsi/gomega" ci "github.com/terraform-redhat/terraform-provider-rhcs/tests/ci" @@ -88,8 +86,7 @@ var _ = Describe("TF day2 scenrios", func() { Expect(err).ShouldNot(HaveOccurred()) // Read terraform.tfvars file and get its content as a map - terraformTFVarsPath := filepath.Join(profile.ManifestsDIR, "terraform.tfvars") - terraformTFVarsContent := exe.ReadTerraformTFVars(terraformTFVarsPath) + terraformTFVarsContent := exe.ReadTerraformTFVars(profile.ManifestsDIR) Expect(err).ShouldNot(HaveOccurred()) // Capture the original custom properties diff --git a/tests/tf-manifests/rhcs/clusters/rosa-classic/main.tf b/tests/tf-manifests/rhcs/clusters/rosa-classic/main.tf index c38a0c74..f1759f01 100644 --- a/tests/tf-manifests/rhcs/clusters/rosa-classic/main.tf +++ b/tests/tf-manifests/rhcs/clusters/rosa-classic/main.tf @@ -92,9 +92,11 @@ resource "rhcs_cluster_rosa_classic" "rosa_sts_cluster" { aws_additional_infra_security_group_ids = var.additional_infra_security_groups aws_additional_control_plane_security_group_ids = var.additional_control_plane_security_groups destroy_timeout = 120 + upgrade_acknowledgements_for = var.upgrade_acknowledgements_for lifecycle { ignore_changes = [availability_zones] } + wait_for_create_complete = true } resource "rhcs_cluster_wait" "rosa_cluster" { diff --git a/tests/tf-manifests/rhcs/clusters/rosa-classic/variables.tf b/tests/tf-manifests/rhcs/clusters/rosa-classic/variables.tf index 0ee71536..12b310e5 100644 --- a/tests/tf-manifests/rhcs/clusters/rosa-classic/variables.tf +++ b/tests/tf-manifests/rhcs/clusters/rosa-classic/variables.tf @@ -213,4 +213,9 @@ variable "path" { description = "(Optional) The arn path for the account/operator roles as well as their policies." type = string default = null +} + +variable "upgrade_acknowledgements_for" { + type = string + default = null } \ No newline at end of file diff --git a/tests/utils/cms/cms.go b/tests/utils/cms/cms.go index 50439ad1..e6c07013 100644 --- a/tests/utils/cms/cms.go +++ b/tests/utils/cms/cms.go @@ -266,6 +266,11 @@ func ListUpgradePolicies(connection *client.Connection, clusterID string, params return request.Send() } +func GetUpgradePolicyState(connection *client.Connection, clusterID string, upgradepolicyID string) (*cmv1.UpgradePolicyStateGetResponse, error) { + resp, err := connection.ClustersMgmt().V1().Clusters().Cluster(clusterID).UpgradePolicies().UpgradePolicy(upgradepolicyID).State().Get().Send() + return resp, err +} + func RetrieveUpgradePolicies(connection *client.Connection, clusterID string, upgradepolicyID string) (*cmv1.UpgradePolicyGetResponse, error) { resp, err := connection.ClustersMgmt().V1().Clusters().Cluster(clusterID).UpgradePolicies().UpgradePolicy(upgradepolicyID).Get().Send() return resp, err diff --git a/tests/utils/constants/constants.go b/tests/utils/constants/constants.go index a3695271..b603c904 100644 --- a/tests/utils/constants/constants.go +++ b/tests/utils/constants/constants.go @@ -5,18 +5,45 @@ import ( "os" "path" "strings" + + cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" ) +// OCP version streams const ( X = "x" Y = "y" Z = "z" +) +const ( UnderscoreConnector string = "_" DotConnector string = "." 
diff --git a/tests/utils/exec/cluster.go b/tests/utils/exec/cluster.go
index 5f3dcf62..399a028e 100644
--- a/tests/utils/exec/cluster.go
+++ b/tests/utils/exec/cluster.go
@@ -43,6 +43,7 @@ type ClusterCreationArgs struct {
   DisableUWM                 bool   `json:"disable_workload_monitoring,omitempty"`
   Proxy                      *Proxy `json:"proxy,omitempty"`
   UnifiedAccRolesPath        string `json:"path,omitempty"`
+  UpgradeAcknowledgementsFor string `json:"upgrade_acknowledgements_for,omitempty"`
 }
 type Proxy struct {
   HTTPProxy string `json:"http_proxy,omitempty"`
diff --git a/tests/utils/exec/tf-exec.go b/tests/utils/exec/tf-exec.go
index 58549cd5..89d6b1b1 100644
--- a/tests/utils/exec/tf-exec.go
+++ b/tests/utils/exec/tf-exec.go
@@ -183,10 +183,10 @@ func runTerraformImportWithArgs(ctx context.Context, dir string, terraformArgs [
   return output, nil
 }
 
-func combineArgs(varAgrs map[string]interface{}, abArgs ...string) ([]string, map[string]string) {
+func combineArgs(varArgs map[string]interface{}, abArgs ...string) ([]string, map[string]string) {
   args := []string{}
   tfArgs := map[string]string{}
-  for k, v := range varAgrs {
+  for k, v := range varArgs {
     var argV string
     var tfvarV string
     switch v := v.(type) {
@@ -234,7 +234,8 @@ func recordTFvarsFile(fileDir string, tfvars map[string]string) error {
 }
 
 // Function to read terraform.tfvars file and return its content as a map
-func ReadTerraformTFVars(filePath string) map[string]string {
+func ReadTerraformTFVars(dirPath string) map[string]string {
+  filePath := CON.GrantTFvarsFile(dirPath)
   content, err := os.ReadFile(filePath)
   if err != nil {
     Logger.Errorf("Can't read file %s - not found or could not be fetched", filePath)
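The UpgradeAcknowledgementsFor field added to ClusterCreationArgs is tied to the new upgrade_acknowledgements_for Terraform variable purely through its json tag; exactly how the framework converts the struct into tfvars is outside this diff, so the snippet below only illustrates the tag and omitempty behaviour with plain encoding/json — when the field is left empty it is dropped and the variable keeps its null default:

package main

import (
  "encoding/json"
  "fmt"
)

// Trimmed-down copy of the struct from tests/utils/exec/cluster.go, keeping only the new field.
type clusterCreationArgs struct {
  UpgradeAcknowledgementsFor string `json:"upgrade_acknowledgements_for,omitempty"`
}

func main() {
  withAck, _ := json.Marshal(clusterCreationArgs{UpgradeAcknowledgementsFor: "4.15"})
  withoutAck, _ := json.Marshal(clusterCreationArgs{})
  fmt.Println(string(withAck))    // {"upgrade_acknowledgements_for":"4.15"}
  fmt.Println(string(withoutAck)) // {}
}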
[]interface{}{"status", "phase"} + columns["startTime"] = []interface{}{"status", "startTime"} + columns["hostIP"] = []interface{}{"status", "hostIP"} + CMD := fmt.Sprintf("oc get pod -o json") + if len(namespace) == 1 { + CMD = fmt.Sprintf("oc get pod -n %s -o json --kubeconfig %s", namespace[0], c.KubePath) + } + stdout, _, err := h.RunCMD(CMD) + if err != nil { + return pods, err + } + podAttrList, err := FigureStdout(stdout, columns) + if err != nil { + panic(err) + } + for _, podAttr := range podAttrList { + pod := Pod{} + err = mapstructure.Decode(podAttr, &pod) + if err != nil { + return pods, err + } + pods = append(pods, &pod) + } + return pods, err +} diff --git a/tests/utils/openshift/openshift.go b/tests/utils/openshift/openshift.go index 0b127aa5..f6fe2479 100644 --- a/tests/utils/openshift/openshift.go +++ b/tests/utils/openshift/openshift.go @@ -2,9 +2,14 @@ package openshift import ( "fmt" + "net/http" "strings" "time" + client "github.com/openshift-online/ocm-sdk-go" + ci "github.com/terraform-redhat/terraform-provider-rhcs/tests/ci" + CMS "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/cms" + CON "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/constants" h "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/helper" . "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/log" ) @@ -18,6 +23,15 @@ type OcAttributes struct { Timeout time.Duration } +// Pod struct is struct that contains info +type Pod struct { + Name string `json:"name,omitempty"` + IP string `json:"ip,omitempty"` + Status string `json:"status,omitempty"` + StartTime string `json:"startTime,omitempty"` + HostIP string `json:"hostIP,omitempty"` +} + func GenerateOCLoginCMD(server string, username string, password string, clusterid string, additioanlFlags ...string) string { cmd := fmt.Sprintf("oc login %s --username %s --password %s", server, username, password) @@ -56,3 +70,129 @@ func OcLogin(ocLoginAtter OcAttributes) (string, error) { return output, err } + +func WaitForOperatorsToBeReady(connection *client.Connection, clusterID string, timeout int) error { + // WaitClusterOperatorsToReadyStatus will wait for cluster operators ready + timeoutMin := time.Duration(timeout) + console, err := NewConsole(clusterID, connection) + if err != nil { + Logger.Warnf("Got error %s when config the openshift console. Return without waiting for operators ready", err.Error()) + return err + } + _, err = RetryCMDRun(fmt.Sprintf("oc wait clusteroperators --all --for=condition=Progressing=false --kubeconfig %s --timeout %dm", console.KubePath, timeout), timeoutMin) + return err +} + +func RestartMUOPods(connection *client.Connection, clusterID string) error { + MUONamespace := "openshift-managed-upgrade-operator" + console, err := NewConsole(clusterID, connection) + if err != nil { + return err + } + pods, err := console.GetPods(MUONamespace) + for _, pod := range pods { + cmd := fmt.Sprintf("oc delete pod/%s -n %s --kubeconfig %s", pod.Name, MUONamespace, console.KubePath) + _, _, err = h.RunCMD(cmd) + if err != nil { + return err + } + } + return nil +} + +// WaitForUpgradePolicyToState will time out after minutes +// Be careful for state completed. 
diff --git a/tests/utils/openshift/openshift.go b/tests/utils/openshift/openshift.go
index 0b127aa5..f6fe2479 100644
--- a/tests/utils/openshift/openshift.go
+++ b/tests/utils/openshift/openshift.go
@@ -2,9 +2,14 @@ package openshift
 
 import (
   "fmt"
+  "net/http"
   "strings"
   "time"
 
+  client "github.com/openshift-online/ocm-sdk-go"
+  ci "github.com/terraform-redhat/terraform-provider-rhcs/tests/ci"
+  CMS "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/cms"
+  CON "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/constants"
   h "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/helper"
   . "github.com/terraform-redhat/terraform-provider-rhcs/tests/utils/log"
 )
@@ -18,6 +23,15 @@ type OcAttributes struct {
   Timeout time.Duration
 }
 
+// Pod struct contains the pod info extracted from "oc get pod"
+type Pod struct {
+  Name      string `json:"name,omitempty"`
+  IP        string `json:"ip,omitempty"`
+  Status    string `json:"status,omitempty"`
+  StartTime string `json:"startTime,omitempty"`
+  HostIP    string `json:"hostIP,omitempty"`
+}
+
 func GenerateOCLoginCMD(server string, username string, password string, clusterid string, additioanlFlags ...string) string {
   cmd := fmt.Sprintf("oc login %s --username %s --password %s", server, username, password)
 
@@ -56,3 +70,129 @@ func OcLogin(ocLoginAtter OcAttributes) (string, error) {
 
   return output, err
 }
+
+func WaitForOperatorsToBeReady(connection *client.Connection, clusterID string, timeout int) error {
+  // WaitClusterOperatorsToReadyStatus will wait for cluster operators ready
+  timeoutMin := time.Duration(timeout)
+  console, err := NewConsole(clusterID, connection)
+  if err != nil {
+    Logger.Warnf("Got error %s when config the openshift console. Return without waiting for operators ready", err.Error())
+    return err
+  }
+  _, err = RetryCMDRun(fmt.Sprintf("oc wait clusteroperators --all --for=condition=Progressing=false --kubeconfig %s --timeout %dm", console.KubePath, timeout), timeoutMin)
+  return err
+}
+
+func RestartMUOPods(connection *client.Connection, clusterID string) error {
+  MUONamespace := "openshift-managed-upgrade-operator"
+  console, err := NewConsole(clusterID, connection)
+  if err != nil {
+    return err
+  }
+  pods, err := console.GetPods(MUONamespace)
+  for _, pod := range pods {
+    cmd := fmt.Sprintf("oc delete pod/%s -n %s --kubeconfig %s", pod.Name, MUONamespace, console.KubePath)
+    _, _, err = h.RunCMD(cmd)
+    if err != nil {
+      return err
+    }
+  }
+  return nil
+}
+
+// WaitForUpgradePolicyToState will time out after the given number of minutes.
+// Be careful with the "completed" state: make sure the automatic policy is already in a status other than "pending" before waiting on it.
+func WaitForUpgradePolicyToState(connection *client.Connection, clusterID string, policyID string, state string, timeout int) error {
+  fmt.Println("Going to wait upgrade to status ", state)
+  startTime := time.Now()
+  resp, err := CMS.RetrieveUpgradePolicies(connection, clusterID, policyID)
+  if err != nil {
+    return err
+  }
+  if resp.Status() != http.StatusOK {
+    return fmt.Errorf(">>> Error happened when retrieve policy detail: %s", resp.Error())
+  }
+  scheduleType := resp.Body().ScheduleType()
+
+  for time.Now().Before(startTime.Add(time.Duration(timeout) * time.Minute)) {
+    stateResp, _ := CMS.GetUpgradePolicyState(connection, clusterID, policyID)
+
+    switch state {
+    case CON.Completed:
+      if scheduleType == CON.ManualScheduleType {
+        if stateResp.Status() == http.StatusNotFound {
+          return nil
+        } else if resp.Status() != http.StatusOK {
+          return fmt.Errorf(">>> Got response %s when retrieve the policy state: %s", resp.Error(), state)
+        }
+      } else {
+        if stateResp.Status() != http.StatusOK {
+          return fmt.Errorf(">>> Got response %s when retrieve the policy state: %s", resp.Error(), state)
+        }
+        if stateResp.Body().Value() == CON.Pending {
+          return nil
+        }
+      }
+
+    default:
+      if resp.Status() != http.StatusOK {
+        return fmt.Errorf(">>> Got response %s when retrieve the policy state: %s", resp.Error(), state)
+      }
+      if string(stateResp.Body().Value()) == state {
+        return nil
+      }
+
+    }
+
+    time.Sleep(1 * time.Minute)
+
+  }
+  return fmt.Errorf("ERROR! Timeout after %d minutes to wait for the policy %s into status %s of cluster %s",
+    timeout, policyID, state, clusterID)
+
+}
+
+func WaitClusterUpgradeFinished(connection *client.Connection, clusterID string) error {
+  Logger.Infof("Get the automatic policy created for the cluster upgrade")
+  policyIDs, err := CMS.ListUpgradePolicies(ci.RHCSConnection, clusterID)
+  if err != nil {
+    return err
+  }
+  policyID := policyIDs.Items().Get(0).ID()
+
+  Logger.Infof("Wait the policy to be scheduled")
+  err = WaitForUpgradePolicyToState(ci.RHCSConnection, clusterID, policyID, CON.Scheduled, 2)
+  if err != nil {
+    return fmt.Errorf("Policy %s not moved to state %s in 2 minutes with the error: %s", policyID, CON.Scheduled, err.Error())
+  }
+
+  Logger.Infof("Restart the MUO operator pod to make the policy synced")
+  err = RestartMUOPods(ci.RHCSConnection, clusterID)
+  if err != nil {
+    return err
+  }
+  Logger.Infof("Watch for the upgrade Started in 1 hour")
+  err = WaitForUpgradePolicyToState(ci.RHCSConnection, clusterID, policyID, CON.Started, 60)
+  if err != nil {
+    return fmt.Errorf("Policy %s not moved to state %s in 1 hour with the error: %s", policyID, CON.Started, err.Error())
+  }
+  Logger.Infof("Watch for the upgrade finished in 2 hours")
+  err = WaitForUpgradePolicyToState(ci.RHCSConnection, clusterID, policyID, CON.Completed, 2*60)
+  if err != nil {
+    return fmt.Errorf("Policy %s not moved to state %s in 2 hours with the error: %s", policyID, CON.Completed, err.Error())
+  }
+  return nil
+}
+
+// will return [map["NAME":"ip-10-0-130-210.us-east-2.compute.internal","STATUS":"Ready","ROLES":"worker"...]]
+func FigureStdout(stdout string, columns map[string][]interface{}) (result []map[string]interface{}, err error) {
+  items := h.DigArray(h.Parse([]byte(stdout)), "items")
+  for _, item := range items {
+    newMap := map[string]interface{}{}
+    for key, pattern := range columns {
+      newMap[key] = h.Dig(item, pattern)
+    }
+    result = append(result, newMap)
+  }
+  return
+}
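Taken together, the pieces added in this change give the tests a single upgrade path: apply the target version with its acknowledgement, let WaitClusterUpgradeFinished poll the upgrade policy (scheduled, then started, then completed, restarting the MUO pods in between), and optionally re-check cluster operators. A condensed caller-side sketch, assuming the same import aliases (CI, EXE, openshift) as the new test file; the *EXE.ClusterService parameter type is an assumption about what EXE.NewClusterService returns, and error handling is reduced to early returns:

// Sketch only; mirrors the order of calls in the new cluster_upgrade_test.go.
func runUpgrade(clusterService *EXE.ClusterService, clusterID, targetV string) error {
  args := &EXE.ClusterCreationArgs{
    OpenshiftVersion:           targetV,                     // e.g. "4.15.10" from GetVersionUpgradeTarget
    UpgradeAcknowledgementsFor: CI.GetMajorVersion(targetV), // e.g. "4.15"
  }
  if err := clusterService.Apply(args, false, false); err != nil { // schedules the upgrade policy
    return err
  }
  if err := openshift.WaitClusterUpgradeFinished(CI.RHCSConnection, clusterID); err != nil { // polls the policy to completion
    return err
  }
  // Optional post-upgrade check, the same call the tests gate behind CON.WaitOperators.
  return openshift.WaitForOperatorsToBeReady(CI.RHCSConnection, clusterID, 60)
}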