From a833ee0ed9ba145d08a92a8a6f68729a50a071b1 Mon Sep 17 00:00:00 2001 From: Steve Kuznetsov Date: Wed, 15 May 2024 11:41:00 -0600 Subject: [PATCH] cmd: add an option to render into a file, use it in e2e The closest thing we have to testing the command-line is our end-to-end code that re-uses the options structs and runs them through the logic that the command-line does. We can capture a lot of the work done in the command-line by inspecting the output manifests, which is easiest to do by rendering them as a test artifact. Signed-off-by: Steve Kuznetsov --- cmd/cluster/cluster.go | 3 ++- cmd/cluster/core/create.go | 26 +++++++++++++++++++++----- cmd/cluster/kubevirt/create.go | 2 +- product-cli/cmd/cluster/cluster.go | 3 ++- test/e2e/util/fixture.go | 6 +++++- test/e2e/util/hypershift_framework.go | 16 ++++++++++++++-- 6 files changed, 45 insertions(+), 11 deletions(-) diff --git a/cmd/cluster/cluster.go b/cmd/cluster/cluster.go index f6fb33f6b1..5a37d942ba 100644 --- a/cmd/cluster/cluster.go +++ b/cmd/cluster/cluster.go @@ -62,7 +62,8 @@ func NewCreateCommands() *cobra.Command { cmd.PersistentFlags().StringVar(&opts.ReleaseStream, "release-stream", opts.ReleaseStream, "The OCP release stream for the cluster (e.g. 4.15.0-0.nightly), this flag is ignored if release-image is set") cmd.PersistentFlags().StringVar(&opts.PullSecretFile, "pull-secret", opts.PullSecretFile, "File path to a pull secret.") cmd.PersistentFlags().StringVar(&opts.ControlPlaneAvailabilityPolicy, "control-plane-availability-policy", opts.ControlPlaneAvailabilityPolicy, "Availability policy for hosted cluster components. 
Supported options: SingleReplica, HighlyAvailable") - cmd.PersistentFlags().BoolVar(&opts.Render, "render", opts.Render, "Render output as YAML to stdout instead of applying") + cmd.PersistentFlags().BoolVar(&opts.Render, "render", opts.Render, "Render output as YAML instead of applying") + cmd.PersistentFlags().StringVar(&opts.RenderInto, "render-into", opts.RenderInto, "Render output as YAML into this file instead of applying. If unset, YAML will be output to stdout.") cmd.PersistentFlags().StringVar(&opts.ControlPlaneOperatorImage, "control-plane-operator-image", opts.ControlPlaneOperatorImage, "Override the default image used to deploy the control plane operator") cmd.PersistentFlags().StringVar(&opts.SSHKeyFile, "ssh-key", opts.SSHKeyFile, "Path to an SSH key file") cmd.PersistentFlags().StringVar(&opts.AdditionalTrustBundle, "additional-trust-bundle", opts.AdditionalTrustBundle, "Path to a file with user CA bundle") diff --git a/cmd/cluster/core/create.go b/cmd/cluster/core/create.go index cfdebcc8ca..5eaa19a366 100644 --- a/cmd/cluster/core/create.go +++ b/cmd/cluster/core/create.go @@ -61,6 +61,7 @@ type CreateOptions struct { ReleaseImage string ReleaseStream string Render bool + RenderInto string SSHKeyFile string ServiceCIDR []string ClusterCIDR []string @@ -475,7 +476,7 @@ func GetAPIServerAddressByNode(ctx context.Context, l logr.Logger) (string, erro } func Validate(ctx context.Context, opts *CreateOptions) error { - if !opts.Render { + if !opts.Render || opts.RenderInto != "" { client, err := util.GetClient() if err != nil { return err } @@ -498,7 +499,7 @@ func Validate(ctx context.Context, opts *CreateOptions) error { // Validate if mgmt cluster and NodePool CPU arches don't match, a multi-arch release image or stream was used // Exception for ppc64le arch since management cluster would be in x86 and node pools are going to be in ppc64le arch - if !opts.AWSPlatform.MultiArch && !opts.Render && opts.Arch != hyperv1.ArchitecturePPC64LE { + if 
!opts.AWSPlatform.MultiArch && (!opts.Render || opts.RenderInto != "") && opts.Arch != hyperv1.ArchitecturePPC64LE { mgmtClusterCPUArch, err := hyperutil.GetMgmtClusterCPUArch(ctx) if err != nil { return err @@ -538,13 +539,28 @@ func CreateCluster(ctx context.Context, opts *CreateOptions, platformSpecificApp } // In render mode, print the objects and return early - if opts.Render { + if opts.Render || opts.RenderInto != "" { + output := os.Stdout + if opts.RenderInto != "" { + var err error + output, err = os.Create(opts.RenderInto) + if err != nil { + return fmt.Errorf("failed to create file for rendering output: %w", err) + } + defer func() { + if err := output.Close(); err != nil { + fmt.Printf("failed to close file for rendering output: %v\n", err) + } + }() + } for _, object := range exampleOptions.Resources().AsObjects() { - err := hyperapi.YamlSerializer.Encode(object, os.Stdout) + err := hyperapi.YamlSerializer.Encode(object, output) if err != nil { return fmt.Errorf("failed to encode objects: %w", err) } - fmt.Println("---") + if _, err := fmt.Fprintln(output, "---"); err != nil { + return fmt.Errorf("failed to write object separator: %w", err) + } } return nil } diff --git a/cmd/cluster/kubevirt/create.go b/cmd/cluster/kubevirt/create.go index 2761086733..22b18e831d 100644 --- a/cmd/cluster/kubevirt/create.go +++ b/cmd/cluster/kubevirt/create.go @@ -99,7 +99,7 @@ func ApplyPlatformSpecificsValues(ctx context.Context, exampleOptions *apifixtur if opts.KubevirtPlatform.ServicePublishingStrategy != NodePortServicePublishingStrategy && opts.KubevirtPlatform.APIServerAddress != "" { return fmt.Errorf("external-api-server-address is supported only for NodePort service publishing strategy, service publishing strategy %s is used", opts.KubevirtPlatform.ServicePublishingStrategy) } - if opts.KubevirtPlatform.APIServerAddress == "" && opts.KubevirtPlatform.ServicePublishingStrategy == NodePortServicePublishingStrategy && !opts.Render { + if 
opts.KubevirtPlatform.APIServerAddress == "" && opts.KubevirtPlatform.ServicePublishingStrategy == NodePortServicePublishingStrategy && (!opts.Render || opts.RenderInto != "") { if opts.KubevirtPlatform.APIServerAddress, err = core.GetAPIServerAddressByNode(ctx, opts.Log); err != nil { return err } diff --git a/product-cli/cmd/cluster/cluster.go b/product-cli/cmd/cluster/cluster.go index ac7f2bb56b..9e91744181 100644 --- a/product-cli/cmd/cluster/cluster.go +++ b/product-cli/cmd/cluster/cluster.go @@ -70,7 +70,8 @@ func NewCreateCommands() *cobra.Command { cmd.PersistentFlags().StringVar(&opts.NetworkType, "network-type", opts.NetworkType, "Enum specifying the cluster SDN provider. Supports either Calico, OVNKubernetes, OpenShiftSDN or Other.") cmd.PersistentFlags().StringVar(&opts.PullSecretFile, "pull-secret", opts.PullSecretFile, "Filepath to a pull secret.") cmd.PersistentFlags().StringVar(&opts.ReleaseImage, "release-image", opts.ReleaseImage, "The OCP release image for the HostedCluster.") - cmd.PersistentFlags().BoolVar(&opts.Render, "render", opts.Render, "Renders the HostedCluster manifest output as YAML to stdout instead of automatically applying the manifests to the management cluster.") + cmd.PersistentFlags().BoolVar(&opts.Render, "render", opts.Render, "Render output as YAML instead of applying") + cmd.PersistentFlags().StringVar(&opts.RenderInto, "render-into", opts.RenderInto, "Render output as YAML into this file instead of applying. If unset, YAML will be output to stdout.") cmd.PersistentFlags().StringArrayVar(&opts.ServiceCIDR, "service-cidr", opts.ServiceCIDR, "The CIDR of the service network. Can be specified multiple times.") cmd.PersistentFlags().BoolVar(&opts.DefaultDual, "default-dual", opts.DefaultDual, "Defines the Service and Cluster CIDRs as dual-stack default values. This flag is ignored if service-cidr or cluster-cidr are set. 
Cannot be defined with service-cidr or cluster-cidr flag.") cmd.PersistentFlags().StringVar(&opts.SSHKeyFile, "ssh-key", opts.SSHKeyFile, "Filepath to an SSH key file.") diff --git a/test/e2e/util/fixture.go b/test/e2e/util/fixture.go index 7e63edf430..fc4fd5f213 100644 --- a/test/e2e/util/fixture.go +++ b/test/e2e/util/fixture.go @@ -209,7 +209,7 @@ func newClusterDumper(hc *hyperv1.HostedCluster, opts *core.CreateOptions, artif t.Logf("Skipping cluster dump because no artifact directory was provided") return nil } - dumpDir := filepath.Join(artifactDir, strings.ReplaceAll(t.Name(), "/", "_")) + dumpDir := filepath.Join(artifactDir, artifactSubdirFor(t)) switch hc.Spec.Platform.Type { case hyperv1.AWSPlatform: @@ -236,3 +236,7 @@ func newClusterDumper(hc *hyperv1.HostedCluster, opts *core.CreateOptions, artif } } } + +func artifactSubdirFor(t *testing.T) string { + return strings.ReplaceAll(t.Name(), "/", "_") +} diff --git a/test/e2e/util/hypershift_framework.go b/test/e2e/util/hypershift_framework.go index 38e8dd200e..9b68e113a6 100644 --- a/test/e2e/util/hypershift_framework.go +++ b/test/e2e/util/hypershift_framework.go @@ -3,6 +3,7 @@ package util import ( "context" "fmt" + "path/filepath" "runtime/debug" "strings" "testing" @@ -51,7 +52,7 @@ func NewHypershiftTest(t *testing.T, ctx context.Context, test hypershiftTestFun func (h *hypershiftTest) Execute(opts *core.CreateOptions, platform hyperv1.PlatformType, artifactDir string, serviceAccountSigningKey []byte) { // create a hypershift cluster for the test - hostedCluster := h.createHostedCluster(opts, platform, serviceAccountSigningKey) + hostedCluster := h.createHostedCluster(opts, platform, serviceAccountSigningKey, artifactDir) // if cluster creation failed, immediately try and clean up. 
if h.Failed() { @@ -178,7 +179,7 @@ func (h *hypershiftTest) postTeardown(hostedCluster *hyperv1.HostedCluster, opts }) } -func (h *hypershiftTest) createHostedCluster(opts *core.CreateOptions, platform hyperv1.PlatformType, serviceAccountSigningKey []byte) *hyperv1.HostedCluster { +func (h *hypershiftTest) createHostedCluster(opts *core.CreateOptions, platform hyperv1.PlatformType, serviceAccountSigningKey []byte, artifactDir string) *hyperv1.HostedCluster { h.Logf("createHostedCluster()") g := NewWithT(h.T) @@ -258,7 +259,18 @@ func (h *hypershiftTest) createHostedCluster(opts *core.CreateOptions, platform opts, err = createClusterOpts(h.ctx, h.client, hc, opts) g.Expect(err).NotTo(HaveOccurred(), "failed to generate platform specific cluster options") + // Dump the output from rendering the cluster objects for posterity + opts.Render = true + opts.RenderInto = filepath.Join(artifactDir, artifactSubdirFor(h.T), "manifests.yaml") + h.Logf("Dumping new cluster manifests to %s", opts.RenderInto) + if err := createCluster(h.ctx, hc, opts); err != nil { + h.Errorf("failed to create cluster, tearing down: %v", err) + return hc + } + // Try and create the cluster. If it fails, mark test as failed and return. + opts.Render = false + opts.RenderInto = "" h.Logf("Creating a new cluster. Options: %v", opts) if err := createCluster(h.ctx, hc, opts); err != nil { h.Errorf("failed to create cluster, tearing down: %v", err)