Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement kubectl helper integration tests #342

Merged
merged 3 commits into from
Sep 13, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,11 @@ integration-agent: agent-image
integration-kubernetes:
go test -tags integration ./pkg/kubernetes/...

integration: integration-agent integration-kubernetes
integration-kubectl:
go test -tags integration ./pkg/testutils/e2e/kubectl/

integration: integration-agent integration-kubernetes integration-kubectl


# Running with -buildvcs=false works around the issue of `go list all` failing when git, which runs as root inside
# the container, refuses to operate on the disruptor source tree as it is not owned by the same user (root).
Expand Down
56 changes: 6 additions & 50 deletions pkg/kubernetes/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,13 @@ package kubernetes
import (
"context"
"fmt"
"regexp"
"testing"
"time"

"github.com/grafana/xk6-disruptor/pkg/kubernetes/helpers"
"github.com/grafana/xk6-disruptor/pkg/testutils/e2e/fixtures"
"github.com/grafana/xk6-disruptor/pkg/testutils/k3sutils"

"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/k3s"

Expand All @@ -36,51 +37,6 @@ func createRandomTestNamespace(k8s Kubernetes) (string, error) {
return ns.Name, nil
}

// regexMatcher processes lines from the container logs and notifies,
// via the found channel, when a line matches the compiled expression.
type regexMatcher struct {
// compiled expression each log line is tested against
exp *regexp.Regexp
// receives true once for every matching log line
found chan (bool)
}

// Accept implements the testcontainers LogConsumer interface.
// Each log line's content is matched against exp; on a match, true is
// sent on the found channel.
// NOTE(review): found is unbuffered (see waitForRegex), so this send
// blocks if the receiver has already returned — confirm the producer is
// always stopped promptly.
func (a *regexMatcher) Accept(log testcontainers.Log) {
if a.exp.MatchString(string(log.Content)) {
a.found <- true
}
}

// waitForRegex waits until a match for the given regex is found in the log produced by the container.
// It returns an error when the expression does not compile, the log
// producer cannot be started, the timeout expires without a match, or
// ctx is cancelled.
func waitForRegex(ctx context.Context, container testcontainers.Container, exp string, timeout time.Duration) error {
// NOTE(review): this local name shadows the imported regexp package.
regexp, err := regexp.Compile(exp)
if err != nil {
return err
}

// Unbuffered: Accept's send only completes while the select below is
// still waiting to receive.
found := make(chan bool)
container.FollowOutput(&regexMatcher{
exp: regexp,
found: found,
})

err = container.StartLogProducer(ctx)
if err != nil {
return err
}
// Best-effort stop; the error is deliberately ignored during cleanup.
defer func() {
_ = container.StopLogProducer()
}()

expired := time.After(timeout)
select {
case <-found: // a log line matched exp
return nil
case <-expired:
return fmt.Errorf("timeout waiting for a match")
case <-ctx.Done():
return ctx.Err()
}
}

func Test_Kubernetes(t *testing.T) {
t.Parallel()

Expand All @@ -95,7 +51,7 @@ func Test_Kubernetes(t *testing.T) {
// see this issue for more details:
// https://github.com/testcontainers/testcontainers-go/issues/1547
timeout := time.Second * 30
err = waitForRegex(ctx, container, ".*Node controller sync successful.*", timeout)
err = k3sutils.WaitForRegex(ctx, container, ".*Node controller sync successful.*", timeout)
if err != nil {
t.Fatalf("failed waiting for cluster ready: %s", err)
}
Expand All @@ -117,7 +73,7 @@ func Test_Kubernetes(t *testing.T) {
t.Fatalf("failed to create rest client for kubernetes : %s", err)
}

k8s, err := newFromConfig(restcfg)
k8s, err := NewFromConfig(restcfg)
if err != nil {
t.Fatalf("error creating kubernetes client: %v", err)
}
Expand Down Expand Up @@ -321,7 +277,7 @@ func Test_UnsupportedKubernetesVersion(t *testing.T) {
// see this issue for more details:
// https://github.com/testcontainers/testcontainers-go/issues/1547
timeout := time.Second * 30
err = waitForRegex(ctx, container, ".*Node controller sync successful.*", timeout)
err = k3sutils.WaitForRegex(ctx, container, ".*Node controller sync successful.*", timeout)
if err != nil {
t.Fatalf("failed waiting for cluster ready: %s", err)
}
Expand All @@ -343,7 +299,7 @@ func Test_UnsupportedKubernetesVersion(t *testing.T) {
t.Fatalf("failed to create rest client for kubernetes : %s", err)
}

_, err = newFromConfig(restcfg)
_, err = NewFromConfig(restcfg)
if err == nil {
t.Errorf("should had failed creating kubernetes client")
return
Expand Down
8 changes: 4 additions & 4 deletions pkg/kubernetes/kubernetes.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,8 @@ type k8s struct {
kubernetes.Interface
}

// newFromConfig returns a Kubernetes instance configured with the provided kubeconfig.
func newFromConfig(config *rest.Config) (Kubernetes, error) {
// NewFromConfig returns a Kubernetes instance configured with the provided kubeconfig.
func NewFromConfig(config *rest.Config) (Kubernetes, error) {
// As per the discussion in [1] client side rate limiting is no longer required.
// Setting a large limit
// [1] https://github.com/kubernetes/kubernetes/issues/111880
Expand Down Expand Up @@ -62,7 +62,7 @@ func NewFromKubeconfig(kubeconfig string) (Kubernetes, error) {
return nil, err
}

return newFromConfig(config)
return NewFromConfig(config)
}

// New returns a Kubernetes instance or an error when no config is eligible to be used.
Expand All @@ -73,7 +73,7 @@ func NewFromKubeconfig(kubeconfig string) (Kubernetes, error) {
func New() (Kubernetes, error) {
k8sConfig, err := rest.InClusterConfig()
if err == nil {
return newFromConfig(k8sConfig)
return NewFromConfig(k8sConfig)
}

if !errors.Is(err, rest.ErrNotInCluster) {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
//go:build e2e
// +build e2e
//go:build integration
// +build integration

package e2e
package kubectl

import (
"bytes"
Expand All @@ -12,35 +12,59 @@ import (
"time"

"github.com/grafana/xk6-disruptor/pkg/kubernetes"
"github.com/grafana/xk6-disruptor/pkg/testutils/e2e/cluster"
"github.com/grafana/xk6-disruptor/pkg/testutils/e2e/deploy"
"github.com/grafana/xk6-disruptor/pkg/testutils/e2e/kubectl"
"github.com/grafana/xk6-disruptor/pkg/testutils/e2e/kubernetes/namespace"
"github.com/grafana/xk6-disruptor/pkg/testutils/k3sutils"
"github.com/grafana/xk6-disruptor/pkg/testutils/kubernetes/builders"


"github.com/testcontainers/testcontainers-go/modules/k3s"

"k8s.io/client-go/tools/clientcmd"
)

func Test_Kubectl(t *testing.T) {
t.Parallel()

cluster, err := cluster.BuildE2eCluster(
cluster.DefaultE2eClusterConfig(),
)
ctx := context.Background()

container, err := k3s.RunContainer(ctx)
if err != nil {
t.Errorf("failed to create cluster: %v", err)
return
t.Fatal(err)
}

// wait for the api server to complete initialization.
// see this issue for more details:
// https://github.com/testcontainers/testcontainers-go/issues/1547
timeout := time.Second * 30
err = k3sutils.WaitForRegex(ctx, container, ".*Node controller sync successful.*", timeout)
if err != nil {
t.Fatalf("failed waiting for cluster ready: %s", err)
}

// Clean up the container after the test is complete
t.Cleanup(func() {
_ = cluster.Cleanup()
if err = container.Terminate(ctx); err != nil {
t.Fatalf("failed to terminate container: %s", err)
}
})

k8s, err := kubernetes.NewFromKubeconfig(cluster.Kubeconfig())
kubeConfigYaml, err := container.GetKubeConfig(ctx)
if err != nil {
t.Fatalf("failed to get kube-config : %s", err)
}

restcfg, err := clientcmd.RESTConfigFromKubeConfig(kubeConfigYaml)
if err != nil {
t.Fatalf("failed to create rest client for kubernetes : %s", err)
}

k8s, err := kubernetes.NewFromConfig(restcfg)
if err != nil {
t.Errorf("error creating kubernetes client: %v", err)
return
t.Fatalf("error creating kubernetes client: %v", err)
}

// Test Wait Pod Running
t.Run("Test local random port", func(t *testing.T) {
t.Run("Test port forwarding", func(t *testing.T) {
namespace, err := namespace.CreateTestNamespace(context.TODO(), t, k8s.Client())
if err != nil {
t.Errorf("failed to create test namespace: %v", err)
Expand All @@ -63,7 +87,7 @@ func Test_Kubectl(t *testing.T) {
return
}

client, err := kubectl.NewFromKubeconfig(context.TODO(), cluster.Kubeconfig())
client, err := NewForConfig(context.TODO(), restcfg)
if err != nil {
t.Errorf("failed to create kubectl client: %v", err)
return
Expand Down
57 changes: 57 additions & 0 deletions pkg/testutils/k3sutils/k3sutils.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
// Package k3sutils implements helper functions for tests using TestContainer's k3s module
package k3sutils

import (
"context"
"fmt"
"regexp"
"time"

"github.com/testcontainers/testcontainers-go"
)

// regexMatcher process lines from logs and notifies when a match is found
type regexMatcher struct {
exp *regexp.Regexp
found chan (bool)
}

// Accept implements the testcontainers LogConsumer interface.
// Each log line's content is matched against exp; on a match, true is
// sent on the found channel.
// NOTE(review): if found is unbuffered and WaitForRegex has already
// returned (timeout/cancellation), this send blocks the log-producer
// goroutine indefinitely — verify the channel is buffered or drained.
func (a *regexMatcher) Accept(log testcontainers.Log) {
if a.exp.MatchString(string(log.Content)) {
a.found <- true
}
}

// WaitForRegex waits until a line matching the given regex appears in the
// log produced by the container, the timeout expires, or ctx is cancelled.
// It returns an error when the expression does not compile, the log
// producer cannot be started, no match arrives before the timeout, or the
// context is done.
// This utility function is a workaround for https://github.com/testcontainers/testcontainers-go/issues/1541
func WaitForRegex(ctx context.Context, container testcontainers.Container, exp string, timeout time.Duration) error {
	// Do not name this "regexp": that would shadow the imported package.
	re, err := regexp.Compile(exp)
	if err != nil {
		return err
	}

	// Buffered with capacity 1 so the log-producer goroutine can report a
	// match without blocking even after this function has already returned
	// (e.g. on timeout or cancellation), avoiding a goroutine leak.
	found := make(chan bool, 1)
	container.FollowOutput(&regexMatcher{
		exp:   re,
		found: found,
	})

	if err := container.StartLogProducer(ctx); err != nil {
		return err
	}
	defer func() {
		// Best-effort stop; the error is deliberately ignored during cleanup.
		_ = container.StopLogProducer()
	}()

	select {
	case <-found:
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("timeout waiting for a match of %q", exp)
	case <-ctx.Done():
		return ctx.Err()
	}
}
Loading