Add crossplane framework for testing
Problem: We want a way to reliably verify the nginx configuration in our tests. This is especially useful when introducing new policies, where we want to check the generated config without testing nginx functionality directly.

Solution: Added a framework that retrieves the nginx config and passes it through crossplane to produce structured JSON that is easier to parse.

Because we now use a local container for crossplane in our functional tests, we'll only support running these tests in a kind cluster.
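
For context, a functional test is expected to use the framework roughly as sketched below (a hedged illustration, not code from this commit). `GetCrossplaneConfig` is a hypothetical helper name for fetching the parsed payload; `ExpectedNginxField` and `ValidateNginxFieldExists` are the names introduced in `tests/framework/crossplane.go` below, and the field values are invented.

```go
// Hypothetical Gomega assertions inside a functional test; ctx, k8sClient,
// k8sConfig, ngfPodName, and namespace come from the suite's setup.
conf, err := framework.GetCrossplaneConfig(ctx, k8sClient, k8sConfig, ngfPodName, namespace)
Expect(err).ToNot(HaveOccurred())

Expect(framework.ValidateNginxFieldExists(conf, framework.ExpectedNginxField{
    Key:                   "proxy_pass",
    Value:                 "coffee_80", // substring of the generated upstream name
    File:                  "http.conf",
    Location:              "/coffee",
    Servers:               []string{"cafe.example.com"},
    ValueSubstringAllowed: true,
})).To(BeTrue())
```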
sjberman committed Sep 9, 2024
1 parent bf17bd5 commit bb3f186
Showing 9 changed files with 433 additions and 64 deletions.
11 changes: 11 additions & 0 deletions tests/Dockerfile.crossplane
@@ -0,0 +1,11 @@
FROM python:3.12-alpine

ARG NGINX_CONF_DIR

RUN pip install crossplane

COPY ${NGINX_CONF_DIR}/nginx.conf /etc/nginx/nginx.conf

USER 101:1001

ENTRYPOINT ["sh"]
17 changes: 8 additions & 9 deletions tests/Makefile
@@ -12,6 +12,7 @@ GW_SERVICE_TYPE = NodePort## Service type to use for the gateway
GW_SVC_GKE_INTERNAL = false
NGF_VERSION ?= edge## NGF version to be tested
PULL_POLICY = Never## Pull policy for the images
NGINX_CONF_DIR = internal/mode/static/nginx/conf
PROVISIONER_MANIFEST = conformance/provisioner/provisioner.yaml
SUPPORTED_EXTENDED_FEATURES = HTTPRouteQueryParamMatching,HTTPRouteMethodMatching,HTTPRoutePortRedirect,HTTPRouteSchemeRedirect,HTTPRouteHostRewrite,HTTPRoutePathRewrite,GatewayPort8080,HTTPRouteResponseHeaderModification
STANDARD_CONFORMANCE_PROFILES = GATEWAY-HTTP,GATEWAY-GRPC
@@ -38,6 +39,10 @@ update-go-modules: ## Update the gateway-api go modules to latest main version
build-test-runner-image: ## Build conformance test runner image
docker build -t $(CONFORMANCE_PREFIX):$(CONFORMANCE_TAG) -f conformance/Dockerfile .

.PHONY: build-crossplane-image
build-crossplane-image: ## Build the crossplane image
docker build --build-arg NGINX_CONF_DIR=$(NGINX_CONF_DIR) -t nginx-crossplane:latest -f Dockerfile.crossplane ..

.PHONY: run-conformance-tests
run-conformance-tests: ## Run conformance tests
kind load docker-image $(CONFORMANCE_PREFIX):$(CONFORMANCE_TAG) --name $(CLUSTER_NAME)
@@ -80,9 +85,6 @@ ifeq ($(PLUS_ENABLED),true)
NGINX_PREFIX := $(NGINX_PLUS_PREFIX)
endif

.PHONY: setup-gcp-and-run-tests
setup-gcp-and-run-tests: create-gke-router create-and-setup-vm run-tests-on-vm ## Create and setup a GKE router and GCP VM for tests and run the functional tests

.PHONY: setup-gcp-and-run-nfr-tests
setup-gcp-and-run-nfr-tests: create-gke-router create-and-setup-vm nfr-test ## Create and setup a GKE router and GCP VM for tests and run the NFR tests

@@ -102,13 +104,9 @@ create-gke-router: ## Create a GKE router to allow egress traffic from private n
sync-files-to-vm: ## Syncs your local NGF files with the NGF repo on the VM
./scripts/sync-files-to-vm.sh

.PHONY: run-tests-on-vm
run-tests-on-vm: ## Run the functional tests on a GCP VM
./scripts/run-tests-gcp-vm.sh

.PHONY: nfr-test
nfr-test: ## Run the NFR tests on a GCP VM
NFR=true CI=$(CI) ./scripts/run-tests-gcp-vm.sh
CI=$(CI) ./scripts/run-tests-gcp-vm.sh

.PHONY: start-longevity-test
start-longevity-test: export START_LONGEVITY=true
@@ -130,7 +128,8 @@ stop-longevity-test: nfr-test ## Stop the longevity test and collects results
--is-gke-internal-lb=$(GW_SVC_GKE_INTERNAL)

.PHONY: test
test: ## Runs the functional tests on your default k8s cluster
test: build-crossplane-image ## Runs the functional tests on your kind k8s cluster
kind load docker-image nginx-crossplane:latest --name $(CLUSTER_NAME)
go run github.com/onsi/ginkgo/v2/ginkgo --randomize-all --randomize-suites --keep-going --fail-on-pending \
--trace -r -v --buildvcs --force-newlines $(GITHUB_OUTPUT) \
--label-filter "functional" $(GINKGO_FLAGS) ./suite -- \
40 changes: 9 additions & 31 deletions tests/README.md
@@ -28,10 +28,8 @@ This directory contains the tests for NGINX Gateway Fabric. The tests are divide
- [System Testing](#system-testing)
- [Logging in tests](#logging-in-tests)
- [Step 1 - Run the tests](#step-1---run-the-tests)
- [1a - Run the functional tests locally](#1a---run-the-functional-tests-locally)
- [1b - Run the tests on a GKE cluster from a GCP VM](#1b---run-the-tests-on-a-gke-cluster-from-a-gcp-vm)
- [Functional Tests](#functional-tests)
- [NFR tests](#nfr-tests)
- [Run the functional tests locally](#run-the-functional-tests-locally)
- [Run the NFR tests on a GKE cluster from a GCP VM](#run-the-nfr-tests-on-a-gke-cluster-from-a-gcp-vm)
- [Longevity testing](#longevity-testing)
- [Common test amendments](#common-test-amendments)
- [Step 2 - Cleanup](#step-2---cleanup)
@@ -47,7 +45,7 @@ This directory contains the tests for NGINX Gateway Fabric. The tests are divide
- [yq](https://github.com/mikefarah/yq/#install)
- Make.

If running NFR tests, or running functional tests in GKE:
If running NFR tests:

- The [gcloud CLI](https://cloud.google.com/sdk/docs/install)
- A GKE cluster (if `master-authorized-networks` is enabled, please set `ADD_VM_IP_AUTH_NETWORKS=true` in your vars.env file)
@@ -59,9 +57,7 @@ All the commands below are executed from the `tests` directory. You can see all

### Step 1 - Create a Kubernetes cluster

This can be done in a cloud provider of choice, or locally using `kind`.

**Important**: NFR tests can only be run on a GKE cluster.
**Important**: Functional/conformance tests can only be run on a `kind` cluster. NFR tests can only be run on a GKE cluster.

To create a local `kind` cluster:

@@ -237,7 +233,7 @@ When running locally, the tests create a port-forward from your NGF Pod to local
test framework. Traffic is sent over this port. If running on a GCP VM targeting a GKE cluster, the tests will create an
internal LoadBalancer service which will receive the test traffic.

**Important**: NFR tests can only be run on a GKE cluster.
**Important**: Functional tests can only be run on a `kind` cluster. NFR tests can only be run on a GKE cluster.

Directory structure is as follows:

@@ -252,7 +248,7 @@ To log in the tests, use the `GinkgoWriter` interface described here: https://on

### Step 1 - Run the tests

#### 1a - Run the functional tests locally
#### Run the functional tests locally

```makefile
make test TAG=$(whoami)
@@ -273,9 +269,7 @@ To run the telemetry test:
make test TAG=$(whoami) GINKGO_LABEL=telemetry
```

#### 1b - Run the tests on a GKE cluster from a GCP VM

This step only applies if you are running the NFR tests, or would like to run the functional tests on a GKE cluster from a GCP based VM.
#### Run the NFR tests on a GKE cluster from a GCP VM

Before running the below `make` commands, copy the `scripts/vars.env-example` file to `scripts/vars.env` and populate the
required env vars. `GKE_SVC_ACCOUNT` needs to be the name of a service account that has Kubernetes admin permissions.
@@ -292,7 +286,7 @@ To just set up the VM with no router (this will not run the tests):
make create-and-setup-vm
```

Otherwise, you can set up the VM, router, and run the tests with a single command. See the options in the sections below.
Otherwise, you can set up the VM, router, and run the tests with a single command. See the options below.

By default, the tests run using the version of NGF that was `git cloned` during the setup. If you want to make
incremental changes and copy your local changes to the VM to test, you can run
@@ -301,22 +295,6 @@ incremental changes and copy your local changes to the VM to test, you can run
make sync-files-to-vm
```

#### Functional Tests

To set up the GCP environment with the router and VM and then run the tests, run the following command:

```makefile
make setup-gcp-and-run-tests
```

To use an existing VM to run the tests, run the following

```makefile
make run-tests-on-vm
```

#### NFR tests

To set up the GCP environment with the router and VM and then run the tests, run the following command:

```makefile
@@ -374,7 +352,7 @@ or to pass a specific flag, e.g. run a specific test, use the GINKGO_FLAGS varia
make test TAG=$(whoami) GINKGO_FLAGS='-ginkgo.focus "writes the system info to a results file"'
```

> Note: if filtering on NFR tests (or functional tests on GKE), set the filter in the appropriate field in your `vars.env` file.
> Note: if filtering on NFR tests, set the filter in the appropriate field in your `vars.env` file.
If you are running the tests in GCP, add your required label/ flags to `scripts/var.env`.

218 changes: 218 additions & 0 deletions tests/framework/crossplane.go
@@ -0,0 +1,218 @@
package framework

import (
    "context"
    "fmt"
    "net/http"
    "strings"
    "time"

    core "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/remotecommand"
)

// ExpectedNginxField contains an nginx directive key and value,
// and the expected file, server, and location block that it should exist in.
type ExpectedNginxField struct {
    // Key is the directive name.
    Key string
    // Value is the value for the directive. Can be the full value or a substring.
    Value string
    // File is the file name that should contain the directive. Can be a full filename or a substring.
    File string
    // Location is the location name that the directive should exist in.
    Location string
    // Servers are the server names that the directive should exist in.
    Servers []string
    // ValueSubstringAllowed allows the expected value to be a substring of the real value.
    // This makes it easier for cases when real values are complex file names or contain things we
    // don't care about, and we just want to check if a substring exists.
    ValueSubstringAllowed bool
}

// ValidateNginxFieldExists accepts the nginx config and the configuration for the expected field,
// and returns whether or not that field exists where it should.
func ValidateNginxFieldExists(conf *Payload, expFieldCfg ExpectedNginxField) bool {
    for _, config := range conf.Config {
        if !strings.Contains(config.File, expFieldCfg.File) {
            continue
        }

        for _, directive := range config.Parsed {
            if len(expFieldCfg.Servers) == 0 {
                if expFieldCfg.fieldFound(directive) {
                    return true
                }
                continue
            }

            for _, serverName := range expFieldCfg.Servers {
                if directive.Directive == "server" && getServerName(directive.Block) == serverName {
                    for _, serverDirective := range directive.Block {
                        if expFieldCfg.Location == "" && expFieldCfg.fieldFound(serverDirective) {
                            return true
                        } else if serverDirective.Directive == "location" &&
                            fieldExistsInLocation(serverDirective, expFieldCfg) {
                            return true
                        }
                    }
                }
            }
        }
    }

    return false
}
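
To make the lookup rules concrete, here is a small hedged example with invented data, written as if it sat in a test in this same `framework` package (so the types are in scope and `fmt` is imported). It shows the nesting `ValidateNginxFieldExists` walks: config file → `server` block (matched by its `server_name`) → `location` block → directive key/value.

```go
// Hand-built miniature crossplane payload (invented data).
conf := &Payload{
    Config: []Config{{
        File: "/etc/nginx/conf.d/http.conf",
        Parsed: Directives{{
            Directive: "server",
            Block: Directives{
                {Directive: "server_name", Args: []string{"cafe.example.com"}},
                {
                    Directive: "location",
                    Args:      []string{"/coffee"},
                    Block: Directives{
                        {Directive: "proxy_pass", Args: []string{"http://coffee-upstream"}},
                    },
                },
            },
        }},
    }},
}

expField := ExpectedNginxField{
    Key:      "proxy_pass",
    Value:    "http://coffee-upstream",
    File:     "http.conf",
    Location: "/coffee",
    Servers:  []string{"cafe.example.com"},
}

fmt.Println(ValidateNginxFieldExists(conf, expField)) // prints: true
```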

func getServerName(serverBlock Directives) string {
    for _, directive := range serverBlock {
        if directive.Directive == "server_name" {
            return directive.Args[0]
        }
    }

    return ""
}

func (e ExpectedNginxField) fieldFound(directive *Directive) bool {
    arg := strings.Join(directive.Args, " ")

    valueMatch := arg == e.Value
    if e.ValueSubstringAllowed {
        valueMatch = strings.Contains(arg, e.Value)
    }

    return directive.Directive == e.Key && valueMatch
}

func fieldExistsInLocation(serverDirective *Directive, expFieldCfg ExpectedNginxField) bool {
    // location could start with '=', so get the last element which is the path
    loc := serverDirective.Args[len(serverDirective.Args)-1]
    if loc == expFieldCfg.Location {
        for _, locDirective := range serverDirective.Block {
            if expFieldCfg.fieldFound(locDirective) {
                return true
            }
        }
    }

    return false
}

// injectCrossplaneContainer adds an ephemeral container that contains crossplane for parsing
// nginx config. It attaches to the nginx container and shares volumes with it.
func injectCrossplaneContainer(
    k8sClient kubernetes.Interface,
    timeout time.Duration,
    ngfPodName,
    namespace string,
) error {
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel()

    pod := &core.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      ngfPodName,
            Namespace: namespace,
        },
        Spec: core.PodSpec{
            EphemeralContainers: []core.EphemeralContainer{
                {
                    TargetContainerName: "nginx",
                    EphemeralContainerCommon: core.EphemeralContainerCommon{
                        Name:            "crossplane",
                        Image:           "nginx-crossplane:latest",
                        ImagePullPolicy: "Never",
                        Stdin:           true,
                        VolumeMounts: []core.VolumeMount{
                            {
                                MountPath: "/etc/nginx/conf.d",
                                Name:      "nginx-conf",
                            },
                            {
                                MountPath: "/etc/nginx/stream-conf.d",
                                Name:      "nginx-stream-conf",
                            },
                            {
                                MountPath: "/etc/nginx/module-includes",
                                Name:      "module-includes",
                            },
                            {
                                MountPath: "/etc/nginx/secrets",
                                Name:      "nginx-secrets",
                            },
                            {
                                MountPath: "/etc/nginx/includes",
                                Name:      "nginx-includes",
                            },
                        },
                    },
                },
            },
        },
    }

    podClient := k8sClient.CoreV1().Pods(namespace)
    if _, err := podClient.UpdateEphemeralContainers(ctx, ngfPodName, pod, metav1.UpdateOptions{}); err != nil {
        return fmt.Errorf("error adding ephemeral container: %w", err)
    }

    return nil
}
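
Exec'ing into the ephemeral container only works once it is actually running, so callers presumably have to wait for it after injection. Below is a minimal sketch of such a wait; the function itself, the polling interval, the gate on `EphemeralContainerStatuses`, and the extra `k8s.io/apimachinery/pkg/util/wait` import are assumptions, not part of this diff.

```go
// waitForCrossplaneContainer polls the pod until the "crossplane" ephemeral
// container reports a Running state, or the context is cancelled.
// (Hedged sketch, not part of this commit.)
func waitForCrossplaneContainer(
    ctx context.Context,
    k8sClient kubernetes.Interface,
    ngfPodName,
    namespace string,
) error {
    return wait.PollUntilContextCancel(ctx, 500*time.Millisecond, true, func(ctx context.Context) (bool, error) {
        pod, err := k8sClient.CoreV1().Pods(namespace).Get(ctx, ngfPodName, metav1.GetOptions{})
        if err != nil {
            return false, nil // treat errors as transient and keep polling (sketch)
        }
        for _, status := range pod.Status.EphemeralContainerStatuses {
            if status.Name == "crossplane" && status.State.Running != nil {
                return true, nil
            }
        }
        return false, nil
    })
}
```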

// createCrossplaneExecutor creates the executor for the crossplane command.
func createCrossplaneExecutor(
    k8sClient kubernetes.Interface,
    k8sConfig *rest.Config,
    ngfPodName,
    namespace string,
) (remotecommand.Executor, error) {
    cmd := []string{"crossplane", "parse", "/etc/nginx/nginx.conf"}
    opts := &core.PodExecOptions{
        Command:   cmd,
        Container: "crossplane",
        Stdout:    true,
        Stderr:    true,
    }

    req := k8sClient.CoreV1().RESTClient().Post().
        Resource("pods").
        SubResource("exec").
        Name(ngfPodName).
        Namespace(namespace).
        VersionedParams(opts, scheme.ParameterCodec)

    exec, err := remotecommand.NewSPDYExecutor(k8sConfig, http.MethodPost, req.URL())
    if err != nil {
        return nil, fmt.Errorf("error creating executor: %w", err)
    }

    return exec, nil
}
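
For orientation, here is a hedged sketch of how the helpers above might be wired together end to end: inject the container, run `crossplane parse`, and decode the JSON it prints. The function name is hypothetical, it assumes additional `bytes` and `encoding/json` imports, it lives in this same package so it can reach the unexported helpers, and in practice it would also wait for the ephemeral container to start (see the earlier wait sketch); the real glue code in the suite may differ.

```go
// GetCrossplaneConfig is a hedged sketch: inject the crossplane ephemeral
// container, exec `crossplane parse /etc/nginx/nginx.conf`, and decode the
// JSON payload written to stdout. (Not part of this commit.)
func GetCrossplaneConfig(
    ctx context.Context,
    k8sClient kubernetes.Interface,
    k8sConfig *rest.Config,
    ngfPodName,
    namespace string,
) (*Payload, error) {
    if err := injectCrossplaneContainer(k8sClient, 30*time.Second, ngfPodName, namespace); err != nil {
        return nil, err
    }

    exec, err := createCrossplaneExecutor(k8sClient, k8sConfig, ngfPodName, namespace)
    if err != nil {
        return nil, err
    }

    var stdout, stderr bytes.Buffer
    if err := exec.StreamWithContext(ctx, remotecommand.StreamOptions{
        Stdout: &stdout,
        Stderr: &stderr,
    }); err != nil {
        return nil, fmt.Errorf("error running crossplane: %w; stderr: %s", err, stderr.String())
    }

    conf := &Payload{}
    if err := json.Unmarshal(stdout.Bytes(), conf); err != nil {
        return nil, fmt.Errorf("error unmarshaling crossplane output: %w", err)
    }

    return conf, nil
}
```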

// The following types are copied from https://github.com/nginxinc/nginx-go-crossplane,
// with unnecessary fields stripped out.
type Payload struct {
    Config []Config `json:"config"`
}

type Config struct {
    File   string     `json:"file"`
    Parsed Directives `json:"parsed"`
}

type Directive struct {
    Comment   *string    `json:"comment,omitempty"`
    Directive string     `json:"directive"`
    File      string     `json:"file,omitempty"`
    Args      []string   `json:"args"`
    Includes  []int      `json:"includes,omitempty"`
    Block     Directives `json:"block,omitempty"`
    Line      int        `json:"line"`
}

type Directives []*Directive
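
As a small illustration of how the decoded tree nests (not part of the commit), a recursive walk over `Directives` prints every directive with its arguments, indenting one level per block; it relies only on `fmt` and `strings`, which this file already imports.

```go
// walkDirectives prints each directive and its args, indenting nested blocks.
// (Illustrative only.)
func walkDirectives(dirs Directives, depth int) {
    for _, d := range dirs {
        fmt.Printf("%s%s %s\n", strings.Repeat("  ", depth), d.Directive, strings.Join(d.Args, " "))
        walkDirectives(d.Block, depth+1)
    }
}
```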