Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Improve CI and add check stage #98

Merged
merged 3 commits into from
May 6, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 10 additions & 7 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,20 +1,23 @@
# Binaries for programs and plugins
# Binaries for programs and plugins.
*.exe
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
# Test binary, build with `go test -c`.
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
# Output of the go coverage tool, specifically when used with LiteIDE.
*.out

# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736.
.glide/

# binary
# Binary.
bin/

# vendor
vendor/
# Vendor.
vendor/

# Test coverage.
.test_coverage.txt
2 changes: 1 addition & 1 deletion .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ before_install:
- curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/

script:
- make ci
- make check && make ci

env:
global:
Expand Down
90 changes: 24 additions & 66 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,102 +1,60 @@

# Name of this service/application
SERVICE_NAME := kooper

# Path of the go service inside docker
DOCKER_GO_SERVICE_PATH := /src

# Shell to use for running scripts
SHELL := $(shell which bash)

# Get docker path or an empty string
DOCKER := $(shell command -v docker)

# Get the main unix group for the user running make (to be used by docker-compose later)
OSTYPE := $(shell uname)
GID := $(shell id -g)

# Get the unix user id for the user running make (to be used by docker-compose later)
UID := $(shell id -u)

# cmds
UNIT_TEST_CMD := ./hack/scripts/unit-test.sh
INTEGRATION_TEST_CMD := ./hack/scripts/integration-test.sh
CI_INTEGRATION_TEST_CMD := ./hack/scripts/integration-test-kind.sh
MOCKS_CMD := ./hack/scripts/mockgen.sh
DOCKER_RUN_CMD := docker run -v ${PWD}:$(DOCKER_GO_SERVICE_PATH) --rm -it $(SERVICE_NAME)
RUN_EXAMPLE_POD_ECHO := go run ./examples/echo-pod-controller/cmd/* --development
RUN_EXAMPLE_POD_ECHO_ONEFILE := go run ./examples/onefile-echo-pod-controller/main.go --development
RUN_EXAMPLE_POD_TERM := go run ./examples/pod-terminator-operator/cmd/* --development
DOCKER_RUN_CMD := docker run --env ostype=$(OSTYPE) -v ${PWD}:/src --rm -it ${SERVICE_NAME}
DEPS_CMD := go mod tidy
K8S_VERSION := "1.15.10"
SET_K8S_DEPS_CMD := go mod edit \
-require=k8s.io/apiextensions-apiserver@kubernetes-${K8S_VERSION} \
-require=k8s.io/client-go@kubernetes-${K8S_VERSION} \
-require=k8s.io/apimachinery@kubernetes-${K8S_VERSION} \
-require=k8s.io/api@kubernetes-${K8S_VERSION} && \
$(DEPS_CMD)
CHECK_CMD := ./hack/scripts/check.sh


# environment dirs
DEV_DIR := docker/dev
help: ## Show this help
@echo "Help"
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[36m%-20s\033[93m %s\n", $$1, $$2}'

# The default action of this Makefile is to build the development docker image
.PHONY: default
default: build
default: help

# Test if the dependencies we need to run this Makefile are installed
.PHONY: deps-development
deps-development:
ifndef DOCKER
@echo "Docker is not available. Please install docker"
@exit 1
endif

# Build the development docker image
.PHONY: build
build:
build: ## Build the development docker image.
docker build -t $(SERVICE_NAME) --build-arg uid=$(UID) --build-arg gid=$(GID) -f ./docker/dev/Dockerfile .

# Dependency stuff.
.PHONY: set-k8s-deps
set-k8s-deps:
$(SET_K8S_DEPS_CMD)

.PHONY: deps
deps:
deps: ## Updates the required dependencies.
$(DEPS_CMD)

# Test stuff in dev
.PHONY: unit-test
unit-test: build
$(DOCKER_RUN_CMD) /bin/sh -c '$(UNIT_TEST_CMD)'
.PHONY: integration-test
integration-test: build
integration-test: build ## Runs integration tests out of CI.
echo "[WARNING] Requires a kubernetes cluster configured (and running) on your kubeconfig!!"
$(INTEGRATION_TEST_CMD)

.PHONY: test
test: unit-test
test: build ## Runs unit tests out of CI.
$(DOCKER_RUN_CMD) /bin/sh -c '$(UNIT_TEST_CMD)'

.PHONY: check
check: build ## Runs checks.
@$(DOCKER_RUN_CMD) /bin/sh -c '$(CHECK_CMD)'

# Test stuff in ci
.PHONY: ci-unit-test
ci-unit-test:
ci-unit-test: ## Runs unit tests in CI.
$(UNIT_TEST_CMD)

.PHONY: ci-integration-test
ci-integration-test:
ci-integration-test: ## Runs integration tests in CI.
$(CI_INTEGRATION_TEST_CMD)
.PHONY: ci

.PHONY: ci ## Runs all tests in CI.
ci: ci-unit-test ci-integration-test

# Mocks stuff in dev
.PHONY: mocks
mocks: build
mocks: build ## Generates mocks.
$(DOCKER_RUN_CMD) /bin/sh -c '$(MOCKS_CMD)'

# Run examples.
.PHONY: controller-example
controller-example:
$(RUN_EXAMPLE_POD_ECHO)
.PHONY: controller-example-onefile
controller-example-onefile:
$(RUN_EXAMPLE_POD_ECHO_ONEFILE)
.PHONY: operator-example
operator-example:
$(RUN_EXAMPLE_POD_TERM)
13 changes: 6 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -134,17 +134,16 @@ Check [Leader election](docs/leader-election.md).

### Garbage collection

Kooper only handles the events of resources that exist that are triggered when these resources change or are created. There is no delete event, so in order to clean the resources you have 2 ways of doing these:
Kooper only handles the events of resources that exist; these events are triggered when the resources being watched are updated or created. There is no delete event, so in order to clean up resources you have 2 ways of doing this:

If your controller creates as a side effect new Kubernetes resources you can use [owner references][owner-ref] on the created objects.

On the other hand if you want a more flexible clean up process (e.g clean from a database or a 3rd party service) you can use [finalizers], check the [pod-terminator-operator][finalizer-example] example.
- If your controller creates as a side effect new Kubernetes resources you can use [owner references][owner-ref] on the created objects.
- If you want a more flexible clean up process (e.g clean from a database or a 3rd party service) you can use [finalizers], check the [pod-terminator-operator][finalizer-example] example.

### Multiresource or secondary resources

Sometimes we have controllers that work on a main or primary resource and we also want to handle the events of a secondary resource that is based on the first one. For example, a deployment controller that watches the pods that belong to the deployment handled.
Sometimes we have controllers that work on a main or primary resource and we also want to handle the events of a secondary resource that is based on the first one. For example, a deployment controller that watches the pods (secondary) that belong to the deployment (primary) handled.

After using them and experiencing with controllers, we though that is not necesary becase:
After using multiresource controllers/retrievers, we concluded that a multiresource controller is not necessary because:

- Adds complexity.
- Adds corner cases; this translates into bugs, e.g.
Expand All @@ -154,7 +153,7 @@ After using them and experiencing with controllers, we though that is not necesa
- An error on one of the retrieval types stops all the controller process and not only the one based on that type.
- The benefit of having this is to reuse the handler (we already can do this, a `Handler` is easy to reuse).

The solution to this problems embraces simplicity once again, and mainly is to create multiple controllers using the same `Handler` but with a different `ListerWatcher`, the `Handler` API is easy enough to reuse it across multiple controllers, check an [example][multiresource-example] of creating a multiresource controller(s). Also, this comes with extra benefits:
The solution to these problems embraces simplicity once again: mainly, it consists of creating multiple controllers using the same `Handler`, each controller with a different `ListerWatcher`. The `Handler` API is easy enough to reuse across multiple controllers; check an [example][multiresource-example]. Also, this comes with extra benefits:

- Different controller interval depending on the type (fast-changing secondary objects can reconcile faster than the primary one, or vice versa).
- Wrap the controller handler with a middleware only for a particular type.
Expand Down
47 changes: 9 additions & 38 deletions controller/controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package controller_test

import (
"fmt"
"sync"
"testing"
"time"

Expand All @@ -23,12 +24,6 @@ import (
mcontroller "github.com/spotahome/kooper/mocks/controller"
)

// Namespace knows how to retrieve namespaces.
type namespaceRetriever struct {
lw cache.ListerWatcher
obj runtime.Object
}

// NewNamespace returns a Namespace retriever.
func newNamespaceRetriever(client kubernetes.Interface) controller.Retriever {
return controller.MustRetrieverFromListerWatcher(&cache.ListWatch{
Expand All @@ -41,38 +36,6 @@ func newNamespaceRetriever(client kubernetes.Interface) controller.Retriever {
})
}

// GetListerWatcher knows how to retrieve Namespaces.
func (n *namespaceRetriever) GetListerWatcher() cache.ListerWatcher {
return n.lw
}

// GetObject returns the namespace Object.
func (n *namespaceRetriever) GetObject() runtime.Object {
return n.obj
}

func onKubeClientWatchNamespaceReturn(client *fake.Clientset, adds []*corev1.Namespace, updates []*corev1.Namespace, deletes []*corev1.Namespace) {
w := watch.NewFake()
client.AddWatchReactor("namespaces", func(action kubetesting.Action) (bool, watch.Interface, error) {
return true, w, nil
})

go func() {
// Adds.
for _, obj := range adds {
w.Add(obj)
}
// Updates.
for _, obj := range updates {
w.Modify(obj)
}
// Deletes.
for _, obj := range deletes {
w.Delete(obj)
}
}()
}

func onKubeClientListNamespaceReturn(client *fake.Clientset, nss *corev1.NamespaceList) {
client.AddReactor("list", "namespaces", func(action kubetesting.Action) (bool, runtime.Object, error) {
return true, nss, nil
Expand Down Expand Up @@ -133,9 +96,14 @@ func TestGenericControllerHandle(t *testing.T) {
// Mock our handler and set expects.
callHandling := 0 // used to track the number of calls.
mh := &mcontroller.Handler{}

var mu sync.Mutex
for _, ns := range test.expNSAdds {
mh.On("Handle", mock.Anything, ns).Once().Return(nil).Run(func(args mock.Arguments) {
mu.Lock()
defer mu.Unlock()
callHandling++

// Check last call, if is the last call expected then stop the controller so
// we can assert the expectations of the calls and finish the test.
if callHandling == len(test.expNSAdds) {
Expand Down Expand Up @@ -205,9 +173,12 @@ func TestGenericControllerErrorRetries(t *testing.T) {
err := fmt.Errorf("wanted error")

// Expect all the retries
var mu sync.Mutex
for range test.nsList.Items {
callsPerNS := test.retryNumber + 1 // initial call + retries.
mh.On("Handle", mock.Anything, mock.Anything).Return(err).Times(callsPerNS).Run(func(args mock.Arguments) {
mu.Lock()
defer mu.Unlock()
totalCalls--
// Check last call, if is the last call expected then stop the controller so
// we can assert the expectations of the calls and finish the test.
Expand Down
34 changes: 21 additions & 13 deletions docker/dev/Dockerfile
Original file line number Diff line number Diff line change
@@ -1,27 +1,35 @@
FROM golang:1.13-alpine
FROM golang:1.14

RUN apk --no-cache add \
g++ \
git
ARG GOLANGCI_LINT_VERSION="1.25.0"
ARG ostype=Linux

# Mock creator
RUN go get -u github.com/vektra/mockery/.../
RUN apt-get update && apt-get install -y \
git \
bash \
zip

# Create user

RUN wget https://github.com/golangci/golangci-lint/releases/download/v${GOLANGCI_LINT_VERSION}/golangci-lint-${GOLANGCI_LINT_VERSION}-linux-amd64.tar.gz && \
tar zxvf golangci-lint-${GOLANGCI_LINT_VERSION}-linux-amd64.tar.gz --strip 1 -C /usr/local/bin/ && \
rm golangci-lint-${GOLANGCI_LINT_VERSION}-linux-amd64.tar.gz

RUN go get -u github.com/vektra/mockery/...

# Create user.
ARG uid=1000
ARG gid=1000
RUN addgroup -g $gid kooper && \
adduser -D -u $uid -G kooper kooper

RUN bash -c 'if [ ${ostype} == Linux ]; then addgroup -gid $gid app; else addgroup app; fi && \
adduser --disabled-password -uid $uid --ingroup app --gecos "" app && \
chown app:app -R /go'

# Fill go mod cache.
RUN mkdir /tmp/cache
COPY go.mod /tmp/cache
COPY go.sum /tmp/cache
RUN chown app:app -R /tmp/cache
USER app
RUN cd /tmp/cache && \
go mod download

RUN chown kooper:kooper -R /go
USER kooper

WORKDIR /src
WORKDIR /src
6 changes: 6 additions & 0 deletions hack/scripts/check.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
#!/usr/bin/env sh

set -o errexit
set -o nounset

golangci-lint run -E goimports --timeout 3m
3 changes: 2 additions & 1 deletion hack/scripts/unit-test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,5 @@
set -o errexit
set -o nounset

go test `go list ./... | grep -v vendor` -v
go test -race -coverprofile=.test_coverage.txt ./...
go tool cover -func=.test_coverage.txt | tail -n1 | awk '{print "Total test coverage: " $3}'
1 change: 0 additions & 1 deletion test/integration/helper/cli/cli.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ import (
"k8s.io/client-go/util/homedir"
)


// GetK8sClient returns k8s client.
func GetK8sClient(kubehome string) (kubernetes.Interface, error) {
// Try fallbacks.
Expand Down