From 8388c8422e52b46d9900d316082cd653be6d7b57 Mon Sep 17 00:00:00 2001 From: Mark Mandel Date: Wed, 3 Jan 2018 20:59:09 -0800 Subject: [PATCH] Development and Deployment to Minikube You can now develop Agon, and deploy Agon to minikube for local testing and development. The major issue was that Minikube doesn't expose the ExternalIP for the Node it runs - so Agon will now fallback to the InternalIP (with a warning) if it can't find the ExternalIP. *Breaking change* - Agon build tools will now look in ~/.kube (like all other kubernetes tools) for Kubernetes credentials. This was to streamline auth of minikube and other Kubernetes clusters by conforming to what Kubernetes tooling usually expects. If you are running a GKE cluster, you will need to run `make gcloud-auth-cluster` again to re-download the kubectl credentials. Closes #30 --- build/Makefile | 91 +++++++++++++++-- build/README.md | 112 +++++++++++++++++++-- build/install.yaml | 4 +- examples/cpp-simple/gameserver.yaml | 2 +- examples/simple-udp/server/gameserver.yaml | 2 +- gameservers/controller/controller.go | 21 +++- gameservers/controller/controller_test.go | 62 ++++++++---- 7 files changed, 247 insertions(+), 47 deletions(-) diff --git a/build/Makefile b/build/Makefile index 36dceca979..921bd4dab1 100644 --- a/build/Makefile +++ b/build/Makefile @@ -36,9 +36,11 @@ VERSION ?= $(base_version)-$(shell git rev-parse --short HEAD) # The registry that is being used to store docker images REGISTRY ?= gcr.io/agon-images # Where the kubectl configuration files are being stored -KUBECONFIG ?= $(build_path)/.kube +KUBEPATH ?= ~/.kube # The (gcloud) test cluster that is being worked against CLUSTER_NAME ?= test-cluster +# the profile to use when developing on minikube +MINIKUBE_PROFILE ?= agon # Directory that this Makefile is in. mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST))) @@ -48,7 +50,7 @@ agon_path := $(realpath $(build_path)/..) 
agon_package = github.com/agonio/agon mount_path = /go/src/$(agon_package) common_mounts = -v $(build_path)/.config/gcloud:/root/.config/gcloud \ - -v $(KUBECONFIG):/root/.kube \ + -v $(KUBEPATH):/root/.kube \ -v $(agon_path):$(mount_path) # Use a hash of the Dockerfile for the tag, so when the Dockerfile changes, @@ -84,11 +86,15 @@ test: ensure-build-image # Push all the images up to $(REGISTRY) push: push-gameservers-controller-image push-gameservers-sidecar-image -# install the development version of Agon +# Installs the current development version of Agon into the Kubernetes cluster +install: ALWAYS_PULL_SIDECAR := true +install: IMAGE_PULL_POLICY := "Always" install: cp $(build_path)/install.yaml $(build_path)/.install.yaml - sed -i -e 's!$${REGISTRY}!$(REGISTRY)!g' -e 's!$${VERSION}!$(VERSION)!g' $(build_path)/.install.yaml - docker run --rm $(common_mounts) --entrypoint=kubectl $(build_tag) apply -f $(mount_path)/build/.install.yaml + sed -i -e 's!$${REGISTRY}!$(REGISTRY)!g' -e 's!$${VERSION}!$(VERSION)!g' \ + -e 's!$${IMAGE_PULL_POLICY}!$(IMAGE_PULL_POLICY)!g' -e 's!$${ALWAYS_PULL_SIDECAR}!$(ALWAYS_PULL_SIDECAR)!g' \ + $(build_path)/.install.yaml + docker run --rm $(common_mounts) $(ARGS) $(build_tag) kubectl apply -f $(mount_path)/build/.install.yaml # Build a static binary for the gameserver controller build-gameservers-controller-binary: ensure-build-image @@ -182,6 +188,13 @@ push-build-image: docker tag $(build_tag) $(build_remote_tag) docker push $(build_remote_tag) +# ____ _ ____ _ _ +# / ___| ___ ___ __ _| | ___ / ___| | ___ _ _ __| | +# | | _ / _ \ / _ \ / _` | |/ _ \ | | | |/ _ \| | | |/ _` | +# | |_| | (_) | (_) | (_| | | __/ | |___| | (_) | |_| | (_| | +# \____|\___/ \___/ \__, |_|\___| \____|_|\___/ \__,_|\__,_| +# |___/ + # Initialise the gcloud login and project configuration, if you are working with GCP gcloud-init: ensure-build-config docker run --rm -it \ @@ -213,7 +226,67 @@ gcloud-auth-docker: ensure-build-image sudo mv 
/tmp/gcloud-auth-docker/.dockercfg ~/ sudo chown $(USER) ~/.dockercfg -# Clean the kubernetes and gcloud configuration -clean-config: - -sudo rm -r $(build_path)/.kube - -sudo rm -r $(build_path)/.config \ No newline at end of file +# Clean the gcloud configuration +clean-gcloud-config: + -sudo rm -r $(build_path)/.config + +# __ __ _ _ _ _ +# | \/ (_)_ __ (_) | ___ _| |__ ___ +# | |\/| | | '_ \| | |/ / | | | '_ \ / _ \ +# | | | | | | | | | <| |_| | |_) | __/ +# |_| |_|_|_| |_|_|_|\_\\__,_|_.__/ \___| +# + +# Switches to an agon profile, and starts a kubernetes cluster +# of the right version. Also mounts the project directory into minikube, +# so that the build tools will work. +# +# Use DRIVER variable to change the VM driver (default virtualbox) if you so desire. +minikube-test-cluster: DRIVER := virtualbox +minikube-test-cluster: minikube-agon-profile + minikube start --kubernetes-version v1.8.0 --vm-driver $(DRIVER) + $(MAKE) minikube-ensure-build-image + minikube mount $(agon_path):$(agon_path) + +# switch to the agon cluster +minikube-agon-profile: + minikube profile $(MINIKUBE_PROFILE) + +# Connecting to minikube requires some enhanced permissions, so use this target +# instead of `make shell` to start an interactive shell for development on minikube. +minikube-shell: ensure-build-image + eval $$(minikube docker-env --unset) && \ + $(MAKE) shell ARGS="--network=host -v ~/.minikube:$(HOME)/.minikube" + +# Convenience target to build Agon's docker images directly on minikube. +minikube-build: minikube-ensure-build-image + eval $$(minikube docker-env) && \ + $(MAKE) build-images + +# ensure minikube has the build image, if not, grab it +minikube-ensure-build-image: ensure-build-image + @if [ -z $$(minikube ssh -- docker images -q $(build_tag)) ]; then\ + echo "Could not find $(build_tag) image. 
Transferring...";\ + $(MAKE) minikube-transfer TAG=$(build_tag);\ + fi + +# Instead of building Agon's docker images inside minikube, +# use this command to push the local images that have already been built +# via `make build` or `make build-images`. +# +# Depending on the virtualisation driver/configuration, +# it may be faster to build locally and push, rather than building directly on minikube. +minikube-push: + $(MAKE) minikube-transfer TAG=$(sidecar_tag) + $(MAKE) minikube-transfer TAG=$(controller_tag) + +# Installs the current development version of Agon into the Kubernetes cluster. +# Use this instead of `make install`, as it disables PullAlways on the install.yaml +minikube-install: ensure-build-image + eval $$(minikube docker-env --unset) && \ + $(MAKE) install ARGS="--network=host -v ~/.minikube:/$(HOME)/.minikube" ALWAYS_PULL_SIDECAR=false IMAGE_PULL_POLICY=IfNotPresent + +# convenience target for transferring images into minikube +minikube-transfer: + eval $$(minikube docker-env --unset) && \ + docker save $(TAG) | (eval $$(minikube docker-env) && docker load) \ No newline at end of file diff --git a/build/README.md b/build/README.md index e17f5d3428..a99e8bff99 100644 --- a/build/README.md +++ b/build/README.md @@ -7,6 +7,10 @@ Rather than installing all the dependencies locally, you can test and build Agon built from the Dockerfile in this directory. There is an accompanying Makefile for all the common tasks you may wish to accomplish. +**Note** - this has been tested on Linux. Tickets for [OSX](https://github.com/googleprivate/agon/issues/46) +and [Windows](https://github.com/googleprivate/agon/issues/47) exist, and require work. Testing on these platforms +and reporting bugs is appreciated. + ## Table of Contents @@ -18,12 +22,13 @@ tasks you may wish to accomplish. 1. [Make Variable Reference](#make-variable-reference) 1. [VERSION](#version) 1. [REGISTRY](#registry) - 1. [KUBECONFIG](#kubeconfig) + 1. [KUBEPATH](#kubepath) 1. 
[CLUSTER_NAME](#cluster_name) 1. [Make Target Reference](#make-target-reference) 1. [Development Targets](#development-targets) 1. [Build Image Targets](#build-image-targets) 1. [Google Cloud Platform](#google-cloud-platform) + 1. [Minikube](#minikube) ## GOPATH @@ -69,8 +74,8 @@ to be open to UDP traffic. First step is to create a Google Cloud Project at https://console.cloud.google.com or reuse an existing one. -The build tools (by default) maintain configuration for gcloud and kubectl within the `build` folder, so as to keep -everything seperate (see below for overwriting these config locations). Therefore, once the project has been created, +The build tools (by default) maintain configuration for gcloud within the `build` folder, so as to keep +everything separate (see below for overwriting these config locations). Therefore, once the project has been created, we will need to authenticate out gcloud tooling against it. To do that run `make gcloud-init` and fill in the prompts as directed. @@ -81,8 +86,8 @@ done you can go to the Google Cloud Platform console and see that a cluster is u name of the test cluster you can set the `CLUSTER_NAME` environemnt varlable to value you would like. To grab the kubectl authentication details for this cluster, run `make gcloud-auth-cluster`, which will generate the -required Kubernetes security credintials for `kubectl`. This will be stored in `build/.kube` by default, but can also be -overwritten by setting the `KUBECONFIG` environment variable before running the command. +required Kubernetes security credentials for `kubectl`. This will be stored in `~/.kube` by default, but can also be +overwritten by setting the `KUBEPATH` environment variable before running the command. Great! Now we are setup, let's try out the development shell, and see if our `kubectl` is working! @@ -106,11 +111,60 @@ To push our images up at this point, is simple `make push` and that will push up project's container registry. 
Now that the images are pushed, to install the development version (with all imagePolicies set to always download), -run `make install` and agon will install the image that you just built and pushed on the test cluster you +run `make install` and Agon will install the image that you just built and pushed on the test cluster you created at the beginning of this section. (if you want to see the resulting installation yaml, you can find it in `build/.install.yaml`) ### Running a Test Minikube cluster -(Coming soon: Track [this bug](https://github.com/googleprivate/agon/issues/30) for details) +This will setup a [Minikube](https://github.com/kubernetes/minikube) cluster, running on an `agon` profile, + +Because Minikube runs on a virtualisation layer on the host, some of the standard build and development Make targets +need to be replaced by Minikube specific targets. + +First, [install Minikube](https://github.com/kubernetes/minikube#installation), which may also require you to install +a virtualisation solution, such as [VirtualBox](https://www.virtualbox.org) as well. + +Next we will create the Agon Minikube cluster. Run `make minikube-test-cluster` to create an `agon` profile, +create a Kubernetes cluster under this profile of the supported version, +and mount the development code inside the Minikube instance so we are able to build Agon inside Minikube. + +This will also install the kubectl authentication credentials in `~/.kube`, and set the +[`kubectl` context](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) +to `agon`. + +Great! Now we are setup, let's try out the development shell, and see if our `kubectl` is working! + +Run `make minikube-shell` to enter the development shell. You should see a bash shell that has you as the root user. +Enter `kubectl get pods` and press enter. You should see that you have no resources currently, but otherwise see no errors. 
+Assuming that all works, let's exit the shell by typing `exit` and hitting enter, and look at a couple of +options for building, pushing and installing Agon next. + +There are two options for building Agon, and depending on your virtualisation solution and its configuration +each has its pros and cons + +#### Building directly on Minikube +Since Minikube allows you to [reuse its Docker daemon](https://github.com/kubernetes/minikube/blob/master/docs/reusing_the_docker_daemon.md) +we can build our images to run Agon directly on Minikube! + +To do this, run `make minikube-build`, which will transfer the build image into the cluster +and run the `build-images` target on the Minikube instance, creating the images required to run Agon. + +Again depending on your virtualisation layer, you may want to configure it to allow it to have access to more +cores and/or memory than the default, to allow for faster compilation (or for it to compile at all). + +#### Pushing locally built images to Minikube +You may remember in the first part of this walkthrough, we ran `make build`, which created all the images and binaries +we needed to work with Agon locally. So instead of rebuilding them, can we push them straight into Minikube? + +You bet we can! + +Run `make minikube-push` which will send all of Agon's docker images from your local Docker into the Agon Minikube +instance. + +This may be a better option if you find building on Minikube slow, or you just prefer to build locally. + +Now that the images are pushed, to install the development version, +run `make minikube-install` and Agon will install the images that you built and pushed to the Agon Minikube instance +created at the beginning of this section. (if you want to see the resulting installation yaml, you can find it in `build/.install.yaml`) ### Next Steps @@ -124,8 +178,9 @@ The version of this build. 
Version defaults to the short hash of the latest comm ### REGISTRY The registry that is being used to store docker images. Defaults to gcr.io/agon-images - the release + CI registry. -### KUBECONFIG -Where the kubectl configuration files are being stored for shell and kubectl targets. Defaults to build/.kube +### KUBEPATH +The directory the kubectl configuration files are being stored for shell and kubectl targets. +Defaults to ~/.kube (where your Kubernetes configs are likely to already exist) ### CLUSTER_NAME The (gcloud) test cluster that is being worked against. Defaults to `test-cluster` @@ -156,6 +211,9 @@ Run all tests #### `make push` Pushes all built images up to the `$(REGISTRY)` +#### `make install` +Installs the current development version of Agon into the Kubernetes cluster + #### `make shell` Run a bash shell with the developer tools (go tooling, kubectl, etc) and source code in it. @@ -189,7 +247,7 @@ Creates the build docker image ### Google Cloud Platform -A set of utilities for setting up a Container Engine cluster on Google Cloud Platform, +A set of utilities for setting up a Kubernetes Engine cluster on Google Cloud Platform, since it's an easy way to get a test cluster working with Kubernetes. #### `make gcloud-init` @@ -205,3 +263,37 @@ Pulls down authentication information for kubectl against a cluster, name can be #### `make gcloud-auth-docker` Creates a short lived access to Google Cloud container repositories, so that you are able to call `docker push` directly. Useful when used in combination with `make push` command. + +### Minikube + +A set of utilities for setting up and running a [Minikube](https://github.com/kubernetes/minikube) instance, +for local development. + +Since Minikube runs locally, there are some targets that need to be used instead of the standard ones above. + +#### `minikube-test-cluster` +Switches to an "agon" profile, and starts a kubernetes cluster +of the right version. 
Also mounts the project directory into Minikube, +so that the build tools will work. + +Use DRIVER variable to change the VM driver (default virtualbox) if you so desire. + +#### `minikube-build` +Convenience target to build Agon's docker images directly on Minikube. + +#### `minikube-push` +Instead of building Agon's docker images inside Minikube, +use this command to push the local images that have already been built +via `make build` or `make build-images`. + +#### `minikube-install` +Installs the current development version of Agon into the Kubernetes cluster. +Use this instead of `make install`, as it disables PullAlways on the install.yaml + +#### `minikube-shell` +Connecting to Minikube requires some enhanced permissions, so use this target +instead of `make shell` to start an interactive shell for development on Minikube. + +Depending on the virtualisation driver/configuration, +it may be faster to build locally and push, rather than building directly on Minikube. + diff --git a/build/install.yaml b/build/install.yaml index 885a6113c5..b903af376a 100644 --- a/build/install.yaml +++ b/build/install.yaml @@ -45,10 +45,10 @@ spec: containers: - name: gameservers-controller image: ${REGISTRY}/gameservers-controller:${VERSION} - imagePullPolicy: Always + imagePullPolicy: ${IMAGE_PULL_POLICY} env: - name: ALWAYS_PULL_SIDECAR # set the sidecar imagePullPolicy to Always - value: "true" + value: "${ALWAYS_PULL_SIDECAR}" - name: SIDECAR # overwrite the GameServer sidecar image that is used value: ${REGISTRY}/gameservers-sidecar:${VERSION} - name: MIN_PORT diff --git a/examples/cpp-simple/gameserver.yaml b/examples/cpp-simple/gameserver.yaml index a75d0dc974..6dd3aff5d0 100644 --- a/examples/cpp-simple/gameserver.yaml +++ b/examples/cpp-simple/gameserver.yaml @@ -25,4 +25,4 @@ spec: containers: - name: cpp-simple image: gcr.io/agon-images/cpp-simple-server:0.1 - imagePullPolicy: Always \ No newline at end of file + # imagePullPolicy: Always # add for development \ No 
newline at end of file diff --git a/examples/simple-udp/server/gameserver.yaml b/examples/simple-udp/server/gameserver.yaml index 821fc72803..29e76eb7b2 100644 --- a/examples/simple-udp/server/gameserver.yaml +++ b/examples/simple-udp/server/gameserver.yaml @@ -25,4 +25,4 @@ spec: containers: - name: simple-udp image: gcr.io/agon-images/udp-server:0.1 - imagePullPolicy: Always \ No newline at end of file + # imagePullPolicy: Always # add for development \ No newline at end of file diff --git a/gameservers/controller/controller.go b/gameservers/controller/controller.go index 0fd783b37d..508d63992f 100644 --- a/gameservers/controller/controller.go +++ b/gameservers/controller/controller.go @@ -401,9 +401,9 @@ func (c *Controller) syncGameServerRequestReadyState(gs *stablev1alpha1.GameServ if err != nil { return gs, errors.Wrapf(err, "error getting pod for GameServer %s", gs.ObjectMeta.Name) } - addr, err := c.externalIP(pod) + addr, err := c.Address(pod) if err != nil { - return gs, errors.Wrapf(err, "error getting external ip for GameServer %s", gs.ObjectMeta.Name) + return gs, errors.Wrapf(err, "error getting external Address for GameServer %s", gs.ObjectMeta.Name) } gsCopy := gs.DeepCopy() @@ -479,8 +479,11 @@ func (c *Controller) listGameServerPods(gs *stablev1alpha1.GameServer) ([]*corev return result, nil } -// ExternalIP returns the external IP that the given Pod is being run on -func (c Controller) externalIP(pod *corev1.Pod) (string, error) { +// Address returns the IP that the given Pod is being run on +// This should be the externalIP, but if the externalIP is +// not set, it will fall back to the internalIP with a warning. 
+// (basically because minikube only has an internalIP) +func (c Controller) Address(pod *corev1.Pod) (string, error) { node, err := c.nodeLister.Get(pod.Spec.NodeName) if err != nil { return "", errors.Wrapf(err, "error retrieving node %s for Pod %s", node.ObjectMeta.Name, pod.ObjectMeta.Name) @@ -492,7 +495,15 @@ func (c Controller) externalIP(pod *corev1.Pod) (string, error) { } } - return "", errors.Errorf("Could not find an external ip for Node: #%s", node.ObjectMeta.Name) + // minikube only has an InternalIP on a Node, so we'll fall back to that. + logrus.WithField("node", node.ObjectMeta.Name).Warn("Could not find ExternalIP. Falling back to Internal") + for _, a := range node.Status.Addresses { + if a.Type == corev1.NodeInternalIP { + return a.Address, nil + } + } + + return "", errors.Errorf("Could not find an Address for Node: %s", node.ObjectMeta.Name) } // waitForEstablishedCRD blocks until CRD comes to an Established state. diff --git a/gameservers/controller/controller_test.go b/gameservers/controller/controller_test.go index 31d9f5c6a2..9c0327058b 100644 --- a/gameservers/controller/controller_test.go +++ b/gameservers/controller/controller_test.go @@ -608,28 +608,52 @@ func TestSyncGameServerShutdownState(t *testing.T) { }) } -func TestControllerExternalIP(t *testing.T) { +func TestControllerAddress(t *testing.T) { t.Parallel() - c, mocks := newFakeController() - ipfixture := "12.12.12.12" - node := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Status: corev1.NodeStatus{Addresses: []corev1.NodeAddress{{Address: ipfixture, Type: corev1.NodeExternalIP}}}} - pod := corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod"}, - Spec: corev1.PodSpec{NodeName: node.ObjectMeta.Name}} - - mocks.kubeClient.AddReactor("list", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) { - return true, &corev1.PodList{Items: []corev1.Pod{pod}}, nil - }) - mocks.kubeClient.AddReactor("list", "nodes", func(action k8stesting.Action) (bool, 
runtime.Object, error) { - return true, &corev1.NodeList{Items: []corev1.Node{node}}, nil - }) - - _, cancel := startInformers(mocks, c.gameServerSynced) - defer cancel() + fixture := map[string]struct { + node corev1.Node + expectedAddress string + }{ + "node with external ip": { + node: corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Status: corev1.NodeStatus{Addresses: []corev1.NodeAddress{{Address: "12.12.12.12", Type: corev1.NodeExternalIP}}}}, + expectedAddress: "12.12.12.12", + }, + "node with an internal ip": { + node: corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Status: corev1.NodeStatus{Addresses: []corev1.NodeAddress{{Address: "11.11.11.11", Type: corev1.NodeInternalIP}}}}, + expectedAddress: "11.11.11.11", + }, + "node with internal and external ip": { + node: corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Status: corev1.NodeStatus{Addresses: []corev1.NodeAddress{ + {Address: "9.9.9.8", Type: corev1.NodeExternalIP}, + {Address: "12.12.12.12", Type: corev1.NodeInternalIP}, + }}}, + expectedAddress: "9.9.9.8", + }, + } - addr, err := c.externalIP(&pod) - assert.Nil(t, err) - assert.Equal(t, ipfixture, addr) + for name, fixture := range fixture { + t.Run(name, func(t *testing.T) { + c, mocks := newFakeController() + pod := corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod"}, + Spec: corev1.PodSpec{NodeName: fixture.node.ObjectMeta.Name}} + + mocks.kubeClient.AddReactor("list", "pods", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &corev1.PodList{Items: []corev1.Pod{pod}}, nil + }) + mocks.kubeClient.AddReactor("list", "nodes", func(action k8stesting.Action) (bool, runtime.Object, error) { + return true, &corev1.NodeList{Items: []corev1.Node{fixture.node}}, nil + }) + + _, cancel := startInformers(mocks, c.gameServerSynced) + defer cancel() + + addr, err := c.Address(&pod) + assert.Nil(t, err) + assert.Equal(t, fixture.expectedAddress, addr) + }) + } } func TestControllerGameServerPod(t 
*testing.T) {