Skip to content

Commit

Permalink
Dynamic Port Allocation for GameServers
Browse files Browse the repository at this point in the history
Implementation of a PortAllocator that ties into the
GameServer controller that will allocate a port on
GameServer creation (if "dynamic" portPolicy is set)
and then also release the port when a GameServer has been
deleted.

This also includes experimental support for node addition,
removal and unscheduling within the cluster.

Closes #14
  • Loading branch information
markmandel committed Jan 2, 2018
1 parent cd963b1 commit df72caf
Show file tree
Hide file tree
Showing 14 changed files with 850 additions and 91 deletions.
2 changes: 1 addition & 1 deletion build/build-image/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ RUN wget -q https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.zip && un
/opt/google-cloud-sdk/install.sh --usage-reporting=true --path-update=true --bash-completion=true --rc-path=/root/.bashrc

# update the path for both go and gcloud
ENV PATH /usr/local/go/bin:/opt/google-cloud-sdk/bin:$PATH
ENV PATH /usr/local/go/bin:/go/bin:/opt/google-cloud-sdk/bin:$PATH

# RUN gcloud components update
RUN gcloud components update && gcloud components install kubectl
Expand Down
3 changes: 2 additions & 1 deletion build/build-image/gen-crd-client.sh
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@
# limitations under the License.

rsync -r /go/src/github.com/agonio/agon/vendor/k8s.io/ /go/src/k8s.io/
/go/src/k8s.io/code-generator/generate-groups.sh "all" \
cd /go/src/k8s.io/code-generator
./generate-groups.sh "all" \
github.com/agonio/agon/pkg/client \
github.com/agonio/agon/pkg/apis stable:v1alpha1 \
--go-header-file=/go/src/github.com/agonio/agon/build/boilerplate.go.txt
4 changes: 4 additions & 0 deletions build/install.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,10 @@ spec:
value: "true"
- name: SIDECAR # overwrite the GameServer sidecar image that is used
value: ${REGISTRY}/gameservers-sidecar:${VERSION}
- name: MIN_PORT
value: "7000"
- name: MAX_PORT
value: "8000"
livenessProbe:
httpGet:
path: /healthz
Expand Down
6 changes: 3 additions & 3 deletions examples/cpp-simple/gameserver.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,11 @@
apiVersion: "stable.agon.io/v1alpha1"
kind: GameServer
metadata:
name: cpp-simple
# generate a unique name
# will need to be created with `kubectl create`
generateName: cpp-simple-
spec:
portPolicy: "static"
containerPort: 7654
hostPort: 7778
template:
spec:
containers:
Expand Down
16 changes: 11 additions & 5 deletions examples/gameserver.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -25,21 +25,27 @@

apiVersion: "stable.agon.io/v1alpha1"
kind: GameServer
# GameServer Metadata
# https://v1-8.docs.kubernetes.io/docs/api-reference/v1.8/#objectmeta-v1-meta
metadata:
name: "gds-example"
# generateName: "gds-example" # generate a unique name, with the given prefix
name: "gds-example" # set a fixed name
spec:
# if there is more than one container, specify which one is the game server
container: example-server
# `static` is the only current option. Dynamic port allocated will come in future releases.
# When `static` is the policy specified, `hostPort` is required, to specify the port that game clients will connect to
# portPolicy has two options:
# - "dynamic" (default) the system allocates a free hostPort for the gameserver, for game clients to connect to
# - "static", user defines the hostPort that the game client will connect to. The onus is then on the user to ensure that the
# port is available. When static is the policy specified, `hostPort` is required to be populated
portPolicy: "static"
# the port that is being opened on the game server process
containerPort: 7654
# the port exposed on the host
# the port exposed on the host, only required when `portPolicy` is "static". Overwritten when portPolicy is "dynamic".
hostPort: 7777
# protocol being used. Defaults to UDP. TCP is the only other option
protocol: UDP
# Pod configuration
# Pod template configuration
# https://v1-8.docs.kubernetes.io/docs/api-reference/v1.8/#podtemplate-v1-core
template:
# pod metadata. Name & Namespace is overwritten
metadata:
Expand Down
35 changes: 28 additions & 7 deletions gameservers/controller/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,8 @@
package main

import (
"time"

"net/http"
"time"

"github.com/agonio/agon/pkg/apis/stable"
stablev1alpha1 "github.com/agonio/agon/pkg/apis/stable/v1alpha1"
Expand Down Expand Up @@ -60,14 +59,16 @@ type Controller struct {
gameServerSynced cache.InformerSynced
nodeLister corelisterv1.NodeLister
queue workqueue.RateLimitingInterface
portAllocator *PortAllocator
server *http.Server

// this allows for overwriting for testing purposes
syncHandler func(string) error
}

// NewController returns a new gameserver crd controller
func NewController(sidecarImage string,
func NewController(minPort, maxPort int32,
sidecarImage string,
alwaysPullSidecarImage bool,
kubeClient kubernetes.Interface,
kubeInformerFactory informers.SharedInformerFactory,
Expand All @@ -89,6 +90,7 @@ func NewController(sidecarImage string,
gameServerSynced: gsInformer.HasSynced,
nodeLister: kubeInformerFactory.Core().V1().Nodes().Lister(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), stable.GroupName),
portAllocator: NewPortAllocator(minPort, maxPort, kubeInformerFactory, agonInformerFactory),
}

gsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
Expand Down Expand Up @@ -118,7 +120,11 @@ func NewController(sidecarImage string,

mux := http.NewServeMux()
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("ok"))
_, err := w.Write([]byte("ok"))
if err != nil {
logrus.WithError(err).Error("could not send ok response on healthz")
w.WriteHeader(http.StatusInternalServerError)
}
})

c.server = &http.Server{
Expand All @@ -140,7 +146,7 @@ func (c Controller) Run(threadiness int, stop <-chan struct{}) error {
logrus.WithError(err).Error("Could not listen on :8080")
}
}()
defer c.server.Close()
defer c.server.Close() // nolint: errcheck

err := c.waitForEstablishedCRD()
if err != nil {
Expand All @@ -152,6 +158,10 @@ func (c Controller) Run(threadiness int, stop <-chan struct{}) error {
return errors.New("failed to wait for caches to sync")
}

if err := c.portAllocator.Run(stop); err != nil {
return err
}

logrus.Info("Starting workers...")
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stop)
Expand Down Expand Up @@ -298,6 +308,16 @@ func (c *Controller) syncGameServerBlankState(gs *stablev1alpha1.GameServer) (*s
if gs.Status.State == "" && gs.ObjectMeta.DeletionTimestamp.IsZero() {
gsCopy := gs.DeepCopy()
gsCopy.ApplyDefaults()

// manage dynamic ports
if gsCopy.Spec.PortPolicy == stablev1alpha1.Dynamic {
port, err := c.portAllocator.Allocate()
if err != nil {
return gsCopy, errors.Wrapf(err, "error allocating port for GameServer %s", gsCopy.Name)
}
gsCopy.Spec.HostPort = port
}

logrus.WithField("gs", gsCopy).Info("Syncing Blank State")
gs, err := c.gameServerGetter.GameServers(gs.ObjectMeta.Namespace).Update(gsCopy)
return gs, errors.Wrapf(err, "error updating GameServer %s to default values", gs.Name)
Expand Down Expand Up @@ -384,6 +404,7 @@ func (c *Controller) syncGameServerRequestReadyState(gs *stablev1alpha1.GameServ
gsCopy := gs.DeepCopy()
gsCopy.Status.State = stablev1alpha1.Ready
gsCopy.Status.Address = addr
gsCopy.Status.NodeName = pod.Spec.NodeName
// HostPort is always going to be populated, even when dynamic
// This will be a double up of information, but it will be easier to read
gsCopy.Status.Port = gs.Spec.HostPort
Expand All @@ -398,8 +419,8 @@ func (c *Controller) syncGameServerRequestReadyState(gs *stablev1alpha1.GameServ
func (c *Controller) syncGameServerShutdownState(gs *stablev1alpha1.GameServer) (*stablev1alpha1.GameServer, error) {
if gs.Status.State == stablev1alpha1.Shutdown && gs.ObjectMeta.DeletionTimestamp.IsZero() {
logrus.WithField("gs", gs).Info("Syncing Shutdown State")
// let's be explicit about how we want to shut things down
p := metav1.DeletePropagationBackground
// Do it in the foreground, so the gameserver gets killed last
p := metav1.DeletePropagationForeground
err := c.gameServerGetter.GameServers(gs.ObjectMeta.Namespace).Delete(gs.ObjectMeta.Name, &metav1.DeleteOptions{PropagationPolicy: &p})
return nil, errors.Wrapf(err, "error deleting Game Server %s", gs.ObjectMeta.Name)
}
Expand Down
Loading

0 comments on commit df72caf

Please sign in to comment.