From 9c6597bc401cf50b3c6411b41d0104b0870e7335 Mon Sep 17 00:00:00 2001 From: Mark Mandel Date: Fri, 21 May 2021 15:01:39 -0700 Subject: [PATCH] Respectful code cleanup No.1 (#2107) Got some more work to do, but it's a start on switching out some verbiage to be more inclusive. More to come! --- CONTRIBUTING.md | 4 +- build/README.md | 11 ++--- build/prometheus.yaml | 2 +- examples/xonotic/server.cfg | 10 ++--- site/content/en/docs/FAQ/_index.md | 8 ++-- site/content/en/docs/Guides/metrics.md | 40 ++++++++++--------- .../content/en/docs/Installation/upgrading.md | 2 +- site/handler.go | 2 +- site/handler_test.go | 2 +- site/themes/docsy/config.toml | 8 ---- 10 files changed, 44 insertions(+), 45 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 364289dae5..5a4cd49bee 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -107,6 +107,8 @@ repository, we have a [community membership guide](./docs/governance/community_m - [Kubernetes Custom Resources](https://kubernetes.io/docs/concepts/api-extension/custom-resources/) - This is how we define our own resource names (`GameServer`, etc) within Kubernetes. +- [Kubernetes Controllers](https://kubernetes.io/docs/concepts/architecture/controller/) - + Kubernetes documentation on writing controllers. - [Extend the Kubernetes API with CustomResourceDefinitions](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) - This page shows how to install a custom resource into the Kubernetes API by creating a CustomResourceDefinition. - [Joe Beda's TGIK Controller](https://github.com/jbeda/tgik-controller) - @@ -116,7 +118,7 @@ repository, we have a [community membership guide](./docs/governance/community_m Example of a Custom Resources with a Kubernetes Controller. 
- [Kubernetes Code Generator](https://github.com/kubernetes/code-generator) - The tooling that generated the Go libraries for the Custom Resource we define -- [Kubernetes Controller Best Practices](https://github.com/kubernetes/community/blob/master/contributors/devel/controllers.md) - +- [Kubernetes Controller Best Practices](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/controllers.md) - Set of best practices written for writing Controllers inside Kubernetes. Also a great list for everywhere else too. - [Writing Kube Controllers for Everyone - Maciej Szulik, Red Hat](https://www.youtube.com/watch?v=AUNPLQVxvmw) - A great intro video into coding for Controllers, and explaining Informers and Listers. diff --git a/build/README.md b/build/README.md index af47eacbdf..fc6663a24b 100644 --- a/build/README.md +++ b/build/README.md @@ -280,7 +280,7 @@ Now that the images are pushed, to install the development version, run `make minikube-install` and Agones will install the images that you built and pushed to the Agones Minikube instance (if you want to see the resulting installation yaml, you can find it in `build/.install.yaml`). -It's worth noting that Minikube does let you [reuse its Docker daemon](https://github.com/kubernetes/minikube/blob/master/docs/reusing_the_docker_daemon.md), +It's worth noting that Minikube does let you [reuse its Docker daemon](https://minikube.sigs.k8s.io/docs/handbook/pushing/#1-pushing-directly-to-the-in-cluster-docker-daemon-docker-env), and build directly on Minikube, but in this case this approach is far simpler, and makes cross-platform support for the build system much easier. @@ -323,7 +323,7 @@ run `make kind-install` and Agones will install the images that you built and pu Running end-to-end tests on Kind is done via the `make kind-test-e2e` target. This target use the same `make test-e2e` but also setup some prerequisites for use with a Kind cluster. 
-If you are having performance issues, check out these docs [here](https://github.com/kubernetes-sigs/kind/tree/master/docs/user#creating-a-cluster) +If you are having performance issues, check out these docs [here](https://kind.sigs.k8s.io/docs/user/quick-start/#creating-a-cluster) ### Running a Custom Test Environment @@ -503,7 +503,8 @@ Run controller failure portion of the end-to-end tests. #### `make setup-prometheus` -Install Prometheus server using [stable/prometheus](https://github.com/helm/charts/tree/master/stable/prometheus) chart into the current cluster. +Install Prometheus server using [Prometheus Community](https://prometheus-community.github.io/helm-charts) +chart into the current cluster. By default all exporters and alertmanager is disabled. @@ -517,7 +518,8 @@ Run helm repo update to get the mose recent charts. #### `make setup-grafana` -Install Gafrana server using [stable/grafana](https://github.com/helm/charts/tree/master/stable/grafana) chart into the current cluster and setup [Agones dashboards with Prometheus datasource](./grafana/). +Install Grafana server using [grafana community](https://grafana.github.io/helm-charts) chart into +the current cluster and setup [Agones dashboards with Prometheus datasource](./grafana/). You can set your own password using the `PASSWORD` environment variable. @@ -710,7 +712,6 @@ port forwarding to the controller deployment. ### Kind [Kind - kubernetes in docker](https://github.com/kubernetes-sigs/kind) is a tool for running local Kubernetes clusters using Docker container "nodes". -Kind is primarily designed for testing Kubernetes 1.11+, initially targeting the [conformance tests](https://github.com/kubernetes/community/blob/master/contributors/devel/conformance-tests.md). Since Kind runs locally, there are some targets that need to be used instead of the standard ones above. 
diff --git a/build/prometheus.yaml b/build/prometheus.yaml index 8165fd5041..b97244c4e3 100644 --- a/build/prometheus.yaml +++ b/build/prometheus.yaml @@ -68,7 +68,7 @@ serverFiles: tls_config: ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that + # Kubernetes control plane CA, then disable certificate verification below. Note that # certificate verification is an integral part of a secure infrastructure # so this should only be disabled in a controlled environment. You can # disable certificate verification by uncommenting the line below. diff --git a/examples/xonotic/server.cfg b/examples/xonotic/server.cfg index 152f0bfb0a..994253f72b 100644 --- a/examples/xonotic/server.cfg +++ b/examples/xonotic/server.cfg @@ -85,13 +85,13 @@ bot_prefix [BOT] // prepend this to all botnames //sv_vote_gametype_mygametype_description "This is my custom gametype" // the description for the custom gametype //// Custom icons for custom gametypes are supported and have to be located in a server pk3 as gfx/menu/default/gametype_mygametype -//sv_vote_commands "restart fraglimit chmap gotomap nextmap endmatch reducematchtime extendmatchtime allready kick cointoss movetoauto shuffleteams" // players can vote for those commands or use them if they are masters. You canm also add 'g_grappling_hook' for hook voting, and 'sv_fbskin_green sv_fbskin_red sv_fbskin_orange sv_fbskin_off' for fbskin voting. -//sv_vote_master_commands "movetored movetoblue movetoyellow movetopink" // add commands masters can use if logged in or elected. You may want to put 'kickban' here, so masters can keep out punks. It may be good to also put "sv_status_privacy 0" then... 
+//sv_vote_commands "restart fraglimit chmap gotomap nextmap endmatch reducematchtime extendmatchtime allready kick cointoss movetoauto shuffleteams" // players can vote for those commands or use them if they are admins. You can also add 'g_grappling_hook' for hook voting, and 'sv_fbskin_green sv_fbskin_red sv_fbskin_orange sv_fbskin_off' for fbskin voting. +//sv_vote_master_commands "movetored movetoblue movetoyellow movetopink" // add commands admins can use if logged in or elected. You may want to put 'kickban' here, so admins can keep out punks. It may be good to also put "sv_status_privacy 0" then... //rcon_restricted_commands "restart fraglimit chmap gotomap endmatch reducematchtime extendmatchtime allready kick kickban \"sv_cmd bans\" \"sv_cmd unban\" status \"sv_cmd teamstatus\" movetoauto movetored movetoblue movetoyellow movetopink" // commands for the (stronger) rcon restricted //sv_vote_call 1 // 0 will disable the normal voting -//sv_vote_master 1 // 0 will disable voting to become master, good if you prefer to use the master password instead -//sv_vote_master_password "" // when set, vlogin PWD will allow people to become master to run master commands directly using vdo +//sv_vote_master 1 // 0 will disable voting to become admin, good if you prefer to use the admin password instead +//sv_vote_master_password "" // when set, vlogin PWD will allow people to become admin to run admin commands directly using vdo //sv_vote_majority_factor 0.5 // What percentage of the PLAYERS constitute a majority to win a vote? must be at least 0.5 //sv_vote_majority_factor_of_voted 0 // What percentage of the VOTERS who already voted constitute a majority to win a vote? 
must be at least 0.5 // note: to JUST support simple majorities, set these two factors equal @@ -131,7 +131,7 @@ bot_prefix [BOT] // prepend this to all botnames // - Optimize the game balance for future releases // - Identify problems in the bot AI and improving it // - Find settings that make servers "impure" that should not, so we can -// whitelist them in future releases +// allowlist them in future releases // // We will or might publish: // - Global weapon pairing statistics for players vs players, or bots vs bots, diff --git a/site/content/en/docs/FAQ/_index.md b/site/content/en/docs/FAQ/_index.md index d99677bfbe..30d74a1003 100644 --- a/site/content/en/docs/FAQ/_index.md +++ b/site/content/en/docs/FAQ/_index.md @@ -119,12 +119,12 @@ The answer to this question is "it depends" 😁. As a rule of thumb, we recommend clusters no larger than 500 nodes, based on production workloads. -That being said, this is highly dependent on Kubernetes hosting platform, master resources, nodes resources, resource -requirements of your game server, game server session length, node spin up time, etc, and therefore you should run your -own load tests against your hosting provider to determine the optimal cluster size for your game. +That being said, this is highly dependent on Kubernetes hosting platform, control plane resources, node resources, +requirements of your game server, game server session length, node spin up time, etc, and therefore you +should run your own load tests against your hosting provider to determine the optimal cluster size for your game. We recommend running multiple clusters for your production GameServer workloads, to spread the load and -provide extra redundancy across your entire game server fleet. +provide extra redundancy across your entire game server fleet. 
## Network diff --git a/site/content/en/docs/Guides/metrics.md b/site/content/en/docs/Guides/metrics.md index dff8807d29..e44e8b55f4 100644 --- a/site/content/en/docs/Guides/metrics.md +++ b/site/content/en/docs/Guides/metrics.md @@ -15,7 +15,7 @@ We plan to support multiple exporters in the future via environment variables an ### Prometheus -If you are running a [Prometheus](https://prometheus.io/) instance you just need to ensure that metrics and kubernetes service discovery are enabled. (helm chart values `agones.metrics.prometheusEnabled` and `agones.metrics.prometheusServiceDiscovery`). This will automatically add annotations required by Prometheus to discover Agones metrics and start collecting them. (see [example](https://github.com/prometheus/prometheus/tree/master/documentation/examples/kubernetes-rabbitmq)) +If you are running a [Prometheus](https://prometheus.io/) instance you just need to ensure that metrics and kubernetes service discovery are enabled. (helm chart values `agones.metrics.prometheusEnabled` and `agones.metrics.prometheusServiceDiscovery`). This will automatically add annotations required by Prometheus to discover Agones metrics and start collecting them. 
(see [example](https://github.com/prometheus/prometheus/tree/main/documentation/examples/kubernetes-rabbitmq)) ### Prometheus Operator @@ -80,27 +80,27 @@ Follow the [Stackdriver Installation steps](#stackdriver-installation) to see yo ### Grafana Dashboards -We provide a set of useful [Grafana](https://grafana.com/) dashboards to monitor Agones workload, they are located under the {{< ghlink href="/build/grafana" branch="master" >}}grafana folder{{< /ghlink >}}: +We provide a set of useful [Grafana](https://grafana.com/) dashboards to monitor Agones workload, they are located under the {{< ghlink href="/build/grafana" branch="main" >}}grafana folder{{< /ghlink >}}: -- {{< ghlink href="/build/grafana/dashboard-autoscalers.yaml" branch="master" >}}Agones Autoscalers{{< /ghlink >}} allows you to monitor your current autoscalers replicas request as well as fleet replicas allocation and readyness statuses. You can only select one autoscaler at the time using the provided dropdown. +- {{< ghlink href="/build/grafana/dashboard-autoscalers.yaml" branch="main" >}}Agones Autoscalers{{< /ghlink >}} allows you to monitor your current autoscalers replicas request as well as fleet replicas allocation and readiness statuses. You can only select one autoscaler at a time using the provided dropdown. -- {{< ghlink href="/build/grafana/dashboard-gameservers.yaml" branch="master" >}}Agones GameServers{{< /ghlink >}} displays your current game servers workload status (allocations, game servers statuses, fleets replicas) with optional fleet name filtering. +- {{< ghlink href="/build/grafana/dashboard-gameservers.yaml" branch="main" >}}Agones GameServers{{< /ghlink >}} displays your current game servers workload status (allocations, game servers statuses, fleets replicas) with optional fleet name filtering. 
-- {{< ghlink href="/build/grafana/dashboard-allocations.yaml" branch="master" >}}Agones GameServer Allocations{{< /ghlink >}} displays Agones gameservers allocations rates and counts per fleet. +- {{< ghlink href="/build/grafana/dashboard-allocations.yaml" branch="main" >}}Agones GameServer Allocations{{< /ghlink >}} displays Agones gameservers allocations rates and counts per fleet. -- {{< ghlink href="/build/grafana/dashboard-allocator-usage.yaml" branch="master" >}}Agones Allocator Resource{{< /ghlink >}} displays Agones Allocators CPU, memory usage and also some useful Golang runtime metrics. +- {{< ghlink href="/build/grafana/dashboard-allocator-usage.yaml" branch="main" >}}Agones Allocator Resource{{< /ghlink >}} displays Agones Allocators CPU, memory usage and also some useful Golang runtime metrics. -- {{< ghlink href="/build/grafana/dashboard-status.yaml" branch="master" >}}Agones Status{{< /ghlink >}} displays Agones controller health status. +- {{< ghlink href="/build/grafana/dashboard-status.yaml" branch="main" >}}Agones Status{{< /ghlink >}} displays Agones controller health status. -- {{< ghlink href="/build/grafana/dashboard-controller-usage.yaml" branch="master" >}}Agones Controller Resource Usage{{< /ghlink >}} displays Agones Controller CPU and memory usage and also some Golang runtime metrics. +- {{< ghlink href="/build/grafana/dashboard-controller-usage.yaml" branch="main" >}}Agones Controller Resource Usage{{< /ghlink >}} displays Agones Controller CPU and memory usage and also some Golang runtime metrics. -- {{< ghlink href="/build/grafana/dashboard-goclient-requests.yaml" branch="master" >}}Agones Controller go-client requests{{< /ghlink >}} displays Agones Controller Kubernetes API consumption. +- {{< ghlink href="/build/grafana/dashboard-goclient-requests.yaml" branch="main" >}}Agones Controller go-client requests{{< /ghlink >}} displays Agones Controller Kubernetes API consumption. 
-- {{< ghlink href="/build/grafana/dashboard-goclient-caches.yaml" branch="master" >}}Agones Controller go-client caches{{< /ghlink >}} displays Agones Controller Kubernetes Watches/Lists operations used. +- {{< ghlink href="/build/grafana/dashboard-goclient-caches.yaml" branch="main" >}}Agones Controller go-client caches{{< /ghlink >}} displays Agones Controller Kubernetes Watches/Lists operations used. -- {{< ghlink href="/build/grafana/dashboard-goclient-workqueues.yaml" branch="master" >}}Agones Controller go-client workqueues{{< /ghlink >}} displays Agones Controller workqueue processing time and rates. +- {{< ghlink href="/build/grafana/dashboard-goclient-workqueues.yaml" branch="main" >}}Agones Controller go-client workqueues{{< /ghlink >}} displays Agones Controller workqueue processing time and rates. -- {{< ghlink href="/build/grafana/dashboard-apiserver-requests.yaml" branch="master" >}}Agones Controller API Server requests{{< /ghlink >}} displays your current API server request rate, errors rate and request latencies with optional CustomResourceDefinition filtering by Types: fleets, gameserversets, gameservers, gameserverallocations. +- {{< ghlink href="/build/grafana/dashboard-apiserver-requests.yaml" branch="main" >}}Agones Controller API Server requests{{< /ghlink >}} displays your current API server request rate, errors rate and request latencies with optional CustomResourceDefinition filtering by Types: fleets, gameserversets, gameservers, gameserverallocations. Dashboard screenshots : @@ -109,7 +109,7 @@ Dashboard screenshots : ![grafana dashboard controller](../../../images/grafana-dashboard-controller.png) {{< alert title="Note" color="info">}} -You can import our dashboards by copying the json content from {{< ghlink href="/build/grafana" branch="master" >}}each config map{{< /ghlink >}} into your own instance of Grafana (+ > Create > Import > Or paste json) or follow the [installation](#installation) guide. 
+You can import our dashboards by copying the json content from {{< ghlink href="/build/grafana" branch="main" >}}each config map{{< /ghlink >}} into your own instance of Grafana (+ > Create > Import > Or paste json) or follow the [installation](#installation) guide. {{< /alert >}} ## Installation @@ -122,7 +122,7 @@ Before attemping this guide you should make sure you have [kubectl](https://kube Prometheus is an open source monitoring solution, we will use it to store Agones controller metrics and query back the data. -Let's install Prometheus using the [helm stable](https://github.com/helm/charts/tree/master/stable/prometheus) repository. +Let's install Prometheus using the [Prometheus Community Kubernetes Helm Charts](https://prometheus-community.github.io/helm-charts/) repository. ```bash helm repo add prometheus-community https://prometheus-community.github.io/helm-charts @@ -136,7 +136,7 @@ helm upgrade --install --wait prom prometheus-community/prometheus --version 11. ``` For resiliency it is recommended to run Prometheus on a dedicated node which is separate from nodes where Game Servers -are scheduled. If you use the above command, with our {{< ghlink href="/build/prometheus.yaml" branch="master" >}}prometheus.yaml{{< /ghlink >}} to set up Prometheus, it will schedule Prometheus pods on nodes +are scheduled. If you use the above command, with our {{< ghlink href="/build/prometheus.yaml" branch="main" >}}prometheus.yaml{{< /ghlink >}} to set up Prometheus, it will schedule Prometheus pods on nodes tainted with `agones.dev/agones-metrics=true:NoExecute` and labeled with `agones.dev/agones-metrics=true` if available. As an example, to set up a dedicated node pool for Prometheus on GKE, run the following command before installing Prometheus. Alternatively you can taint and label nodes manually. @@ -150,11 +150,13 @@ gcloud container node-pools create agones-metrics --cluster=... --zone=... 
\ By default we will disable the push gateway (we don't need it for Agones) and other exporters. -The helm [chart](https://github.com/helm/charts/tree/master/stable/prometheus) support [nodeSelector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector), [affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) and [toleration](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/), you can use them to schedule prometheus deployments on an isolated node(s) to have an homogeneous game servers workload. +The helm chart supports +[nodeSelector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector), +[affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) and [toleration](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/), you can use them to schedule Prometheus deployments on an isolated node(s) to have an homogeneous game servers workload. This will install a Prometheus Server in your current cluster with [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) (Deactivated for Minikube and Kind) for storing and querying time series, it will automatically start collecting metrics from Agones Controller. -Finally to access Prometheus metrics, rules and alerts explorer use +Finally, to access Prometheus metrics, rules and alerts explorer use ```bash kubectl port-forward deployments/prom-prometheus-server 9090 -n metrics @@ -180,7 +182,9 @@ First we will install [Agones dashboard](#grafana-dashboards) as [config maps](h kubectl apply -f ./build/grafana/ ``` -Now we can install [grafana chart](https://github.com/helm/charts/tree/master/stable/grafana) from stable repository. 
(Replace `` with the admin password of your choice) +Now we can install the +[Grafana Community Kubernetes Helm Charts](https://grafana.github.io/helm-charts/) from +their repository. (Replace `` with the admin password of your choice) ```bash helm repo add grafana https://grafana.github.io/helm-charts diff --git a/site/content/en/docs/Installation/upgrading.md b/site/content/en/docs/Installation/upgrading.md index 0603da2873..7661ffc4b4 100644 --- a/site/content/en/docs/Installation/upgrading.md +++ b/site/content/en/docs/Installation/upgrading.md @@ -125,7 +125,7 @@ upgrades. 1. Start your maintenance window. 1. Scale your Fleets down to 0 and/or delete your GameServers. This is a good safety measure so there aren't race conditions between the Agones controller being recreated and GameServers being deleted doesn't occur, and GameServers can end up stuck in erroneous states. -1. Start and complete you master upgrade(s). +1. Start and complete your control plane upgrade(s). 1. Start and complete your node upgrades. 1. Scale your Fleets back up and/or recreate your GameServers. 1. Run any other tests to ensure the Agones installation is still working as expected. diff --git a/site/handler.go b/site/handler.go index 7a3ab88dcc..40621f465b 100644 --- a/site/handler.go +++ b/site/handler.go @@ -72,7 +72,7 @@ func newHandler(config []byte) (*handler, error) { case e.Display != "": // Already filled in. 
case strings.HasPrefix(e.Repo, "https://github.com/"): - pc.display = fmt.Sprintf("%v %v/tree/master{/dir} %v/blob/master{/dir}/{file}#L{line}", e.Repo, e.Repo, e.Repo) + pc.display = fmt.Sprintf("%v %v/tree/main{/dir} %v/blob/main{/dir}/{file}#L{line}", e.Repo, e.Repo, e.Repo) case strings.HasPrefix(e.Repo, "https://bitbucket.org"): pc.display = fmt.Sprintf("%v %v/src/default{/dir} %v/src/default{/dir}/{file}#{file}-{line}", e.Repo, e.Repo, e.Repo) } diff --git a/site/handler_test.go b/site/handler_test.go index 9a1263795d..6fe2c975cd 100644 --- a/site/handler_test.go +++ b/site/handler_test.go @@ -51,7 +51,7 @@ func TestHandler(t *testing.T) { " repo: https://github.com/rakyll/portmidi\n", path: "/portmidi", goImport: "example.com/portmidi git https://github.com/rakyll/portmidi", - goSource: "example.com/portmidi https://github.com/rakyll/portmidi https://github.com/rakyll/portmidi/tree/master{/dir} https://github.com/rakyll/portmidi/blob/master{/dir}/{file}#L{line}", + goSource: "example.com/portmidi https://github.com/rakyll/portmidi https://github.com/rakyll/portmidi/tree/main{/dir} https://github.com/rakyll/portmidi/blob/main{/dir}/{file}#L{line}", }, { name: "Bitbucket Mercurial", diff --git a/site/themes/docsy/config.toml b/site/themes/docsy/config.toml index 0b5b7535bc..1545df75b0 100644 --- a/site/themes/docsy/config.toml +++ b/site/themes/docsy/config.toml @@ -9,11 +9,3 @@ time_format_blog = "Monday, January 02, 2006" time_format_default = "January 2, 2006" # Sections to publish in the main RSS feed. rss_sections = ["blog"] - - -# For a full list of parameters used in Docsy sites, see: -# https://github.com/google/docsy-example/blob/master/config.toml - - - -