add kubernetes deployment and workflow definition
florianBachinger committed Nov 13, 2023
1 parent 572f528 commit 8809420
Showing 29 changed files with 1,943 additions and 1 deletion.
@@ -42,10 +42,11 @@
# with:
# cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }}

name: 'Terraform'
name: 'Kubernetes Setup'

on:
push:
branches: [ "main" ]
pull_request:

permissions:
@@ -101,3 +102,43 @@ jobs:
- name: Terraform Apply
shell: bash
run: terraform apply -auto-approve -input=false

inventory:
needs: terraform
name: 'Copy Inventory'
runs-on: self-hosted
environment: production

defaults:
run:
shell: bash
steps:
- name: Copy Inventory File
run: cp -f /home/spainfra/terraform_state/kubernetes_cluster/inventory.ini ./kubernetes/kubespray/inventory/

kubespray:
needs: inventory
name: 'Kubespray'
runs-on: self-hosted
container:
image: quay.io/kubespray/kubespray:v2.23.1
volumes:
- ./kubernetes/kubespray/inventory:/inventory
- /home/spainfra/.ssh/id_rsa:/root/.ssh/id_rsa
options: --user root
defaults:
run:
working-directory: /kubespray/
steps:
- name: Show working directory
run: pwd
- name: List working directory contents
run: ls
- name: List mounted inventory
run: ls /inventory/
- name: List kubespray checkout
run: ls /kubespray/
- name: Show current user
run: whoami
- name: Deploy Kubernetes
run: ansible-playbook -i /inventory/inventory.ini --private-key /root/.ssh/id_rsa cluster.yml --become --become-user=root
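
For reference, the inventory.ini consumed by the ansible-playbook step is copied from the terraform state directory by the inventory job above and is not shown in this commit. A minimal sketch following the layout of Kubespray's sample inventory, with hypothetical hostnames and IP addresses, might look like this:

[all]
node1 ansible_host=10.0.0.11 ip=10.0.0.11
node2 ansible_host=10.0.0.12 ip=10.0.0.12
node3 ansible_host=10.0.0.13 ip=10.0.0.13

[kube_control_plane]
node1

[etcd]
node1

[kube_node]
node2
node3

[k8s_cluster:children]
kube_control_plane
kube_node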
139 changes: 139 additions & 0 deletions kubernetes/kubespray/inventory/group_vars/all/all.yml
@@ -0,0 +1,139 @@
---
## Directory where the binaries will be installed
bin_dir: /usr/local/bin

## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node, for example. The access_ip is really useful in AWS and Google
## environments, where the nodes are accessed remotely by the "public" ip
## but don't know about that address themselves.
# access_ip: 1.1.1.1


## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
# loadbalancer_apiserver:
# address: 1.2.3.4
# port: 1234

## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"

## The local loadbalancer should use this port,
## and it must be set to port 6443.
loadbalancer_apiserver_port: 6443

## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
loadbalancer_apiserver_healthcheck_port: 8081

### OTHER OPTIONAL VARIABLES

## By default, Kubespray collects nameservers on the host and adds them to nameserverentries.
## If true, Kubespray does not include host nameservers in nameserverentries in the dns_late stage; however, it still uses them in the dns_early stage to make sure the cluster installs safely.
## Use this option with caution: you may need to define your own dns servers, otherwise outbound queries such as www.google.com may fail.
# disable_host_nameservers: false

## Upstream dns servers
# upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4

## There are some changes specific to the cloud providers,
## for instance packets need to be encapsulated with some network plugins.
## If set, the possible values are 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'.
## When openstack is used, make sure to source the openstack credentials,
## as you would when using openstack-client, before starting the playbook.
# cloud_provider:

## When cloud_provider is set to 'external', you can set the cloud controller to deploy
## Supported cloud controllers are: 'openstack', 'vsphere', 'huaweicloud' and 'hcloud'
## When openstack or vsphere are used make sure to source in the required fields
# external_cloud_provider:

## Set these proxy values in order to update package manager and docker daemon to use proxies and custom CA for https_proxy if needed
# http_proxy: ""
# https_proxy: ""
# https_proxy_cert_file: ""

## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
# no_proxy: ""

## Some problems may occur when downloading files over an https proxy due to an ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of the get_url module. Note that kubespray will still perform checksum validation.
# download_validate_certs: False

## If you need to exclude all cluster nodes from the proxy as well as other resources, add those other resources here.
# additional_no_proxy: ""

## If you need to disable proxying of os package repositories but are still behind an http_proxy set
## skip_http_proxy_on_os_packages to true
## This will cause kubespray not to set the proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu.
## Special information for debian/ubuntu: you have to set the no_proxy variable, then apt packages will install from the source of your choice.
# skip_http_proxy_on_os_packages: false

## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
## no_proxy variable, set below to true:
no_proxy_exclude_workers: false

## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Options are "script" or "none"
# cert_management: script

## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false

## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255

## Set true to download and cache container images
# download_container: true

## Deploy container engine
# Set false if you want to deploy container engine manually.
# deploy_container_engine: true

## Red Hat Enterprise Linux subscription registration
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
## Update RHEL subscription purpose usage, role and SLA if necessary
# rh_subscription_username: ""
# rh_subscription_password: ""
# rh_subscription_org_id: ""
# rh_subscription_activation_key: ""
# rh_subscription_usage: "Development"
# rh_subscription_role: "Red Hat Enterprise Server"
# rh_subscription_sla: "Self-Support"

## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
# ping_access_ip: true

# sysctl_file_path to add sysctl conf to
# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"

## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
kube_webhook_token_auth: false
kube_webhook_token_auth_url_skip_tls_verify: false
# kube_webhook_token_auth_url: https://...
## base64-encoded string of the webhook's CA certificate
# kube_webhook_token_auth_ca_data: "LS0t..."

## NTP Settings
# Start the ntpd or chrony service and enable it at system boot.
ntp_enabled: false
ntp_manage_config: false
ntp_servers:
- "0.pool.ntp.org iburst"
- "1.pool.ntp.org iburst"
- "2.pool.ntp.org iburst"
- "3.pool.ntp.org iburst"

## Used to control no_log attribute
unsafe_show_logs: false

## If enabled it will allow kubespray to attempt setup even if the distribution is not supported. For unsupported distributions this can lead to unexpected failures in some cases.
allow_unsupported_distribution_setup: false
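
As an illustration only (the commit keeps NTP disabled and the upstream DNS servers commented out), enabling the upstream DNS and NTP options documented above could look like:

upstream_dns_servers:
  - 8.8.8.8
  - 8.8.4.4
ntp_enabled: true
ntp_manage_config: true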
9 changes: 9 additions & 0 deletions kubernetes/kubespray/inventory/group_vars/all/aws.yml
@@ -0,0 +1,9 @@
## To use AWS EBS CSI Driver to provision volumes, uncomment the first value
## and configure the parameters below
# aws_ebs_csi_enabled: true
# aws_ebs_csi_enable_volume_scheduling: true
# aws_ebs_csi_enable_volume_snapshot: false
# aws_ebs_csi_enable_volume_resizing: false
# aws_ebs_csi_controller_replicas: 1
# aws_ebs_csi_plugin_image_tag: latest
# aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"
40 changes: 40 additions & 0 deletions kubernetes/kubespray/inventory/group_vars/all/azure.yml
@@ -0,0 +1,40 @@
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values

# azure_cloud:
# azure_tenant_id:
# azure_subscription_id:
# azure_aad_client_id:
# azure_aad_client_secret:
# azure_resource_group:
# azure_location:
# azure_subnet_name:
# azure_security_group_name:
# azure_security_group_resource_group:
# azure_vnet_name:
# azure_vnet_resource_group:
# azure_route_table_name:
# azure_route_table_resource_group:
# supported values are 'standard' or 'vmss'
# azure_vmtype: standard

## Azure Disk CSI credentials and parameters
## see docs/azure-csi.md for details on how to get these values

# azure_csi_tenant_id:
# azure_csi_subscription_id:
# azure_csi_aad_client_id:
# azure_csi_aad_client_secret:
# azure_csi_location:
# azure_csi_resource_group:
# azure_csi_vnet_name:
# azure_csi_vnet_resource_group:
# azure_csi_subnet_name:
# azure_csi_security_group_name:
# azure_csi_use_instance_metadata:
# azure_csi_tags: "Owner=owner,Team=team,Environment=environment"

## To enable Azure Disk CSI, uncomment below
# azure_csi_enabled: true
# azure_csi_controller_replicas: 1
# azure_csi_plugin_image_tag: latest
46 changes: 46 additions & 0 deletions kubernetes/kubespray/inventory/group_vars/all/containerd.yml
@@ -0,0 +1,46 @@
---
# Please see roles/container-engine/containerd/defaults/main.yml for more configuration options

# containerd_storage_dir: "/var/lib/containerd"
# containerd_state_dir: "/run/containerd"
# containerd_oom_score: 0

# containerd_default_runtime: "runc"
# containerd_snapshotter: "native"

# containerd_runc_runtime:
# name: runc
# type: "io.containerd.runc.v2"
# engine: ""
# root: ""

# containerd_additional_runtimes:
# Example for Kata Containers as additional runtime:
# - name: kata
# type: "io.containerd.kata.v2"
# engine: ""
# root: ""

# containerd_grpc_max_recv_message_size: 16777216
# containerd_grpc_max_send_message_size: 16777216

# containerd_debug_level: "info"

# containerd_metrics_address: ""

# containerd_metrics_grpc_histogram: false

# Registries defined within containerd.
# containerd_registries_mirrors:
# - prefix: docker.io
# mirrors:
# - host: https://registry-1.docker.io
# capabilities: ["pull", "resolve"]
# skip_verify: false

# containerd_max_container_log_line_size: -1

# containerd_registry_auth:
# - registry: 10.0.0.2:5000
# username: user
# password: pass
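
For illustration only (not part of this commit), a private registry mirror with authentication would combine the two commented blocks above; registry.example.com and the ci-bot / vault_registry_password credentials are hypothetical placeholders:

containerd_registries_mirrors:
  - prefix: registry.example.com
    mirrors:
      - host: https://registry.example.com
        capabilities: ["pull", "resolve"]
        skip_verify: false
containerd_registry_auth:
  - registry: registry.example.com
    username: ci-bot
    password: "{{ vault_registry_password }}"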
2 changes: 2 additions & 0 deletions kubernetes/kubespray/inventory/group_vars/all/coreos.yml
@@ -0,0 +1,2 @@
## Whether CoreOS needs auto-upgrade; default is true
# coreos_auto_upgrade: true
6 changes: 6 additions & 0 deletions kubernetes/kubespray/inventory/group_vars/all/cri-o.yml
@@ -0,0 +1,6 @@
# crio_insecure_registries:
# - 10.0.0.2:5000
# crio_registry_auth:
# - registry: 10.0.0.2:5000
# username: user
# password: pass
59 changes: 59 additions & 0 deletions kubernetes/kubespray/inventory/group_vars/all/docker.yml
@@ -0,0 +1,59 @@
---
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
# docker_storage_options: -s overlay2

## Enable docker_container_storage_setup; it will configure the devicemapper driver on CentOS 7 or RedHat 7.
docker_container_storage_setup: false

## A disk path must be defined via docker_container_storage_setup_devs,
## otherwise docker-storage-setup will be executed incorrectly.
# docker_container_storage_setup_devs: /dev/vdb

## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
## Valid options are systemd or cgroupfs, default is systemd
# docker_cgroup_driver: systemd

## Only set this if you have more than 3 nameservers:
## If true Kubespray will only use the first 3, otherwise it will fail
docker_dns_servers_strict: false

# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"

## Used to set docker daemon iptables options to true
docker_iptables_enabled: "false"

# Docker log options
# Rotate container stderr/stdout logs at 50m and keep last 5
docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"

# define docker bin_dir
docker_bin_dir: "/usr/bin"

# keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
# kubespray deletes the docker package on each run, so caching the package makes sense
docker_rpm_keepcache: 1

## An obvious use case is allowing insecure-registry access to self hosted registries.
## Values can be an IP address or a domain name,
## for example 172.19.16.11 or mirror.registry.io.
# docker_insecure_registries:
# - mirror.registry.io
# - 172.19.16.11

## Add other registry mirrors, for example a China registry mirror.
# docker_registry_mirrors:
# - https://registry.docker-cn.com
# - https://mirror.aliyuncs.com

## If non-empty will override default system MountFlags value.
## This option takes a mount propagation flag: shared, slave
## or private, which control whether mounts in the file system
## namespace set up for docker will receive or propagate mounts
## and unmounts. Leave empty for system default
# docker_mount_flags:

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
# docker_options: ""
16 changes: 16 additions & 0 deletions kubernetes/kubespray/inventory/group_vars/all/etcd.yml
@@ -0,0 +1,16 @@
---
## Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd

## Container runtime
## docker for docker, crio for cri-o and containerd for containerd.
## Additionally you can set this to kubeadm if you want to install etcd using kubeadm
## Kubeadm etcd deployment is experimental and only available for new deployments
## If this is not set, container manager will be inherited from the Kubespray defaults
## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want.
## Also, this makes it possible to use a different container manager for etcd nodes.
# container_manager: containerd

## Settings for etcd deployment type
# Set this to docker if you are using container_manager: docker
etcd_deployment_type: host
10 changes: 10 additions & 0 deletions kubernetes/kubespray/inventory/group_vars/all/gcp.yml
@@ -0,0 +1,10 @@
## GCP compute Persistent Disk CSI Driver credentials and parameters
## See docs/gcp-pd-csi.md for information about the implementation

## Specify the path to the file containing the service account credentials
# gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"

## To enable GCP Persistent Disk CSI driver, uncomment below
# gcp_pd_csi_enabled: true
# gcp_pd_csi_controller_replicas: 1
# gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0"