diff --git a/Dockerfile b/Dockerfile index b96c7f2..049e11d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM quay.io/operator-framework/helm-operator:v1.25.2 +FROM quay.io/operator-framework/helm-operator:v1.32.0 ENV HOME=/opt/helm COPY watches.yaml ${HOME}/watches.yaml diff --git a/Dockerfile-ARM64 b/Dockerfile-ARM64 deleted file mode 100644 index 098353c..0000000 --- a/Dockerfile-ARM64 +++ /dev/null @@ -1,7 +0,0 @@ -# Build the manager binary -FROM quay.io/operator-framework/helm-operator-arm64:v1.4.2 - -ENV HOME=/opt/helm -COPY watches.yaml ${HOME}/watches.yaml -COPY helm-charts ${HOME}/helm-charts -WORKDIR ${HOME} diff --git a/Makefile b/Makefile index 4f0d7ab..f5b56c4 100644 --- a/Makefile +++ b/Makefile @@ -3,13 +3,13 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 0.0.6 +VERSION ?= 0.0.1 # CHANNELS define the bundle channels used in the bundle. -# Add a new line here if you would like to change its default config. (E.g CHANNELS = "preview,fast,stable") +# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") # To re-generate a bundle for other specific channels without changing the standard setup, you can: -# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=preview,fast,stable) -# - use environment variables to overwrite this value (e.g export CHANNELS="preview,fast,stable") +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") ifneq ($(origin CHANNELS), undefined) BUNDLE_CHANNELS := --channels=$(CHANNELS) endif @@ -35,9 +35,25 @@ IMAGE_TAG_BASE ?= kubealex.com/k8s-mediaserver-operator # You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) +# BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command +BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + +# USE_IMAGE_DIGESTS defines if images are resolved via tags or digests +# You can enable this value if you would like to use SHA Based Digests +# To enable set flag to true +USE_IMAGE_DIGESTS ?= false +ifeq ($(USE_IMAGE_DIGESTS), true) + BUNDLE_GEN_FLAGS += --use-image-digests +endif + +# Set the Operator SDK version to use. By default, what is installed on the system is used. +# This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. +OPERATOR_SDK_VERSION ?= v1.32.0 + # Image URL to use all building/pushing image targets IMG ?= controller:latest +.PHONY: all all: docker-build ##@ General @@ -53,37 +69,62 @@ all: docker-build # More info on the awk command: # http://linuxcommand.org/lc3_adv_awk.php +.PHONY: help help: ## Display this help. @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) ##@ Build +.PHONY: run run: helm-operator ## Run against the configured Kubernetes cluster in ~/.kube/config $(HELM_OPERATOR) run +.PHONY: docker-build docker-build: ## Build docker image with the manager. docker build -t ${IMG} . 
+.PHONY: docker-push docker-push: ## Push docker image with the manager. docker push ${IMG} +# PLATFORMS defines the target platforms for building the manager image, providing support for multiple +# architectures (i.e. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have BuildKit enabled. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. if you do not provide a valid value via IMG the export will fail) +# To properly provide solutions that support more than one platform you should use this option. +PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le +.PHONY: docker-buildx +docker-buildx: test ## Build and push docker image for the manager for cross-platform support + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - docker buildx create --name project-v3-builder + docker buildx use project-v3-builder + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . + - docker buildx rm project-v3-builder + rm Dockerfile.cross + ##@ Deployment +.PHONY: install install: kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl apply -f - +.PHONY: uninstall uninstall: kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/crd | kubectl delete -f - +.PHONY: deploy deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} $(KUSTOMIZE) build config/default | kubectl apply -f - +.PHONY: undeploy undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. $(KUSTOMIZE) build config/default | kubectl delete -f - OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') -ARCH := $(shell uname -m | sed 's/x86_64/amd64/') +ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/') .PHONY: kustomize KUSTOMIZE = $(shell pwd)/bin/kustomize @@ -93,7 +134,7 @@ ifeq (,$(shell which kustomize 2>/dev/null)) @{ \ set -e ;\ mkdir -p $(dir $(KUSTOMIZE)) ;\ - curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v3.5.4/kustomize_v3.5.4_$(OS)_$(ARCH).tar.gz | \ + curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v4.5.7/kustomize_v4.5.7_$(OS)_$(ARCH).tar.gz | \ tar xzf - -C bin/ ;\ } else @@ -109,7 +150,7 @@ ifeq (,$(shell which helm-operator 2>/dev/null)) @{ \ set -e ;\ mkdir -p $(dir $(HELM_OPERATOR)) ;\ - curl -sSLo $(HELM_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.8.0/helm-operator_$(OS)_$(ARCH) ;\ + curl -sSLo $(HELM_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.32.0/helm-operator_$(OS)_$(ARCH) ;\ chmod +x $(HELM_OPERATOR) ;\ } else @@ -117,12 +158,28 @@ HELM_OPERATOR = $(shell which helm-operator) endif endif +.PHONY: operator-sdk +OPERATOR_SDK ?= ./bin/operator-sdk +operator-sdk: ## Download operator-sdk locally if necessary. 
+ifeq (,$(wildcard $(OPERATOR_SDK))) +ifeq (, $(shell which operator-sdk 2>/dev/null)) + @{ \ + set -e ;\ + mkdir -p $(dir $(OPERATOR_SDK)) ;\ + curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$(OS)_$(ARCH) ;\ + chmod +x $(OPERATOR_SDK) ;\ + } +else +OPERATOR_SDK = $(shell which operator-sdk) +endif +endif + .PHONY: bundle -bundle: kustomize ## Generate bundle manifests and metadata, then validate generated files. - operator-sdk generate kustomize manifests -q +bundle: kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. + $(OPERATOR_SDK) generate kustomize manifests -q cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) - $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) - operator-sdk bundle validate ./bundle + $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) + $(OPERATOR_SDK) bundle validate ./bundle .PHONY: bundle-build bundle-build: ## Build the bundle image. @@ -140,7 +197,7 @@ ifeq (,$(shell which opm 2>/dev/null)) @{ \ set -e ;\ mkdir -p $(dir $(OPM)) ;\ - curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$(OS)-$(ARCH)-opm ;\ + curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$(OS)-$(ARCH)-opm ;\ chmod +x $(OPM) ;\ } else diff --git a/PROJECT b/PROJECT index c1533bf..83b9a19 100644 --- a/PROJECT +++ b/PROJECT @@ -1,3 +1,7 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html domain: kubealex.com layout: - helm.sdk.operatorframework.io/v1 @@ -13,4 +17,4 @@ resources: group: charts kind: K8SMediaserver version: v1 -version: "3" \ No newline at end of file +version: "3" diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 57bd564..3a37ac1 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -27,6 +27,4 @@ patchesStrategicMerge: # endpoint w/o any authn/z, please comment the following line. 
- manager_auth_proxy_patch.yaml -# Mount the controller config file for loading manager configurations -# through a ComponentConfig type -#- manager_config_patch.yaml + diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index f279d10..d90ef09 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -8,6 +8,22 @@ metadata: spec: template: spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux containers: - name: kube-rbac-proxy securityContext: @@ -15,7 +31,7 @@ spec: capabilities: drop: - "ALL" - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 args: - "--secure-listen-address=0.0.0.0:8443" - "--upstream=http://127.0.0.1:8080/" diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml index 6c40015..f6f5891 100644 --- a/config/default/manager_config_patch.yaml +++ b/config/default/manager_config_patch.yaml @@ -8,13 +8,3 @@ spec: spec: containers: - name: manager - args: - - "--config=controller_manager_config.yaml" - volumeMounts: - - name: manager-config - mountPath: /controller_manager_config.yaml - subPath: controller_manager_config.yaml - volumes: - - name: manager-config - configMap: - name: manager-config diff --git a/config/manager/controller_manager_config.yaml b/config/manager/controller_manager_config.yaml deleted file mode 100644 index 5d30b8e..0000000 --- a/config/manager/controller_manager_config.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 -kind: ControllerManagerConfig -health: - healthProbeBindAddress: :8081 -metrics: - bindAddress: 127.0.0.1:8080 - -leaderElection: - leaderElect: true - resourceName: 811c9dc5.kubealex.com -# leaderElectionReleaseOnCancel defines if the leader should step down volume -# when the Manager ends. This requires the binary to immediately end when the -# Manager is stopped, otherwise, this setting is unsafe. Setting this significantly -# speeds up voluntary leader transitions as the new leader don't have to wait -# LeaseDuration time first. -# In the default scaffold provided, the program ends immediately after -# the manager stops, so would be fine to enable this option. However, -# if you are doing or is intended to do any operation such as perform cleanups -# after the manager stops then its usage might be unsafe. 
-# leaderElectionReleaseOnCancel: true diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 2bcd3ee..5c5f0b8 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,10 +1,2 @@ resources: - manager.yaml - -generatorOptions: - disableNameSuffixHash: true - -configMapGenerator: -- name: manager-config - files: - - controller_manager_config.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 4eeefe4..c1028f0 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -3,6 +3,12 @@ kind: Namespace metadata: labels: control-plane: controller-manager + app.kubernetes.io/name: namespace + app.kubernetes.io/instance: system + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: system --- apiVersion: apps/v1 @@ -12,6 +18,12 @@ metadata: namespace: system labels: control-plane: controller-manager + app.kubernetes.io/name: deployment + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize spec: selector: matchLabels: @@ -24,6 +36,26 @@ spec: labels: control-plane: controller-manager spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux securityContext: runAsNonRoot: true # TODO(user): For common cases that do not require escalating privileges diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml index d19136a..7b580b1 100644 --- a/config/prometheus/monitor.yaml +++ b/config/prometheus/monitor.yaml @@ -5,6 +5,12 @@ kind: ServiceMonitor metadata: labels: control-plane: controller-manager + app.kubernetes.io/name: servicemonitor + app.kubernetes.io/instance: controller-manager-metrics-monitor + app.kubernetes.io/component: metrics + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: controller-manager-metrics-monitor namespace: system spec: diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml index 51a75db..4aa72a6 100644 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -1,6 +1,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: metrics-reader rules: - nonResourceURLs: diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml index 
80e1857..74cadd1 100644 --- a/config/rbac/auth_proxy_role.yaml +++ b/config/rbac/auth_proxy_role.yaml @@ -1,6 +1,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: proxy-role + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: proxy-role rules: - apiGroups: diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml index ec7acc0..dadeca7 100644 --- a/config/rbac/auth_proxy_role_binding.yaml +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -1,6 +1,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: proxy-rolebinding + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: proxy-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index 71f1797..ccc790f 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -3,6 +3,12 @@ kind: Service metadata: labels: control-plane: controller-manager + app.kubernetes.io/name: service + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: controller-manager-metrics-service namespace: system spec: diff --git a/config/rbac/k8smediaserver_editor_role.yaml b/config/rbac/k8smediaserver_editor_role.yaml index acb668a..170be98 100644 --- a/config/rbac/k8smediaserver_editor_role.yaml +++ b/config/rbac/k8smediaserver_editor_role.yaml @@ -2,6 +2,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: k8smediaserver-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: k8smediaserver-editor-role rules: - apiGroups: diff --git a/config/rbac/k8smediaserver_viewer_role.yaml b/config/rbac/k8smediaserver_viewer_role.yaml index 95c77ae..1b35d7f 100644 --- a/config/rbac/k8smediaserver_viewer_role.yaml +++ b/config/rbac/k8smediaserver_viewer_role.yaml @@ -2,6 +2,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: k8smediaserver-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: k8smediaserver-viewer-role rules: - apiGroups: diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml index 4190ec8..030fe62 100644 --- a/config/rbac/leader_election_role.yaml +++ b/config/rbac/leader_election_role.yaml @@ -2,6 +2,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: + labels: + app.kubernetes.io/name: role + app.kubernetes.io/instance: 
leader-election-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: leader-election-role rules: - apiGroups: diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml index 1d1321e..a44d887 100644 --- a/config/rbac/leader_election_role_binding.yaml +++ b/config/rbac/leader_election_role_binding.yaml @@ -1,6 +1,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: + labels: + app.kubernetes.io/name: rolebinding + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: leader-election-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 55ce69d..2b3eb39 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -45,7 +45,15 @@ rules: - patch - update - watch -- apiGroups: +- verbs: + - "*" + apiGroups: + - "networking.k8s.io" + resources: + - "ingresses" +- verbs: + - "*" + apiGroups: - "" resources: - pods @@ -56,33 +64,13 @@ rules: - events - configmaps - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - apps +- verbs: + - "*" + apiGroups: + - "apps" resources: - deployments - daemonsets - replicasets - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "networking.k8s.io" - verbs: - - "*" - resources: - - "ingresses" #+kubebuilder:scaffold:rules diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index 2070ede..0d227be 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -1,6 +1,13 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: manager-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml index 7cd6025..882b96d 100644 --- a/config/rbac/service_account.yaml +++ b/config/rbac/service_account.yaml @@ -1,5 +1,12 @@ apiVersion: v1 kind: ServiceAccount metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: controller-manager-sa + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/part-of: k8s-mediaserver-operator + app.kubernetes.io/managed-by: kustomize name: controller-manager namespace: system diff --git a/config/samples/charts_v1_k8smediaserver.yaml b/config/samples/charts_v1_k8smediaserver.yaml index 3d5ed8f..1d73557 100644 --- a/config/samples/charts_v1_k8smediaserver.yaml +++ b/config/samples/charts_v1_k8smediaserver.yaml @@ -5,18 +5,23 @@ metadata: spec: # Default values copied from /helm-charts/k8s-mediaserver/values.yaml general: - image_tag: latest ingress: ingressClassName: "" ingress_host: k8s-mediaserver.k8s.test - pgid: 1000 + nodeSelector: {} plex_ingress_host: k8s-plex.k8s.test + image_tag: latest + podDistribution: cluster + # UID 
to run the process with puid: 1000 + # GID to run the process with + pgid: 1000 + # Persistent storage selections and pathing storage: - customVolume: false + customVolume: false # set to true if not using a PVC (must provide volume below) pvcName: mediaserver-pvc - pvcStorageClass: [] size: 5Gi + pvcStorageClass: "" subPaths: config: config downloads: downloads @@ -27,6 +32,7 @@ spec: volumes: {} jackett: container: + image: docker.io/linuxserver/jackett nodeSelector: {} port: 9117 enabled: true @@ -39,13 +45,16 @@ spec: secretName: "" resources: {} service: + extraLBAnnotations: {} extraLBService: false nodePort: null port: 9117 type: ClusterIP + volume: {} plex: claim: CHANGEME container: + image: docker.io/linuxserver/plex nodeSelector: {} port: 32400 enabled: true @@ -58,12 +67,37 @@ spec: replicaCount: 1 resources: {} service: + extraLBAnnotations: {} extraLBService: false nodePort: null port: 32400 type: ClusterIP + volume: {} + prowlarr: + container: + image: docker.io/linuxserver/prowlarr + nodeSelector: {} + port: 9696 + tag: develop + enabled: true + ingress: + annotations: {} + enabled: true + path: /prowlarr + tls: + enabled: false + secretName: "" + resources: {} + service: + extraLBAnnotations: {} + extraLBService: false + nodePort: null + port: 9696 + type: ClusterIP + volume: {} radarr: container: + image: docker.io/linuxserver/radarr nodeSelector: {} port: 7878 enabled: true @@ -76,12 +110,15 @@ spec: secretName: "" resources: {} service: + extraLBAnnotations: {} extraLBService: false nodePort: null port: 7878 type: ClusterIP + volume: {} sabnzbd: container: + image: docker.io/linuxserver/sabnzbd nodeSelector: {} port: http: 8080 @@ -97,17 +134,21 @@ spec: resources: {} service: http: + extraLBAnnotations: {} extraLBService: false nodePort: null port: 8080 type: ClusterIP https: + extraLBAnnotations: {} extraLBService: false nodePort: null port: 9090 type: ClusterIP + volume: {} sonarr: container: + image: docker.io/linuxserver/sonarr nodeSelector: {} port: 8989 enabled: true @@ -120,10 +161,12 @@ spec: secretName: "" resources: {} service: + extraLBAnnotations: {} extraLBService: false nodePort: null port: 8989 type: ClusterIP + volume: {} transmission: config: auth: @@ -131,6 +174,7 @@ spec: password: "" username: "" container: + image: docker.io/linuxserver/transmission nodeSelector: {} port: peer: 51413 @@ -146,15 +190,18 @@ spec: resources: {} service: peer: + extraLBAnnotations: {} extraLBService: false nodePort: null nodePortUDP: null port: 51413 type: ClusterIP utp: + extraLBAnnotations: {} extraLBService: false nodePort: null port: 9091 type: ClusterIP - - + volume: {} + + diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 3c17b8a..0b216c2 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,4 +1,4 @@ -## Append samples you want in your CSV to this file as resources ## +## Append samples of your project ## resources: - charts_v1_k8smediaserver.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/scorecard/patches/basic.config.yaml b/config/scorecard/patches/basic.config.yaml index 90f7ef7..472a988 100644 --- a/config/scorecard/patches/basic.config.yaml +++ b/config/scorecard/patches/basic.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.23.0 + image: quay.io/operator-framework/scorecard-test:v1.32.0 labels: suite: basic test: basic-check-spec-test diff --git 
a/config/scorecard/patches/olm.config.yaml b/config/scorecard/patches/olm.config.yaml index b55840e..343c6d8 100644 --- a/config/scorecard/patches/olm.config.yaml +++ b/config/scorecard/patches/olm.config.yaml @@ -4,7 +4,7 @@ entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.23.0 + image: quay.io/operator-framework/scorecard-test:v1.32.0 labels: suite: olm test: olm-bundle-validation-test @@ -14,7 +14,7 @@ entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.23.0 + image: quay.io/operator-framework/scorecard-test:v1.32.0 labels: suite: olm test: olm-crds-have-validation-test @@ -24,7 +24,7 @@ entrypoint: - scorecard-test - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.23.0 + image: quay.io/operator-framework/scorecard-test:v1.32.0 labels: suite: olm test: olm-crds-have-resources-test @@ -34,7 +34,7 @@ entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.23.0 + image: quay.io/operator-framework/scorecard-test:v1.32.0 labels: suite: olm test: olm-spec-descriptors-test @@ -44,7 +44,7 @@ entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.23.0 + image: quay.io/operator-framework/scorecard-test:v1.32.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/helm-charts/k8s-mediaserver/Chart.yaml b/helm-charts/k8s-mediaserver/Chart.yaml index 7f96310..6fc6f7a 100644 --- a/helm-charts/k8s-mediaserver/Chart.yaml +++ b/helm-charts/k8s-mediaserver/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -appVersion: 1.16.0 +appVersion: 0.9.1 description: A Helm chart for Kubernetes mediaserver name: k8s-mediaserver type: application -version: 0.7.0 +version: 0.9.1 diff --git a/helm-charts/k8s-mediaserver/templates/jackett-resources.yml b/helm-charts/k8s-mediaserver/templates/jackett-resources.yml index 6db0db2..e9c96d4 100644 --- a/helm-charts/k8s-mediaserver/templates/jackett-resources.yml +++ b/helm-charts/k8s-mediaserver/templates/jackett-resources.yml @@ -72,8 +72,9 @@ spec: image: "{{ .Values.jackett.container.image }}:{{ .Values.jackett.container.tag | default .Values.general.image_tag }}" imagePullPolicy: Always readinessProbe: - tcpSocket: + httpGet: port: {{ .Values.jackett.container.port }} + path: /{{ .Values.jackett.ingress.path }}/UI/Dashboard initialDelaySeconds: 5 periodSeconds: 10 ports: diff --git a/helm-charts/k8s-mediaserver/templates/plex-resources.yml b/helm-charts/k8s-mediaserver/templates/plex-resources.yml index f1dcf86..fc06c6c 100644 --- a/helm-charts/k8s-mediaserver/templates/plex-resources.yml +++ b/helm-charts/k8s-mediaserver/templates/plex-resources.yml @@ -52,8 +52,9 @@ spec: image: "{{ .Values.plex.container.image }}:{{ .Values.plex.container.tag | default .Values.general.image_tag }}" imagePullPolicy: Always readinessProbe: - tcpSocket: + httpGet: port: {{ .Values.plex.container.port }} + path: "/" initialDelaySeconds: 20 periodSeconds: 15 ports: diff --git a/helm-charts/k8s-mediaserver/templates/prowlarr-resources.yml b/helm-charts/k8s-mediaserver/templates/prowlarr-resources.yml index 5002e87..a51c676 100644 --- a/helm-charts/k8s-mediaserver/templates/prowlarr-resources.yml +++ b/helm-charts/k8s-mediaserver/templates/prowlarr-resources.yml @@ -74,7 +74,8 @@ spec: image: "{{ .Values.prowlarr.container.image }}:{{ .Values.prowlarr.container.tag | default .Values.general.image_tag }}" imagePullPolicy: Always readinessProbe: 
- tcpSocket: + httpGet: + path: "/{{ .Values.prowlarr.ingress.path }}/ping" port: {{ .Values.prowlarr.container.port }} initialDelaySeconds: 10 periodSeconds: 20 diff --git a/helm-charts/k8s-mediaserver/templates/radarr-resources.yml b/helm-charts/k8s-mediaserver/templates/radarr-resources.yml index f98b341..9624770 100644 --- a/helm-charts/k8s-mediaserver/templates/radarr-resources.yml +++ b/helm-charts/k8s-mediaserver/templates/radarr-resources.yml @@ -75,7 +75,8 @@ spec: image: "{{ .Values.radarr.container.image }}:{{ .Values.radarr.container.tag | default .Values.general.image_tag }}" imagePullPolicy: Always readinessProbe: - tcpSocket: + httpGet: + path: "/{{ .Values.radarr.ingress.path }}/ping" port: {{ .Values.radarr.container.port }} initialDelaySeconds: 10 periodSeconds: 20 diff --git a/helm-charts/k8s-mediaserver/templates/sabnzbd-resources.yml b/helm-charts/k8s-mediaserver/templates/sabnzbd-resources.yml index e424d9b..b829810 100644 --- a/helm-charts/k8s-mediaserver/templates/sabnzbd-resources.yml +++ b/helm-charts/k8s-mediaserver/templates/sabnzbd-resources.yml @@ -377,7 +377,8 @@ spec: image: "{{ .Values.sabnzbd.container.image }}:{{ .Values.sabnzbd.container.tag | default .Values.general.image_tag }}" imagePullPolicy: Always readinessProbe: - tcpSocket: + httpGet: + path: /{{ .Values.sabnzbd.ingress.path }}/wizard/ port: {{ .Values.sabnzbd.container.port.http }} initialDelaySeconds: 5 periodSeconds: 10 diff --git a/helm-charts/k8s-mediaserver/templates/sonarr-resources.yml b/helm-charts/k8s-mediaserver/templates/sonarr-resources.yml index 3cb513b..894ffac 100644 --- a/helm-charts/k8s-mediaserver/templates/sonarr-resources.yml +++ b/helm-charts/k8s-mediaserver/templates/sonarr-resources.yml @@ -74,7 +74,8 @@ spec: image: "{{ .Values.sonarr.container.image }}:{{ .Values.sonarr.container.tag | default .Values.general.image_tag }}" imagePullPolicy: Always readinessProbe: - tcpSocket: + httpGet: + path: "/{{ .Values.sonarr.ingress.path }}/ping" port: {{ .Values.sonarr.container.port }} initialDelaySeconds: 10 periodSeconds: 20 diff --git a/helm-charts/k8s-mediaserver/templates/transmission-resources.yml b/helm-charts/k8s-mediaserver/templates/transmission-resources.yml index bc3c5a0..e0c8098 100644 --- a/helm-charts/k8s-mediaserver/templates/transmission-resources.yml +++ b/helm-charts/k8s-mediaserver/templates/transmission-resources.yml @@ -141,7 +141,8 @@ spec: image: "{{ .Values.transmission.container.image }}:{{ .Values.transmission.container.tag | default .Values.general.image_tag }}" imagePullPolicy: Always readinessProbe: - tcpSocket: + httpGet: + path: /{{ .Values.transmission.ingress.path }}/ port: {{ .Values.transmission.container.port.utp }} initialDelaySeconds: 5 periodSeconds: 10 diff --git a/helm-charts/k8s-mediaserver/values.yaml b/helm-charts/k8s-mediaserver/values.yaml index 2997dbb..3d591e2 100644 --- a/helm-charts/k8s-mediaserver/values.yaml +++ b/helm-charts/k8s-mediaserver/values.yaml @@ -15,6 +15,7 @@ general: storage: customVolume: false #set to true if not using a PVC (must provide volume below) pvcName: mediaserver-pvc + accessMode: "" size: 5Gi pvcStorageClass: "" # the path starting from the top level of the pv you're passing. 
If your share is server.local/share/, then tv is server.local/share/media/tv diff --git a/k8s-mediaserver-operator-arm64.yml b/k8s-mediaserver-operator-arm64.yml deleted file mode 100644 index 8c949fb..0000000 --- a/k8s-mediaserver-operator-arm64.yml +++ /dev/null @@ -1,368 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - name: k8s-mediaserver-operator-system ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: k8smediaservers.charts.kubealex.com -spec: - group: charts.kubealex.com - names: - kind: K8SMediaserver - listKind: K8SMediaserverList - plural: k8smediaservers - singular: k8smediaserver - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: K8SMediaserver is the Schema for the k8smediaservers API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec defines the desired state of K8SMediaserver - type: object - x-kubernetes-preserve-unknown-fields: true - status: - description: Status defines the observed state of K8SMediaserver - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: k8s-mediaserver-operator-controller-manager - namespace: k8s-mediaserver-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: k8s-mediaserver-operator-leader-election-role - namespace: k8s-mediaserver-operator-system -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: k8s-mediaserver-operator-manager-role -rules: -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - '*' -- apiGroups: - - "" - resources: - - events - verbs: - - create -- apiGroups: - - charts.kubealex.com - resources: - - k8smediaservers - - k8smediaservers/status - - k8smediaservers/finalizers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - pods - - services - - services/finalizers - - endpoints - - persistentvolumeclaims - - events - - configmaps - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - 
networking.k8s.io - resources: - - ingresses - verbs: - - create - - delete - - get - - list - - patch - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: k8s-mediaserver-operator-metrics-reader -rules: -- nonResourceURLs: - - /metrics - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: k8s-mediaserver-operator-proxy-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: k8s-mediaserver-operator-leader-election-rolebinding - namespace: k8s-mediaserver-operator-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: k8s-mediaserver-operator-leader-election-role -subjects: -- kind: ServiceAccount - name: k8s-mediaserver-operator-controller-manager - namespace: k8s-mediaserver-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: k8s-mediaserver-operator-manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-mediaserver-operator-manager-role -subjects: -- kind: ServiceAccount - name: k8s-mediaserver-operator-controller-manager - namespace: k8s-mediaserver-operator-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: k8s-mediaserver-operator-proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-mediaserver-operator-proxy-role -subjects: -- kind: ServiceAccount - name: k8s-mediaserver-operator-controller-manager - namespace: k8s-mediaserver-operator-system ---- -apiVersion: v1 -data: - controller_manager_config.yaml: | - apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 - kind: ControllerManagerConfig - health: - healthProbeBindAddress: :8081 - metrics: - bindAddress: 127.0.0.1:8080 - - leaderElection: - leaderElect: true - resourceName: 811c9dc5.kubealex.com - # leaderElectionReleaseOnCancel defines if the leader should step down volume - # when the Manager ends. This requires the binary to immediately end when the - # Manager is stopped, otherwise, this setting is unsafe. Setting this significantly - # speeds up voluntary leader transitions as the new leader don't have to wait - # LeaseDuration time first. - # In the default scaffold provided, the program ends immediately after - # the manager stops, so would be fine to enable this option. However, - # if you are doing or is intended to do any operation such as perform cleanups - # after the manager stops then its usage might be unsafe. 
- # leaderElectionReleaseOnCancel: true -kind: ConfigMap -metadata: - name: k8s-mediaserver-operator-manager-config - namespace: k8s-mediaserver-operator-system ---- -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - name: k8s-mediaserver-operator-controller-manager-metrics-service - namespace: k8s-mediaserver-operator-system -spec: - ports: - - name: https - port: 8443 - protocol: TCP - targetPort: https - selector: - control-plane: controller-manager ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - control-plane: controller-manager - name: k8s-mediaserver-operator-controller-manager - namespace: k8s-mediaserver-operator-system -spec: - replicas: 1 - selector: - matchLabels: - control-plane: controller-manager - template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: manager - labels: - control-plane: controller-manager - spec: - containers: - - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0-arm64 - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - protocol: TCP - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - - args: - - --health-probe-bind-address=:8081 - - --metrics-addr=127.0.0.1:8080 - - --leader-election-id=k8s-mediaserver-operator - image: quay.io/kubealex/k8s-mediaserver-operator:v0.9.0-arm64 - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - name: manager - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - securityContext: - runAsNonRoot: true - serviceAccountName: k8s-mediaserver-operator-controller-manager - terminationGracePeriodSeconds: 10 diff --git a/k8s-mediaserver-operator.yml b/k8s-mediaserver-operator.yml index e40ab8f..7ea3bd8 100644 --- a/k8s-mediaserver-operator.yml +++ b/k8s-mediaserver-operator.yml @@ -2,6 +2,12 @@ apiVersion: v1 kind: Namespace metadata: labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/instance: system + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: namespace + app.kubernetes.io/part-of: k8s-mediaserver-operator control-plane: controller-manager name: k8s-mediaserver-operator-system --- @@ -52,12 +58,26 @@ spec: apiVersion: v1 kind: ServiceAccount metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/instance: controller-manager-sa + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/part-of: k8s-mediaserver-operator name: k8s-mediaserver-operator-controller-manager namespace: k8s-mediaserver-operator-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/instance: leader-election-role + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: role + app.kubernetes.io/part-of: k8s-mediaserver-operator name: k8s-mediaserver-operator-leader-election-role 
namespace: k8s-mediaserver-operator-system rules: @@ -142,13 +162,7 @@ rules: - configmaps - secrets verbs: - - create - - delete - - get - - list - - patch - - update - - watch + - '*' - apiGroups: - apps resources: @@ -157,29 +171,24 @@ rules: - replicasets - statefulsets verbs: - - create - - delete - - get - - list - - patch - - update - - watch + - '*' - apiGroups: - networking.k8s.io resources: - ingresses verbs: - - create - - delete - - get - - list - - patch - - update - - watch + - '*' --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrole + app.kubernetes.io/part-of: k8s-mediaserver-operator name: k8s-mediaserver-operator-metrics-reader rules: - nonResourceURLs: @@ -190,6 +199,13 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/instance: proxy-role + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrole + app.kubernetes.io/part-of: k8s-mediaserver-operator name: k8s-mediaserver-operator-proxy-role rules: - apiGroups: @@ -208,6 +224,13 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: rolebinding + app.kubernetes.io/part-of: k8s-mediaserver-operator name: k8s-mediaserver-operator-leader-election-rolebinding namespace: k8s-mediaserver-operator-system roleRef: @@ -222,6 +245,13 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: k8s-mediaserver-operator name: k8s-mediaserver-operator-manager-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io @@ -235,6 +265,13 @@ subjects: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/instance: proxy-rolebinding + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: k8s-mediaserver-operator name: k8s-mediaserver-operator-proxy-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io @@ -246,37 +283,15 @@ subjects: namespace: k8s-mediaserver-operator-system --- apiVersion: v1 -data: - controller_manager_config.yaml: | - apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 - kind: ControllerManagerConfig - health: - healthProbeBindAddress: :8081 - metrics: - bindAddress: 127.0.0.1:8080 - - leaderElection: - leaderElect: true - resourceName: 811c9dc5.kubealex.com - # leaderElectionReleaseOnCancel defines if the leader should step down volume - # when the Manager ends. This requires the binary to immediately end when the - # Manager is stopped, otherwise, this setting is unsafe. 
Setting this significantly - # speeds up voluntary leader transitions as the new leader don't have to wait - # LeaseDuration time first. - # In the default scaffold provided, the program ends immediately after - # the manager stops, so would be fine to enable this option. However, - # if you are doing or is intended to do any operation such as perform cleanups - # after the manager stops then its usage might be unsafe. - # leaderElectionReleaseOnCancel: true -kind: ConfigMap -metadata: - name: k8s-mediaserver-operator-manager-config - namespace: k8s-mediaserver-operator-system ---- -apiVersion: v1 kind: Service metadata: labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: service + app.kubernetes.io/part-of: k8s-mediaserver-operator control-plane: controller-manager name: k8s-mediaserver-operator-controller-manager-metrics-service namespace: k8s-mediaserver-operator-system @@ -293,6 +308,12 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: k8s-mediaserver-operator + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: k8s-mediaserver-operator control-plane: controller-manager name: k8s-mediaserver-operator-controller-manager namespace: k8s-mediaserver-operator-system @@ -308,13 +329,29 @@ spec: labels: control-plane: controller-manager spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux containers: - args: - --secure-listen-address=0.0.0.0:8443 - --upstream=http://127.0.0.1:8080/ - --logtostderr=true - --v=0 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 name: kube-rbac-proxy ports: - containerPort: 8443 diff --git a/k8s-mediaserver.yml b/k8s-mediaserver.yml index 3a98b72..1305a5c 100644 --- a/k8s-mediaserver.yml +++ b/k8s-mediaserver.yml @@ -9,16 +9,17 @@ spec: ingress_host: k8s-mediaserver.k8s.test plex_ingress_host: k8s-plex.k8s.test image_tag: latest - # UID to run the process with + podDistribution: cluster # can be "spread" or "cluster" + #UID to run the process with puid: 1000 - # GID to run the process with + #GID to run the process with pgid: 1000 - # Persistent storage selections and pathing + #Persistent storage selections and pathing storage: - customVolume: false # set to true if not using a PVC (must provide volume below) + customVolume: false #set to true if not using a PVC (must provide volume below) + accessMode: "" pvcName: mediaserver-pvc size: 5Gi - accessMode: "" pvcStorageClass: "" # the path starting from the top level of the pv you're passing. 
If your share is server.local/share/, then tv is server.local/share/media/tv subPaths: @@ -33,6 +34,7 @@ spec: # path: /mnt/share ingress: ingressClassName: "" + nodeSelector: {} sonarr: enabled: true @@ -45,10 +47,8 @@ spec: port: 8989 nodePort: extraLBService: false + extraLBAnnotations: {} # Defines an additional LB service, requires cloud provider service or MetalLB - # extraLBService: - # annotations: - # my-annotation: my-value ingress: enabled: true annotations: {} @@ -58,15 +58,15 @@ spec: secretName: "" resources: {} volume: {} - # name: pvc-sonarr-config - # storageClassName: longhorn - # annotations: - # my-annotation/test: my-value - # labels: - # my-label/test: my-other-value - # accessModes: ReadWriteOnce - # storage: 5Gi - # selector: {} + #name: pvc-sonarr-config + #storageClassName: longhorn + #annotations: + # my-annotation/test: my-value + #labels: + # my-label/test: my-other-value + #accessModes: ReadWriteOnce + #storage: 5Gi + #selector: {} radarr: enabled: true @@ -78,11 +78,9 @@ spec: type: ClusterIP port: 7878 nodePort: - extraLBService: false # Defines an additional LB service, requires cloud provider service or MetalLB - # extraLBService: - # annotations: - # my-annotation: my-value + extraLBService: false + extraLBAnnotations: {} ingress: enabled: true annotations: {} @@ -92,13 +90,13 @@ spec: secretName: "" resources: {} volume: {} - # name: pvc-radarr-config - # storageClassName: longhorn - # annotations: {} - # labels: {} - # accessModes: ReadWriteOnce - # storage: 5Gi - # selector: {} + #name: pvc-radarr-config + #storageClassName: longhorn + #annotations: {} + #labels: {} + #accessModes: ReadWriteOnce + #storage: 5Gi + #selector: {} jackett: enabled: true @@ -111,10 +109,8 @@ spec: port: 9117 nodePort: extraLBService: false + extraLBAnnotations: {} # Defines an additional LB service, requires cloud provider service or MetalLB - # extraLBService: - # annotations: - # my-annotation: my-value ingress: enabled: true annotations: {} @@ -144,19 +140,20 @@ spec: utp: type: ClusterIP port: 9091 + # if type is NodePort, nodePort must be set nodePort: # Defines an additional LB service, requires cloud provider service or MetalLB extraLBService: false + extraLBAnnotations: {} peer: type: ClusterIP port: 51413 + # if type is NodePort, nodePort and nodePortUDP must be set nodePort: nodePortUDP: # Defines an additional LB service, requires cloud provider service or MetalLB extraLBService: false - # extraLBService: - # annotations: - # my-annotation: my-value + extraLBAnnotations: {} ingress: enabled: true annotations: {} @@ -194,15 +191,14 @@ spec: nodePort: # Defines an additional LB service, requires cloud provider service or MetalLB extraLBService: false + extraLBAnnotations: {} https: type: ClusterIP port: 9090 nodePort: # Defines an additional LB service, requires cloud provider service or MetalLB extraLBService: false - # extraLBService: - # annotations: - # my-annotation: my-value + extraLBAnnotations: {} ingress: enabled: true annotations: {} @@ -232,6 +228,7 @@ spec: port: 9696 nodePort: extraLBService: false + extraLBAnnotations: {} ingress: enabled: true annotations: {} @@ -241,6 +238,13 @@ spec: secretName: "" resources: {} volume: {} + # name: pvc-prowlarr-config + # storageClassName: longhorn + # annotations: {} + # labels: {} + # accessModes: ReadWriteOnce + # storage: 5Gi + # selector: {} plex: enabled: true @@ -256,6 +260,7 @@ spec: nodePort: # Defines an additional LB service, requires cloud provider service or MetalLB extraLBService: false + 
extraLBAnnotations: {} ingress: enabled: true annotations: {}