diff --git a/Makefile b/Makefile index 4599e91..3dd680c 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ REGISTRY_USER_NAME?="" REGISTRY_PASSWORD?="" # Image URL to use all building/pushing image targets -COST_EXPORTER_IMG ?= "${REGISTRY}/${REGISTRY_NAMESPACE}/fadvisor-cost-exporter:${GIT_VERSION}" +FADVISOR_IMG ?= "${REGISTRY}/${REGISTRY_NAMESPACE}/fadvisor:${GIT_VERSION}" # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -98,32 +98,32 @@ test: fmt vet goimports ## Run tests. .PHONY: build -build: cost-exporter +build: fadvisor .PHONY: all -all: test lint vet cost-exporter +all: test lint vet fadvisor -.PHONY: cost-exporter -cost-exporter: ## Build binary with the cost-exporter. - CGO_ENABLED=0 GOOS=$(GOOS) go build -ldflags $(LDFLAGS) -o bin/cost-exporter cmd/cost-exporter/main.go +.PHONY: fadvisor +fadvisor: ## Build binary with the fadvisor. + CGO_ENABLED=0 GOOS=$(GOOS) go build -ldflags $(LDFLAGS) -o bin/fadvisor cmd/fadvisor/main.go .PHONY: images -images: image-cost-exporter +images: image-fadvisor -.PHONY: image-cost-exporter -image-cost-exporter: test ## Build docker image with the cost-exporter. - docker build --build-arg LDFLAGS=$(LDFLAGS) --build-arg PKGNAME=cost-exporter -t ${COST_EXPORTER_IMG} . +.PHONY: image-fadvisor +image-fadvisor: test ## Build docker image with the fadvisor. + docker build --build-arg LDFLAGS=$(LDFLAGS) --build-arg PKGNAME=fadvisor -t ${FADVISOR_IMG} . .PHONY: push-images -push-images: push-image-cost-exporter +push-images: push-image-fadvisor -.PHONY: push-image-cost-exporter -push-image-cost-exporter: ## Push images. +.PHONY: push-image-fadvisor +push-image-fadvisor: ## Push images. ifneq ($(REGISTRY_USER_NAME), "") docker login -u $(REGISTRY_USER_NAME) -p $(REGISTRY_PASSWORD) ${REGISTRY} endif - docker push ${COST_EXPORTER_IMG} + docker push ${FADVISOR_IMG} # go-get-tool will 'go get' any package $2 and install it to $1. 
@@ -140,20 +140,6 @@ rm -rf $$TMP_DIR ;\ } endef -controller-gen: -ifeq (, $(shell which controller-gen)) - @{ \ - set -e ;\ - CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\ - cd $$CONTROLLER_GEN_TMP_DIR ;\ - go mod init tmp ;\ - go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0 ;\ - rm -rf $$CONTROLLER_GEN_TMP_DIR ;\ - } -CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen -else -CONTROLLER_GEN=$(shell which controller-gen) -endif golangci-lint: ifeq (, $(shell which golangci-lint)) diff --git a/README.md b/README.md index 261353d..371049e 100644 --- a/README.md +++ b/README.md @@ -33,95 +33,115 @@ Cost model is an implementation of the cost allocation and showback & chargeback So the containers price in different node type is different. # Tutorial -Cost-Exporter is a metrics exporter which collects cloud instance price information by calling Cloud Billing API and exports the price information as metrics. +Fadvisor is a metrics exporter which collects cloud instance price information by calling Cloud Billing API and exports the price information as metrics. Any cloud provider can implement the API and Crane will work for the specific Cloud, Tencent Cloud is supported in current release. 
-## Deploy all components by one command +## Prerequisites +Install Prometheus ``` -helm install fadvisor deploy/helm/fadvisor -n crane-system --set cost-exporter.extraArgs.provider={cloud provider, now support qcloud} --set cost-exporter.extraArgs.secretid={{your cloud secret id}} --set cost-exporter.extraArgs.secretkey={{your cloud secret key}} +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm install prometheus -n crane-system --set pushgateway.enabled=false --set alertmanager.enabled=false --set server.persistentVolume.enabled=false -f https://raw.githubusercontent.com/gocrane/helm-charts/main/integration/prometheus/override_values.yaml --create-namespace prometheus-community/prometheus + ``` -Except cost-exporter, it will install following components in your system by default. +Install Grafana ``` -dependencies: - - name: kube-state-metrics - condition: fadvisor.kube-state-metrics.enabled,kube-state-metrics.enabled - repository: file://./charts/kube-state-metrics - - name: node-exporter - condition: fadvisor.node-exporter.enabled,node-exporter.enabled - repository: file://./charts/node-exporter - - name: prometheus - condition: fadvisor.prometheus.enabled,prometheus.enabled - repository: file://./charts/prometheus - - name: grafana - condition: fadvisor.grafana.enabled,grafana.enabled - repository: file://./charts/grafana +helm repo add grafana https://grafana.github.io/helm-charts +helm install grafana -f https://raw.githubusercontent.com/gocrane/helm-charts/main/integration/grafana/override_values.yaml -n crane-system --create-namespace grafana/grafana ``` -Install on local, it will use default config. +## Deploy fadvisor +### Deploy on local +Install on a local environment such as a Mac desktop; it will use the default config. You can also deploy on cloud this way, but it will use the default config. 
``` -helm install fadvisor deploy/helm/fadvisor -n crane-system +helm repo add crane https://gocrane.github.io/helm-charts +helm install fadvisor -n crane-system --create-namespace crane/fadvisor ``` -## Install one by one - -To install cost-exporter, you must specify cloud provider and your cloud account credentials as secretid & secretkey. -If you are running Crane in your private cloud, default price will be applied. +### Deploy on cloud +If you deploy fadvisor on cloud, only Tencent Cloud is supported for now. You need to provide a qcloud config file which includes cloud credentials as follows; make sure you specify your `clusterId`,`secretId`,`secretKey`,`region` ``` -helm install cost-exporter deploy/helm/fadvisor/charts/cost-exporter -n crane-system --set extraArgs.provider={cloud provider, now support qcloud} --set extraArgs.secretid={{your cloud secret id}} --set extraArgs.secretkey={{your cloud secret key}} +[credentials] +clusterId={your cluster id} +appId=app1 +secretId={your cloud provider credential secret id} +secretKey={your cloud provider credential secret key} +[clientProfile] +defaultLimit=100 +defaultLanguage=zh-CN +defaultTimeoutSeconds=10 +region={your cluster region, such as ap-beijing、ap-shanghai、ap-guangzhou、ap-shenzhen and so on, you can find region name in your cloud provider console} +domainSuffix=internal.tencentcloudapi.com +scheme= ``` - -Install other components. 
+then execute the following commands, assuming your config file is named qcloud-config.ini in your current directory: ``` -helm install kube-state-metrics deploy/helm/fadvisor/charts/kube-state-metrics -n crane-system -helm install node-exporter deploy/helm/fadvisor/charts/node-exporter -n crane-system -helm install prometheus deploy/helm/fadvisor/charts/prometheus -n crane-system -helm install grafana deploy/helm/fadvisor/charts/grafana -n crane-system +helm repo add crane https://gocrane.github.io/helm-charts +helm install fadvisor --set-file cloudConfigFile=qcloud-config.ini --set extraArgs.provider=qcloud -n crane-system --create-namespace crane/fadvisor ``` +Besides Fadvisor, it will install the following components in your system by default. + + - kube-state-metrics + - node-exporter + - prometheus + - grafana ## Integrated with existing monitoring components -If you have Prometheus and Grafana installed, you can just deploy the exporter and change related configuration. +If you have Prometheus and Grafana installed, you can just deploy fadvisor and change the related configuration. -You can deploy the cost-exporter to your tke cluster to collect the metric, use prometheus to scrape the metric, and following dashboards can be used; +You can deploy fadvisor to your TKE cluster to collect the metrics, use prometheus to scrape the metrics, and the following dashboards can be used; -### 1. Deploy cost-exporter +### 1. Deploy fadvisor #### Install by helm ``` -helm install cost-exporter deploy/helm/fadvisor/charts/cost-exporter -n crane-system --set extraArgs.provider={cloud provider, now support qcloud} --set extraArgs.secretid={{your cloud secret id}} --set extraArgs.secretkey={{your cloud secret key}} +helm repo add crane https://gocrane.github.io/helm-charts +helm install fadvisor -n crane-system --create-namespace crane/fadvisor ``` #### Install by kubectl -**NOTE** you must specify your k8s secret id and secret key in yaml, this is used to access Tencent Cloud Cvm API. 
+**NOTE** you must specify your k8s secret id and secret key in `config` file, this is used to access Tencent Cloud Cvm API. + +``` +[credentials] +clusterId={your cluster id} +appId=app1 +secretId={your cloud provider credential secret id} +secretKey={your cloud provider credential secret key} +[clientProfile] +defaultLimit=100 +defaultLanguage=zh-CN +defaultTimeoutSeconds=10 +region={your cluster region, such as ap-beijing、ap-shanghai、ap-guangzhou、ap-shenzhen and so on, you can find region name in your cloud provider console} +domainSuffix=internal.tencentcloudapi.com +scheme= +``` +then execute `cat config | base64`, paste the secret to following secret.yaml's config. ``` -kubectl create -f deploy/cost-exporter/ -n crane-system +apiVersion: v1 +kind: Secret +metadata: + name: fadvisor +data: + config: W2NyZWRlbnRpYWxzXQpjbHVzdGVySWQ9Y2x1c3RlcjEKYXBwSWQ9YXBwMQpzZWNyZXRJZD1pZDEKc2VjcmV0S2V5PWtleTEKW2NsaWVudFByb2ZpbGVdCmRlYnVnPXRydWUKZGVmYXVsdExpbWl0PTEKZGVmYXVsdExhbmd1YWdlPUNICmRlZmF1bHRUaW1lb3V0U2Vjb25kcz0xMApyZWdpb249c2hhbmdoYWkKZG9tYWluU3VmZml4PWNsb3VkLnRlbmNlbnQuY29tCnNjaGVtZT1odHRwCg== ``` -The cost-exporter param has secretId and secretKey, you must provide your cloud provider secret +then deploy by kubectl. ``` -containers: -- name: fadvisor-cost-exporter - image: docker.io/gocrane/fadvisor-cost-exporter:6927f01 - imagePullPolicy: IfNotPresent - command: - - /cost-exporter - - --v=4 - - --provider=qcloud - - --secretId= - - --secretKey= +kubectl create -f deploy/fadvisor/ -n crane-system ``` ### 2. Configure the prometheus scrape config and rules Configure following scrape target to your prometheus. 
``` -- job_name: "fadvisor-cost-exporter" +- job_name: "fadvisor" honor_timestamps: true scrape_interval: 5m scheme: http metrics_path: /metrics static_configs: - - targets: ['cost-exporter.crane-system.svc.cluster.local:8081'] + - targets: ['fadvisor.crane-system.svc.cluster.local:8081'] ``` **NOTE**, except cost-exporter, your prometheus must have scraped the kubernetes metrics including: - `kubelet-cadvisor` metrics. diff --git a/cmd/cost-exporter/main.go b/cmd/cost-exporter/main.go deleted file mode 100644 index 28f3822..0000000 --- a/cmd/cost-exporter/main.go +++ /dev/null @@ -1,24 +0,0 @@ -package main - -import ( - "fmt" - "os" - - genericapiserver "k8s.io/apiserver/pkg/server" - "k8s.io/component-base/logs" - - "github.com/gocrane/fadvisor/cmd/cost-exporter/app" -) - -// cost-exporter main. -func main() { - logs.InitLogs() - defer logs.FlushLogs() - - ctx := genericapiserver.SetupSignalContext() - - if err := app.NewExporterCommand(ctx).Execute(); err != nil { - _, _ = fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} diff --git a/cmd/cost-exporter/app/exporter.go b/cmd/fadvisor/app/exporter.go similarity index 87% rename from cmd/cost-exporter/app/exporter.go rename to cmd/fadvisor/app/exporter.go index 937b640..0a1ead0 100644 --- a/cmd/cost-exporter/app/exporter.go +++ b/cmd/fadvisor/app/exporter.go @@ -14,7 +14,7 @@ import ( "k8s.io/client-go/tools/leaderelection" "k8s.io/klog/v2" - "github.com/gocrane/fadvisor/cmd/cost-exporter/app/options" + "github.com/gocrane/fadvisor/cmd/fadvisor/app/options" exporter "github.com/gocrane/fadvisor/pkg/cost-exporter" "github.com/gocrane/fadvisor/pkg/cost-exporter/cache" "github.com/gocrane/fadvisor/pkg/cost-exporter/cloudcost" @@ -30,8 +30,8 @@ func NewExporterCommand(ctx context.Context) *cobra.Command { opts := options.NewOptions() cmd := &cobra.Command{ - Use: "cost-exporter", - Long: `cost-exporter used to export cost metrics to storage store such as prometheus`, + Use: "fadvisor", + Long: `fadvisor used to 
export cost metrics to storage store such as prometheus`, Run: func(cmd *cobra.Command, args []string) { if err := opts.Complete(); err != nil { klog.Errorf("opts complete failed, exit: %v", err) @@ -55,7 +55,7 @@ func NewExporterCommand(ctx context.Context) *cobra.Command { return cmd } -// Run runs the cost-exporter with options. This should never exit. +// Run runs the fadvisor with options. This should never exit. func Run(ctx context.Context, opts *options.Options) error { creator, err := util.CreateK8sClient(opts.ClientConfig, opts.MaxIdleConnsPerClient) @@ -63,8 +63,8 @@ func Run(ctx context.Context, opts *options.Options) error { return err } - kubeClient := creator("cost-exporter") - kubeEventClient := creator("cost-exporter-event") + kubeClient := creator("fadvisor") + kubeEventClient := creator("fadvisor-event") k8sCache := cache.NewCache(kubeClient) k8sCache.WaitForCacheSync(ctx.Done()) @@ -98,8 +98,8 @@ func Run(ctx context.Context, opts *options.Options) error { <-serverStopedCh } - eventRecorder := events.NewEventBroadcasterAdapter(kubeEventClient).NewRecorder("cost-exporter-event") - leadElectCfg, err := util.CreateLeaderElectionConfig("cost-exporter", kubeClient, eventRecorder, opts.LeaderElection) + eventRecorder := events.NewEventBroadcasterAdapter(kubeEventClient).NewRecorder("fadvisor-event") + leadElectCfg, err := util.CreateLeaderElectionConfig("fadvisor", kubeClient, eventRecorder, opts.LeaderElection) if err != nil { return fmt.Errorf("couldn't create leader elector config: %v", err) } diff --git a/cmd/cost-exporter/app/options/options.go b/cmd/fadvisor/app/options/options.go similarity index 98% rename from cmd/cost-exporter/app/options/options.go rename to cmd/fadvisor/app/options/options.go index 2e7934b..f415186 100644 --- a/cmd/cost-exporter/app/options/options.go +++ b/cmd/fadvisor/app/options/options.go @@ -84,7 +84,7 @@ func (o *Options) AddFlags(flags *pflag.FlagSet) { "The namespace of resource object that is used for locking 
during "+ "leader election.") - flags.StringVar(&o.CloudConfig.Provider, "provider", "default", "cloud provider the cost-exporter running on, now support default and qcloud only.") + flags.StringVar(&o.CloudConfig.Provider, "provider", "default", "cloud provider the fadvisor running on, now support default and qcloud only.") flags.StringVar(&o.CloudConfig.CloudConfigFile, "cloudConfigFile", "", "cloudConfigFile specifies path for the cloud configuration.") flags.StringVar(&o.ClientConfig.Kubeconfig, "kubeconfig", diff --git a/cmd/apiserver/main.go b/cmd/fadvisor/main.go similarity index 82% rename from cmd/apiserver/main.go rename to cmd/fadvisor/main.go index b477d41..21778dd 100644 --- a/cmd/apiserver/main.go +++ b/cmd/fadvisor/main.go @@ -7,10 +7,10 @@ import ( genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/component-base/logs" - "github.com/gocrane/fadvisor/cmd/cost-exporter/app" + "github.com/gocrane/fadvisor/cmd/fadvisor/app" ) -// apiserver main. +// fadvisor main. func main() { logs.InitLogs() defer logs.FlushLogs() diff --git a/deploy/cost-exporter/deployment.yaml b/deploy/fadvisor/deployment.yaml similarity index 56% rename from deploy/cost-exporter/deployment.yaml rename to deploy/fadvisor/deployment.yaml index 711ecd3..d5cdaca 100644 --- a/deploy/cost-exporter/deployment.yaml +++ b/deploy/fadvisor/deployment.yaml @@ -1,52 +1,40 @@ - -apiVersion: v1 -kind: Namespace -metadata: - name: crane-system - ---- -apiVersion: v1 -kind: Namespace -metadata: - name: crane-monitoring - --- kind: ServiceAccount apiVersion: v1 metadata: - name: fadvisor-cost-exporter + name: fadvisor namespace: crane-system --- apiVersion: apps/v1 kind: Deployment metadata: - name: fadvisor-cost-exporter + name: fadvisor namespace: crane-system labels: - app: fadvisor-cost-exporter + app: fadvisor spec: replicas: 1 selector: matchLabels: - app: fadvisor-cost-exporter + app: fadvisor template: metadata: labels: - app: fadvisor-cost-exporter + app: fadvisor spec: - 
serviceAccountName: fadvisor-cost-exporter + serviceAccountName: fadvisor tolerations: - key: node-role.kubernetes.io/master operator: Exists containers: - - name: fadvisor-cost-exporter - image: docker.io/gocrane/fadvisor-cost-exporter + - name: fadvisor + image: docker.io/gocrane/fadvisor:v0.1.0-9-gcbb4758 imagePullPolicy: IfNotPresent command: - - /cost-exporter + - /fadvisor - --v=4 - - --provider=qcloud + - --provider=default - --cloudConfigFile=/etc/cloud/config volumeMounts: - mountPath: /etc/cloud @@ -56,4 +44,4 @@ spec: - name: cloud-config secret: defaultMode: 420 - secretName: cloud-config \ No newline at end of file + secretName: fadvisor \ No newline at end of file diff --git a/deploy/cost-exporter/rbac.yaml b/deploy/fadvisor/rbac.yaml similarity index 73% rename from deploy/cost-exporter/rbac.yaml rename to deploy/fadvisor/rbac.yaml index e348e62..142c564 100644 --- a/deploy/cost-exporter/rbac.yaml +++ b/deploy/fadvisor/rbac.yaml @@ -1,7 +1,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: fadvisor-cost-exporter + name: fadvisor rules: - apiGroups: [ '*' ] resources: [ '*' ] @@ -11,12 +11,12 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: fadvisor-cost-exporter + name: fadvisor roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: fadvisor-cost-exporter + name: fadvisor subjects: - kind: ServiceAccount - name: fadvisor-cost-exporter + name: fadvisor namespace: crane-system \ No newline at end of file diff --git a/deploy/cost-exporter/secret.yaml b/deploy/fadvisor/secret.yaml similarity index 94% rename from deploy/cost-exporter/secret.yaml rename to deploy/fadvisor/secret.yaml index ec46318..528259d 100644 --- a/deploy/cost-exporter/secret.yaml +++ b/deploy/fadvisor/secret.yaml @@ -1,6 +1,6 @@ apiVersion: v1 kind: Secret metadata: - name: cloud-config + name: fadvisor data: config: 
W2NyZWRlbnRpYWxzXQpjbHVzdGVySWQ9Y2x1c3RlcjEKYXBwSWQ9YXBwMQpzZWNyZXRJZD1pZDEKc2VjcmV0S2V5PWtleTEKW2NsaWVudFByb2ZpbGVdCmRlYnVnPXRydWUKZGVmYXVsdExpbWl0PTEKZGVmYXVsdExhbmd1YWdlPUNICmRlZmF1bHRUaW1lb3V0U2Vjb25kcz0xMApyZWdpb249c2hhbmdoYWkKZG9tYWluU3VmZml4PWNsb3VkLnRlbmNlbnQuY29tCnNjaGVtZT1odHRwCg== \ No newline at end of file diff --git a/deploy/cost-exporter/service.yaml b/deploy/fadvisor/service.yaml similarity index 76% rename from deploy/cost-exporter/service.yaml rename to deploy/fadvisor/service.yaml index f2277c4..489a432 100644 --- a/deploy/cost-exporter/service.yaml +++ b/deploy/fadvisor/service.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: fadvisor-cost-exporter + name: fadvisor namespace: crane-system spec: ports: @@ -12,4 +12,4 @@ spec: port: 8081 targetPort: 8081 selector: - app: fadvisor-cost-exporter + app: fadvisor diff --git a/deploy/helm/fadvisor/.helmignore b/deploy/helm/fadvisor/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/deploy/helm/fadvisor/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/deploy/helm/fadvisor/Chart.yaml b/deploy/helm/fadvisor/Chart.yaml deleted file mode 100644 index df84c70..0000000 --- a/deploy/helm/fadvisor/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: fadvisor -description: A Helm chart for fadvisor - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. 
They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "0.1.0" diff --git a/deploy/helm/fadvisor/charts/cost-exporter/.helmignore b/deploy/helm/fadvisor/charts/cost-exporter/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/deploy/helm/fadvisor/charts/cost-exporter/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/deploy/helm/fadvisor/charts/cost-exporter/Chart.yaml b/deploy/helm/fadvisor/charts/cost-exporter/Chart.yaml deleted file mode 100644 index 937fe80..0000000 --- a/deploy/helm/fadvisor/charts/cost-exporter/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: cost-exporter -description: A Helm chart for cost-exporter - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. 
-# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "0.1.0" diff --git a/deploy/helm/fadvisor/charts/cost-exporter/templates/_helpers.tpl b/deploy/helm/fadvisor/charts/cost-exporter/templates/_helpers.tpl deleted file mode 100644 index 42af234..0000000 --- a/deploy/helm/fadvisor/charts/cost-exporter/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "prometheus.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "prometheus.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "prometheus.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "prometheus.labels" -}} -helm.sh/chart: {{ include "prometheus.chart" . }} -{{ include "prometheus.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "prometheus.selectorLabels" -}} -app.kubernetes.io/name: {{ include "prometheus.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "prometheus.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "prometheus.fullname" .) 
.Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/cost-exporter/templates/deployment.yaml b/deploy/helm/fadvisor/charts/cost-exporter/templates/deployment.yaml deleted file mode 100644 index eb8ef26..0000000 --- a/deploy/helm/fadvisor/charts/cost-exporter/templates/deployment.yaml +++ /dev/null @@ -1,47 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: {{ .Values.CostExporterName }} - name: {{ .Values.CostExporterName }} - namespace: {{ .Release.Namespace }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: {{ .Values.CostExporterName }} - template: - metadata: - labels: - app: {{ .Values.CostExporterName }} - spec: - serviceAccountName: {{ .Values.CostExporterName }} - containers: - - name: {{ .Values.CostExporterName }} - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} - command: - - /cost-exporter - args: - {{- range $key, $value := .Values.extraArgs }} - {{- if $value }} - - --{{ $key }}={{ $value }} - {{- else }} - - --{{ $key }} - {{- end }} - {{- end }} - {{- with .Values.resources }} - resources: - {{- toYaml . | nindent 10 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} diff --git a/deploy/helm/fadvisor/charts/cost-exporter/templates/rbac.yaml b/deploy/helm/fadvisor/charts/cost-exporter/templates/rbac.yaml deleted file mode 100644 index c540465..0000000 --- a/deploy/helm/fadvisor/charts/cost-exporter/templates/rbac.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.CostExporterName }} - namespace: {{ .Release.Namespace }} - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ .Values.CostExporterName }} -rules: - - apiGroups: [ '*' ] - resources: [ '*' ] - verbs: [ "*" ] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ .Values.CostExporterName }} -subjects: - - kind: ServiceAccount - name: {{ .Values.CostExporterName }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.CostExporterName }} - apiGroup: rbac.authorization.k8s.io diff --git a/deploy/helm/fadvisor/charts/cost-exporter/templates/service.yaml b/deploy/helm/fadvisor/charts/cost-exporter/templates/service.yaml deleted file mode 100644 index ef0b260..0000000 --- a/deploy/helm/fadvisor/charts/cost-exporter/templates/service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '{{ .Values.service.port }}' - labels: - app: {{ .Values.CostExporterName }} - name: {{ .Values.CostExporterName }} - namespace: {{ .Release.Namespace }} -spec: - type: {{ .Values.service.type }} - ports: - - name: http - protocol: TCP - port: {{ .Values.service.port }} - targetPort: 8081 - {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} - nodePort: {{ .Values.service.nodePort }} - {{- end }} - selector: - app: {{ .Values.CostExporterName }} \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/cost-exporter/values.yaml 
b/deploy/helm/fadvisor/charts/cost-exporter/values.yaml deleted file mode 100644 index 842e7eb..0000000 --- a/deploy/helm/fadvisor/charts/cost-exporter/values.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Default values for prometheus. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -CostExporterName: "cost-exporter" - -replicaCount: 1 - -# image for prometheus deployment -image: - repository: "docker.io/gocrane/fadvisor-cost-exporter" - pullPolicy: Always - tag: "v0.1.0" - -service: - type: ClusterIP - port: 8081 - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - -tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" - -resources: - requests: - cpu: 250m - memory: 750Mi - limits: - cpu: 1 - memory: 1Gi - -extraArgs: - v: 4 \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/grafana/.helmignore b/deploy/helm/fadvisor/charts/grafana/.helmignore deleted file mode 100644 index 8cade13..0000000 --- a/deploy/helm/fadvisor/charts/grafana/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. 
-.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.vscode -.project -.idea/ -*.tmproj -OWNERS diff --git a/deploy/helm/fadvisor/charts/grafana/Chart.yaml b/deploy/helm/fadvisor/charts/grafana/Chart.yaml deleted file mode 100644 index 4c5cb35..0000000 --- a/deploy/helm/fadvisor/charts/grafana/Chart.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: v2 -name: grafana -version: 6.20.3 -appVersion: 8.3.4 -kubeVersion: "^1.8.0-0" -description: The leading tool for querying and visualizing time series and metrics. -home: https://grafana.net -icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png -sources: - - https://github.com/grafana/grafana -maintainers: - - name: zanhsieh - email: zanhsieh@gmail.com - - name: rtluckie - email: rluckie@cisco.com - - name: maorfr - email: maor.friedman@redhat.com - - name: Xtigyro - email: miroslav.hadzhiev@gmail.com - - name: torstenwalter - email: mail@torstenwalter.de -engine: gotpl -type: application diff --git a/deploy/helm/fadvisor/charts/grafana/README.md b/deploy/helm/fadvisor/charts/grafana/README.md deleted file mode 100644 index b6a2d7a..0000000 --- a/deploy/helm/fadvisor/charts/grafana/README.md +++ /dev/null @@ -1,560 +0,0 @@ -# Grafana Helm Chart - -* Installs the web dashboarding system [Grafana](http://grafana.org/) - -## Get Repo Info - -```console -helm repo add grafana https://grafana.github.io/helm-charts -helm repo update -``` - -_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ - -## Installing the Chart - -To install the chart with the release name `my-release`: - -```console -helm install my-release grafana/grafana -``` - -## Uninstalling the Chart - -To uninstall/delete the my-release deployment: - -```console -helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the 
release. - -## Upgrading an existing Release to a new major version - -A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an -incompatible breaking change needing manual actions. - -### To 4.0.0 (And 3.12.1) - -This version requires Helm >= 2.12.0. - -### To 5.0.0 - -You have to add --force to your helm upgrade command as the labels of the chart have changed. - -### To 6.0.0 - -This version requires Helm >= 3.1.0. - -## Configuration - -| Parameter | Description | Default | -|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| -| `replicas` | Number of nodes | `1` | -| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | -| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | -| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | -| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | -| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| -| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | -| `priorityClassName` | Name of Priority Class to assign pods | `nil` | -| `image.repository` | Image repository | `grafana/grafana` | -| `image.tag` | Image tag (`Must be >= 5.0.0`) | `8.2.5` | -| `image.sha` | Image sha (optional) | `2acf04c016c77ca2e89af3536367ce847ee326effb933121881c7c89781051d3` | -| `image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Image pull secrets | `{}` | -| `service.enabled` | Enable grafana service | `true` | -| `service.type` | Kubernetes service type | `ClusterIP` | -| `service.port` | Kubernetes port where service is exposed | `80` | -| `service.portName` | Name of the port on the 
service | `service` | -| `service.targetPort` | Internal service is port | `3000` | -| `service.nodePort` | Kubernetes service nodePort | `nil` | -| `service.annotations` | Service annotations | `{}` | -| `service.labels` | Custom labels | `{}` | -| `service.clusterIP` | internal cluster service IP | `nil` | -| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | -| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` | -| `service.externalIPs` | service external IP addresses | `[]` | -| `headlessService` | Create a headless service | `false` | -| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | -| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | -| `ingress.enabled` | Enables Ingress | `false` | -| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | -| `ingress.labels` | Custom labels | `{}` | -| `ingress.path` | Ingress accepted path | `/` | -| `ingress.pathType` | Ingress type of path | `Prefix` | -| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | -| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). Requires `ingress.hosts` to have one or more host entries. 
| `[]` | -| `ingress.tls` | Ingress TLS configuration | `[]` | -| `resources` | CPU/Memory resource requests/limits | `{}` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Toleration labels for pod assignment | `[]` | -| `affinity` | Affinity settings for pod assignment | `{}` | -| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | -| `extraContainers` | Sidecar containers to add to the grafana pod | `""` | -| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | -| `extraLabels` | Custom labels for all manifests | `{}` | -| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | -| `persistence.enabled` | Use persistent volume to store data | `false` | -| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | -| `persistence.size` | Size of persistent volume claim | `10Gi` | -| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` | -| `persistence.storageClassName` | Type of persistent volume claim | `nil` | -| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | -| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | -| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | -| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` | -| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | -| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | -| `initChownData.enabled` | If false, don't reset data ownership at startup | true | -| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | -| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | -| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` | -| 
`initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | -| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | -| `schedulerName` | Alternate scheduler name | `nil` | -| `env` | Extra environment variables passed to pods | `{}` | -| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. | `{}` | -| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | -| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret | `{}` | -| `enableServiceLinks` | Inject Kubernetes services as environment variables. | `true` | -| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | -| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | -| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` | -| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | -| `plugins` | Plugins to be loaded along with Grafana | `[]` | -| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | -| `notifiers` | Configure grafana notifiers | `{}` | -| `dashboardProviders` | Configure grafana dashboard providers | `{}` | -| `dashboards` | Dashboards to import | `{}` | -| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | -| `grafana.ini` | Grafana's primary configuration | `{}` | -| `ldap.enabled` | Enable LDAP authentication | `false` | -| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. 
| `""` | -| `ldap.config` | Grafana's LDAP configuration | `""` | -| `annotations` | Deployment annotations | `{}` | -| `labels` | Deployment labels | `{}` | -| `podAnnotations` | Pod annotations | `{}` | -| `podLabels` | Pod labels | `{}` | -| `podPortName` | Name of the grafana port on the pod | `grafana` | -| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` | -| `sidecar.image.tag` | Sidecar image tag | `1.12.3` | -| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | -| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | -| `sidecar.resources` | Sidecar resources | `{}` | -| `sidecar.securityContext` | Sidecar securityContext | `{}` | -| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` | -| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | -| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | -| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | -| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | -| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | -| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | -| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` | -| `sidecar.dashboards.provider.type` | Provider type | `file` | -| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. 
| `false` | -| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | -| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | -| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | -| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `nil` | -| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | -| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | -| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | -| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. | `nil` | -| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. 
| `[]` | -| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | -| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | -| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` | -| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` | -| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | -| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | -| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces.Otherwise the namespace in which the sidecar is running will be used.It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | -| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | -| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. 
| `"password"` | -| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` | -| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | -| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` | -| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` | -| `serviceAccount.annotations` | ServiceAccount annotations | | -| `serviceAccount.create` | Create service account | `true` | -| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | -| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | -| `rbac.create` | Create and use RBAC resources | `true` | -| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | -| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | -| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` | -| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` | -| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | -| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | -| `command` | Define command to be executed by grafana container at startup | `nil` | -| `testFramework.enabled` | Whether to create test-related resources | `true` | -| `testFramework.image` | `test-framework` image repository. | `bats/bats` | -| `testFramework.tag` | `test-framework` image tag. 
| `v1.4.1` | -| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` | -| `testFramework.securityContext` | `test-framework` securityContext | `{}` | -| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | -| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | -| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | -| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` | -| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | -| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | -| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | -| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | -| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | -| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | -| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | -| `serviceMonitor.path` | Path to scrape | `/metrics` | -| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | -| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | -| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | -| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | -| `serviceMonitor.relabelings` | MetricRelabelConfigs to apply to samples before ingestion. 
| `[]` | -| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | -| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | -| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | -| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | -| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | -| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | -| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | -| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` | -| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | -| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | -| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | -| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` | -| `imageRenderer.service.portName` | image-renderer service port name | `http` | -| `imageRenderer.service.port` | image-renderer service port used by both service and deployment | `8081` | -| `imageRenderer.grafanaProtocol` | Protocol to use for image renderer callback url | `http` | -| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | -| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` | -| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` | -| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` | -| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` | -| `imageRenderer.resources` | Set resource limits for image-renderer pdos | `{}` | -| `networkPolicy.enabled` | Enable creation of 
NetworkPolicy resources. | `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | -| `enableKubeBackwardCompatibility` | Enable backward compatibility of kubernetes where pod's defintion version below 1.13 doesn't have the enableServiceLinks option | `false` | - - - -### Example ingress with path - -With grafana 6.3 and above -```yaml -grafana.ini: - server: - domain: monitoring.example.com - root_url: "%(protocol)s://%(domain)s/grafana" - serve_from_sub_path: true -ingress: - enabled: true - hosts: - - "monitoring.example.com" - path: "/grafana" -``` - -### Example of extraVolumeMounts - -Volume can be type persistentVolumeClaim or hostPath but not both at same time. -If none existingClaim or hostPath argument is givent then type is emptyDir. - -```yaml -- extraVolumeMounts: - - name: plugins - mountPath: /var/lib/grafana/plugins - subPath: configs/grafana/plugins - existingClaim: existing-grafana-claim - readOnly: false - - name: dashboards - mountPath: /var/lib/grafana/dashboards - hostPath: /usr/shared/grafana/dashboards - readOnly: false -``` - -## Import dashboards - -There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method: - -```yaml -dashboards: - default: - some-dashboard: - json: | - { - "annotations": - - ... - # Complete json file here - ... 
- - "title": "Some Dashboard", - "uid": "abcd1234", - "version": 1 - } - custom-dashboard: - # This is a path to a file inside the dashboards directory inside the chart directory - file: dashboards/custom-dashboard.json - prometheus-stats: - # Ref: https://grafana.com/dashboards/2 - gnetId: 2 - revision: 2 - datasource: Prometheus - local-dashboard: - url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json -``` - -## BASE64 dashboards - -Dashboards could be stored on a server that does not return JSON directly and instead of it returns a Base64 encoded file (e.g. Gerrit) -A new parameter has been added to the url use case so if you specify a b64content value equals to true after the url entry a Base64 decoding is applied before save the file to disk. -If this entry is not set or is equals to false not decoding is applied to the file before saving it to disk. - -### Gerrit use case - -Gerrit API for download files has the following schema: where {project-name} and -{file-id} usually has '/' in their values and so they MUST be replaced by %2F so if project-name is user/repo, branch-id is master and file-id is equals to dir1/dir2/dashboard -the url value is - -## Sidecar for dashboards - -If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana -pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with -a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written -to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported -dashboards are deleted/updated. - -A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside -one configmap is currently not properly mirrored in grafana. 
- -Example dashboard config: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: sample-grafana-dashboard - labels: - grafana_dashboard: "1" -data: - k8s-dashboard.json: |- - [...] -``` - -## Sidecar for datasources - -If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana -pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and -filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in -those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, -the data sources in grafana can be imported. - -Secrets are recommended over configmaps for this usecase because datasources usually contain private -data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those. - -Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): - -```yaml -datasources: - datasources.yaml: - apiVersion: 1 - datasources: - # name of the datasource. Required - - name: Graphite - # datasource type. Required - type: graphite - # access mode. proxy or direct (Server or Browser in the UI). Required - access: proxy - # org id. will default to orgId 1 if not specified - orgId: 1 - # url - url: http://localhost:8080 - # database password, if used - password: - # database user, if used - user: - # database name, if used - database: - # enable/disable basic auth - basicAuth: - # basic auth username - basicAuthUser: - # basic auth password - basicAuthPassword: - # enable/disable with credentials headers - withCredentials: - # mark as default datasource. Max one per org - isDefault: - # fields that will be converted to json and stored in json_data - jsonData: - graphiteVersion: "1.1" - tlsAuth: true - tlsAuthWithCACert: true - # json object of data that will be encrypted. - secureJsonData: - tlsCACert: "..." 
- tlsClientCert: "..." - tlsClientKey: "..." - version: 1 - # allow users to edit datasources from the UI. - editable: false -``` - -## Sidecar for notifiers - -If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana -pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and -filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in -those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, -the notification channels in grafana can be imported. The secrets must be created before -`helm install` so that the notifiers init container can list the secrets. - -Secrets are recommended over configmaps for this usecase because alert notification channels usually contain -private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those. - -Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): - -```yaml -notifiers: - - name: notification-channel-1 - type: slack - uid: notifier1 - # either - org_id: 2 - # or - org_name: Main Org. - is_default: true - send_reminder: true - frequency: 1h - disable_resolve_message: false - # See `Supported Settings` section for settings supporter for each - # alert notification type. - settings: - recipient: 'XXX' - token: 'xoxb' - uploadImage: true - url: https://slack.com - -delete_notifiers: - - name: notification-channel-1 - uid: notifier1 - org_id: 2 - - name: notification-channel-2 - # default org_id: 1 -``` - -## How to serve Grafana with a path prefix (/grafana) - -In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. 
- -```yaml -ingress: - enabled: true - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/use-regex: "true" - - path: /grafana/?(.*) - hosts: - - k8s.example.dev - -grafana.ini: - server: - root_url: http://localhost:3000/grafana # this host can be localhost -``` - -## How to securely reference secrets in grafana.ini - -This example uses Grafana uses [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. - -In grafana.ini: - -```yaml -grafana.ini: - [auth.generic_oauth] - enabled = true - client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} - client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} -``` - -Existing secret, or created along with helm: - -```yaml ---- -apiVersion: v1 -kind: Secret -metadata: - name: auth-generic-oauth-secret -type: Opaque -stringData: - client_id: - client_secret: -``` - -Include in the `extraSecretMounts` configuration flag: - -```yaml -- extraSecretMounts: - - name: auth-generic-oauth-secret-mount - secretName: auth-generic-oauth-secret - defaultMode: 0440 - mountPath: /etc/secrets/auth_generic_oauth - readOnly: true -``` - -### extraSecretMounts using a Container Storage Interface (CSI) provider - -This example uses a CSI driver e.g. 
retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) - -```yaml -- extraSecretMounts: - - name: secrets-store-inline - mountPath: /run/secrets - readOnly: true - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: "my-provider" - nodePublishSecretRef: - name: akv-creds -``` - -## Image Renderer Plug-In - -This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/docs/remote_rendering_using_docker.md) - -```yaml -imageRenderer: - enabled: true -``` - -### Image Renderer NetworkPolicy - -By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance - -### High Availability for unified alerting - -If you want to run Grafana in a high availability cluster you need to enable -the headless service by setting `headlessService: true` in your `values.yaml` -file. - -As next step you have to setup the `grafana.ini` in your `values.yaml` in a way -that it will make use of the headless service to obtain all the IPs of the -cluster. You should replace ``{{ Name }}`` with the name of your helm deployment. - -```yaml -grafana.ini: - ... - unified_alerting: - enabled: true - ha_peers: {{ Name }}-headless:9094 - alerting: - enabled: false -``` diff --git a/deploy/helm/fadvisor/charts/grafana/ci/default-values.yaml b/deploy/helm/fadvisor/charts/grafana/ci/default-values.yaml deleted file mode 100644 index fc2ba60..0000000 --- a/deploy/helm/fadvisor/charts/grafana/ci/default-values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/deploy/helm/fadvisor/charts/grafana/ci/with-dashboard-json-values.yaml b/deploy/helm/fadvisor/charts/grafana/ci/with-dashboard-json-values.yaml deleted file mode 100644 index e0c4e41..0000000 --- a/deploy/helm/fadvisor/charts/grafana/ci/with-dashboard-json-values.yaml +++ /dev/null @@ -1,53 +0,0 @@ -dashboards: - my-provider: - my-awesome-dashboard: - # An empty but valid dashboard - json: | - { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "6.3.5" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "links": [], - "panels": [], - "schemaVersion": 19, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-6h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": ["5s"] - }, - "timezone": "", - "title": "Dummy Dashboard", - "uid": "IdcYQooWk", - "version": 1 - } - datasource: Prometheus diff --git a/deploy/helm/fadvisor/charts/grafana/ci/with-dashboard-values.yaml b/deploy/helm/fadvisor/charts/grafana/ci/with-dashboard-values.yaml deleted file mode 100644 index 7b662c5..0000000 --- a/deploy/helm/fadvisor/charts/grafana/ci/with-dashboard-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -dashboards: - my-provider: - my-awesome-dashboard: - gnetId: 10000 - revision: 1 - datasource: Prometheus -dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'my-provider' - orgId: 1 - folder: '' - type: file - updateIntervalSeconds: 10 - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards/my-provider diff --git a/deploy/helm/fadvisor/charts/grafana/ci/with-image-renderer-values.yaml 
b/deploy/helm/fadvisor/charts/grafana/ci/with-image-renderer-values.yaml deleted file mode 100644 index 32f3074..0000000 --- a/deploy/helm/fadvisor/charts/grafana/ci/with-image-renderer-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -podLabels: - customLableA: Aaaaa -imageRenderer: - enabled: true - env: - RENDERING_ARGS: --disable-gpu,--window-size=1280x758 - RENDERING_MODE: clustered - podLabels: - customLableB: Bbbbb - networkPolicy: - limitIngress: true - limitEgress: true - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 500m - memory: 50Mi diff --git a/deploy/helm/fadvisor/charts/grafana/dashboards/cluster-costs.json b/deploy/helm/fadvisor/charts/grafana/dashboards/cluster-costs.json deleted file mode 100644 index 4d43d67..0000000 --- a/deploy/helm/fadvisor/charts/grafana/dashboards/cluster-costs.json +++ /dev/null @@ -1,1511 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "description": "A dashboard to help manage Kubernetes cluster costs and resources", - "editable": true, - "fiscalYearStartMonth": 0, - "gnetId": 6873, - "graphTooltip": 0, - "iteration": 1641381016806, - "links": [], - "liveNow": false, - "panels": [ - { - "description": "Monthly estimated costs according to latest 1 hour resource usage, Note, if you use virtual kubelet or EKS Pod, we do not compute the virtual node costs.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "currencyJPY" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Value 
#Nodes-Monthly-Estimated-Costs" - }, - "properties": [ - { - "id": "displayName", - "value": "Nodes-Monthly-Estimated-Costs" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #Total-Requests-Monthly-Estimated-Costs" - }, - "properties": [ - { - "id": "displayName", - "value": "Total-Requests-Monthly-Estimated-Costs" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #Total-Usage-Monthly-Estimated-Costs" - }, - "properties": [ - { - "id": "displayName", - "value": "Total-Usage-Monthly-Estimated-Costs" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #Cpu-Requests-Monthly-Estimated-Costs" - }, - "properties": [ - { - "id": "displayName", - "value": "Cpu-Requests-Monthly-Estimated-Costs" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #Ram-Requests-Monthly-Estimated-Costs" - }, - "properties": [ - { - "id": "displayName", - "value": "Ram-Requests-Monthly-Estimated-Costs" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #Cpu-Usage-Monthly-Estimated-Costs" - }, - "properties": [ - { - "id": "displayName", - "value": "Cpu-Usage-Monthly-Estimated-Costs" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #Ram-Usage-Monthly-Estimated-Costs" - }, - "properties": [ - { - "id": "displayName", - "value": "Ram-Usage-Monthly-Estimated-Costs" - } - ] - } - ] - }, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 136, - "options": { - "displayMode": "basic", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showUnfilled": true - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum (\n avg(\n avg_over_time(node_total_hourly_cost[1h])\n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", 
label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)\n )\n \nby (node)) * 730 * ($Discount/100.0)", - "format": "table", - "hide": false, - "interval": "", - "legendFormat": "", - "refId": "Nodes-Monthly-Estimated-Costs" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum(\n sum(kube_pod_container_resource_requests{resource=\"cpu\", unit=\"core\"}) by (container, pod, node, namespace) \n * on (node) group_left() \n avg(\n avg_over_time(node_cpu_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\",label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) \n ) by (node)\n \n)\n\n+\n\nsum(\n sum(kube_pod_container_resource_requests{resource=\"memory\", unit=\"byte\", namespace!=\"\"} / 1024./ 1024. / 1024.) by (container, pod, node, namespace) \n * on (node) group_left() \n avg(\n avg_over_time(node_ram_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)\n ) by (node) \n \n)\n) * 730 * ($Discount/100.0)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "Total-Requests-Monthly-Estimated-Costs" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum (\n sum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) \n * on (node) group_left()\n avg(\n avg_over_time(node_cpu_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\",label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node) \n ) by (node)\n\n+\n\n 
sum(label_replace(avg_over_time(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 \n * on (node) group_left()\n avg(\n avg_over_time(node_ram_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node) \n) * 730 * ($Discount/100.)", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "Total-Usage-Monthly-Estimated-Costs" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum(kube_pod_container_resource_requests{resource=\"cpu\", unit=\"core\"}) by (container, pod, node, namespace) \n * on (node) group_left()\n avg(\n avg_over_time(node_cpu_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\",label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node)\n) * 730 * ($Discount/100.)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "Cpu-Requests-Monthly-Estimated-Costs" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum(kube_pod_container_resource_requests{resource=\"memory\", unit=\"byte\", namespace!=\"\"} / 1024./ 1024. / 1024.) 
by (container, pod, node, namespace) * on (node) group_left()\n avg(\n avg_over_time(node_ram_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node) \n) * 730 * ($Discount/100.)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "Ram-Requests-Monthly-Estimated-Costs" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) * on (node) group_left()\n avg(\n avg_over_time(node_cpu_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node) \n) * 730 * ($Discount/100.)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "Cpu-Usage-Monthly-Estimated-Costs" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum(label_replace(avg_over_time(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 * on (node) group_left()\n avg(\n avg_over_time(node_ram_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node) \n) * 730 * ($Discount/100.)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "Ram-Usage-Monthly-Estimated-Costs" - } - ], - 
"title": "Estimated Monthly Cluster Resource Costs", - "type": "bargauge" - }, - { - "description": "This table shows the comparison of application CPU usage vs the capacity of the node (measured over last 60 minutes)", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "auto", - "displayMode": "auto", - "filterable": false - }, - "mappings": [ - { - "options": { - "C": { - "text": "" - } - }, - "type": "value" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "node" - }, - "properties": [ - { - "id": "displayName", - "value": "Node" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #cpu-requests-alloc-util" - }, - "properties": [ - { - "id": "displayName", - "value": "CPU-Requests-Alloc-Utilization" - }, - { - "id": "unit", - "value": "percent" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #cpu-usage-util" - }, - "properties": [ - { - "id": "displayName", - "value": "CPU-Usage-Utlization" - }, - { - "id": "unit", - "value": "percent" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #cpu-hourly-costs" - }, - "properties": [ - { - "id": "unit", - "value": "currencyJPY" - }, - { - "id": "displayName", - "value": "Cpu-Hourly-Cost" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #cpu-usage-hourly-cost" - }, - "properties": [ - { - "id": "unit", - "value": "currencyJPY" - }, - { - "id": "displayName", - "value": "Cpu-Usage-Hourly-Cost" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #ram-requests-alloc-util" - }, - "properties": [ - { - "id": "displayName", - "value": "RAM-Requests-Alloc-Utilization" - }, - { - "id": "unit", - "value": "percent" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #ram-usage-util" - }, - 
"properties": [ - { - "id": "displayName", - "value": "Ram-Usage-Utilization" - }, - { - "id": "unit", - "value": "percent" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #ram-houly-cost" - }, - "properties": [ - { - "id": "displayName", - "value": "Ram-Hourly-Cost" - }, - { - "id": "unit", - "value": "currencyJPY" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #ram-usage-hourly-cost" - }, - "properties": [ - { - "id": "displayName", - "value": "Ram-Usage-Hourly-Cost" - }, - { - "id": "unit", - "value": "currencyJPY" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #Node-Hourly-Total-Cost" - }, - "properties": [ - { - "id": "unit", - "value": "currencyJPY" - }, - { - "id": "displayName", - "value": "Node-Hourly-Total-Cost" - } - ] - } - ] - }, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 7 - }, - "hideTimeOverride": true, - "id": 90, - "links": [], - "options": { - "footer": { - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true, - "sortBy": [ - { - "desc": true, - "displayName": "RAM-Requests-Alloc-Utilization" - } - ] - }, - "pluginVersion": "8.3.3", - "repeatDirection": "v", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n kube_pod_container_resource_requests{resource=\"cpu\", unit=\"core\"} \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)\n) by (node) \n\n/\n\nsum(\n kube_node_status_capacity{resource=\"cpu\", unit=\"core\"} \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)\n) by (node) \n\n* 100", - "format": "table", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "cpu-requests-alloc-util" - }, - { - 
"datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\") \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)\n) by (node)\n/\nsum(\n kube_node_status_capacity{resource=\"cpu\", unit=\"core\"} \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) \n) by (node) \n\n* 100", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "cpu-usage-util" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "avg(\n avg_over_time(node_cpu_hourly_cost[1h]) \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)\n) by (node) * ($Discount/100.)", - "format": "table", - "hide": false, - "instant": true, - "interval": "1h", - "legendFormat": "", - "refId": "cpu-hourly-costs" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n\n sum(\n label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\") \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) \n ) by (container, pod, node, namespace) \n\n * on (node) group_left() \n \n avg(\n avg_over_time(node_cpu_hourly_cost[1h]) \n * on (node) group_left() 
max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) \n ) by (node) \n\n) by (node) * ($Discount/100.)", - "format": "table", - "hide": false, - "instant": true, - "interval": "1h", - "legendFormat": "", - "refId": "cpu-usage-hourly-cost" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n kube_pod_container_resource_requests{resource=\"memory\", unit=\"byte\", namespace!=\"\"} \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) \n) by (node) * 100\n\n/ \n\nsum(\n kube_node_status_capacity{resource=\"memory\", unit=\"byte\"} \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)\n) by (node)", - "format": "table", - "hide": false, - "instant": true, - "interval": "1h", - "legendFormat": "", - "refId": "ram-requests-alloc-util" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n label_replace(container_memory_usage_bytes{namespace!=\"\",container!=\"POD\", container!=\"\",image!=\"\"}, \"node\", \"$1\", \"instance\",\"(.+)\") \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)\n) by (node) * 100\n\n/\nsum(\n kube_node_status_capacity{resource=\"memory\", unit=\"byte\"} \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)\n) by (node)", - "format": "table", - "hide": false, - "instant": true, - "interval": "1h", - "legendFormat": "", - "refId": "ram-usage-util" - }, - { - "datasource": { - "type": "prometheus", - 
"uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "avg(\n avg_over_time(node_ram_hourly_cost[1h])) by (node) \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}\n) by (node) * ($Discount/100.)", - "format": "table", - "hide": false, - "instant": true, - "interval": "1h", - "legendFormat": "", - "refId": "ram-houly-cost" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n\n sum(\n label_replace(avg_over_time(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\") \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)\n ) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 \n\n * on (node) group_left()\n\n avg(\n avg_over_time(node_ram_hourly_cost[1h]) \n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) \n ) by (node)\n \n) by (node) * ($Discount/100.)\n", - "format": "table", - "hide": false, - "instant": true, - "interval": "1h", - "legendFormat": "", - "refId": "ram-usage-hourly-cost" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "avg(\n avg_over_time(node_total_hourly_cost[1h])\n * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) \n) by (node) * ($Discount/100.)", - "format": "table", - "hide": false, - "instant": true, - "interval": "1h", - "legendFormat": "", - "refId": "Node-Hourly-Total-Cost" - } - ], - "title": "resource cost & utilization by node", - "transformations": [ - { - "id": "merge", - "options": 
{ - "reducers": [] - } - }, - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true - }, - "indexByName": {}, - "renameByName": {} - } - } - ], - "type": "table" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 13 - }, - "id": 133, - "panels": [], - "title": "Clutser Utilization", - "type": "row" - }, - { - "description": "Current CPU utilization from applications usage vs allocatable CPU", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "decimals": 2, - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "max": 100, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "rgba(245, 54, 54, 0.9)", - "value": null - }, - { - "color": "rgba(50, 172, 45, 0.97)", - "value": 30 - }, - { - "color": "#c15c17", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 6, - "x": 0, - "y": 14 - }, - "id": 91, - "links": [], - "maxDataPoints": 100, - "options": { - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\") * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node))\n\n/\n\nsum(kube_node_status_allocatable{resource=\"cpu\", unit=\"core\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)) \n\n* 100", 
- "hide": false, - "interval": "", - "legendFormat": "", - "refId": "B" - } - ], - "title": "CPU Utilization", - "type": "gauge" - }, - { - "description": "Current CPU reservation requests from applications vs allocatable CPU", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "decimals": 2, - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "max": 100, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "rgba(245, 54, 54, 0.9)", - "value": null - }, - { - "color": "rgba(50, 172, 45, 0.97)", - "value": 30 - }, - { - "color": "#c15c17", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 6, - "x": 6, - "y": 14 - }, - "id": 134, - "links": [], - "maxDataPoints": 100, - "options": { - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(kube_pod_container_resource_requests{resource=\"cpu\", unit=\"core\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) ) \n/ \nsum(kube_node_status_allocatable{resource=\"cpu\", unit=\"core\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) )\n* 100", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A", - "step": 10 - } - ], - "title": "CPU Requests", - "type": "gauge" - }, - { - "description": "Current RAM use vs RAM available", - "fieldConfig": { - "defaults": { - "color": { - "mode": 
"thresholds" - }, - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "max": 100, - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "rgba(245, 54, 54, 0.9)", - "value": null - }, - { - "color": "rgba(50, 172, 45, 0.97)", - "value": 30 - }, - { - "color": "#c15c17", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 6, - "x": 12, - "y": 14 - }, - "hideTimeOverride": true, - "id": 80, - "links": [], - "maxDataPoints": 100, - "options": { - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(label_replace(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\"}, \"node\", \"$1\", \"instance\", \"(.*)\") * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node))\n\n/\n\nsum(kube_node_status_allocatable{resource=\"memory\", unit=\"byte\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) ) \n* 100", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A", - "step": 10 - } - ], - "timeFrom": "", - "title": "RAM Utilization", - "type": "gauge" - }, - { - "description": "Current RAM requests vs RAM available", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "max": 100, - "min": 0, - "thresholds": 
{ - "mode": "absolute", - "steps": [ - { - "color": "rgba(245, 54, 54, 0.9)", - "value": null - }, - { - "color": "rgba(50, 172, 45, 0.97)", - "value": 30 - }, - { - "color": "#c15c17", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 6, - "x": 18, - "y": 14 - }, - "id": 92, - "links": [], - "maxDataPoints": 100, - "options": { - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "(\n sum(kube_pod_container_resource_requests{resource=\"memory\", unit=\"byte\", namespace!=\"\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) )\n /\n sum(kube_node_status_allocatable{resource=\"memory\", unit=\"byte\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) )\n) * 100", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A", - "step": 10 - } - ], - "title": "RAM Requests", - "type": "gauge" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 18 - }, - "id": 108, - "panels": [], - "title": "CPU Metrics", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 19 - }, - "hiddenSeries": false, - "id": 116, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, 
- "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.3", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "SUM(kube_node_status_capacity{resource=\"cpu\", unit=\"core\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "capacity", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "SUM(kube_pod_container_resource_requests{resource=\"cpu\", unit=\"core\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "requests", - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "SUM(irate(container_cpu_usage_seconds_total{id=\"/\"}[5m]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "usage", - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "SUM(kube_pod_container_resource_limits{resource=\"cpu\", unit=\"core\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", 
label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)) ", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "limits", - "refId": "D" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Cluster CPUs", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 1, - "format": "short", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "description": "Current CPU use from applications divided by allocatable CPUs", - "fieldConfig": { - "defaults": { - "unit": "percent" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 5, - "w": 24, - "x": 0, - "y": 27 - }, - "hiddenSeries": false, - "hideTimeOverride": true, - "id": 82, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "maxDataPoints": 100, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\nlabel_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\") \n* on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by 
(node)\n)\n/\nsum(\nkube_node_status_allocatable{resource=\"cpu\", unit=\"core\"} \n* on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) \n) \n\n* 100", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "cpu", - "refId": "A", - "step": 10 - } - ], - "thresholds": [], - "timeFrom": "", - "timeRegions": [], - "title": "Cluster CPU Utilization", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percent", - "logBase": 1, - "show": true - }, - { - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 32 - }, - "id": 113, - "panels": [], - "title": "Memory Metrics", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 33 - }, - "hiddenSeries": false, - "id": 117, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.3", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "SUM(kube_node_status_capacity{resource=\"memory\", unit=\"byte\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", 
label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) / 1024 / 1024 / 1024 )", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "capacity", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "SUM(kube_pod_container_resource_requests{resource=\"memory\", unit=\"byte\", namespace!=\"\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node)/ 1024 / 1024 / 1024)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "requests", - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "SUM(container_memory_usage_bytes{image!=\"\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) / 1024 / 1024 / 1024)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "usage", - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "SUM(kube_pod_container_resource_limits{resource=\"memory\", unit=\"byte\", namespace!=\"\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) / 1024 / 1024 / 1024)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "limits", - "refId": "D" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Cluster memory (GB)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:3385", - "format": "decgbytes", - "logBase": 
1, - "show": true - }, - { - "$$hashKey": "object:3386", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "fieldConfig": { - "defaults": { - "unit": "percent" - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 41 - }, - "hiddenSeries": false, - "id": 131, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.3", - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "(\n sum(kube_pod_container_resource_requests{resource=\"memory\", unit=\"byte\", namespace!=\"\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) )\n /\n sum(kube_node_status_allocatable{resource=\"memory\", unit=\"byte\"} * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!=\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}) by (node) )\n) * 100", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "usage", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Cluster Memory Utilization", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:3355", - "format": "percent", - "logBase": 
1, - "show": true - }, - { - "$$hashKey": "object:3356", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - } - ], - "refresh": false, - "schemaVersion": 34, - "style": "dark", - "tags": [ - "cost", - "utilization", - "metrics" - ], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "100", - "value": "100" - }, - "hide": 0, - "name": "Discount", - "options": [ - { - "selected": true, - "text": "100", - "value": "100" - } - ], - "query": "100", - "skipUrlSync": false, - "type": "textbox" - } - ] - }, - "time": { - "from": "now-24h", - "to": "now" - }, - "timepicker": { - "hidden": false, - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "browser", - "title": "Cluster cost & utilization metrics", - "uid": "cluster-costs", - "version": 2, - "weekStart": "" -} \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/grafana/dashboards/costs-dimension.json b/deploy/helm/fadvisor/charts/grafana/dashboards/costs-dimension.json deleted file mode 100644 index 9322c62..0000000 --- a/deploy/helm/fadvisor/charts/grafana/dashboards/costs-dimension.json +++ /dev/null @@ -1,1175 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "iteration": 1641378721229, - "links": [], - "liveNow": false, - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 19, - "panels": [], - "title": "Namespace consumed resource usage costs", - 
"type": "row" - }, - { - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - } - }, - "mappings": [], - "unit": "currencyJPY" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 1 - }, - "id": 16, - "options": { - "displayLabels": [ - "percent" - ], - "legend": { - "displayMode": "list", - "placement": "right", - "values": [] - }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": true - }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum_over_time(namespace:container_cpu_usage_costs_hourly:sum_rate{}[$__range]) + \n sum_over_time(namespace:container_memory_usage_costs_hourly:sum_rate{}[$__range])\n) by (namespace) * ($Discount/100.0)", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "{{namespace}}", - "refId": "A" - } - ], - "title": "last--hours-total-costs-by-namespace{${__from:date:YYYY-MM-DD-hh}->${__to:date:YYYY-MM-DD-hh}}", - "type": "piechart" - }, - { - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - } - }, - "mappings": [], - "unit": "currencyJPY" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 1 - }, - "id": 20, - "options": { - "displayLabels": [ - "percent" - ], - "legend": { - "displayMode": "list", - "placement": "right", - "values": [] - }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": true - }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - 
"expr": "sum(\n sum_over_time(namespace:container_cpu_usage_costs_hourly:sum_rate{}[7d]) + \n sum_over_time(namespace:container_memory_usage_costs_hourly:sum_rate{}[7d])\n) by (namespace) * ($Discount/100.0)", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "title": "Last-7day-costs-by-namespace", - "type": "piechart" - }, - { - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - } - }, - "mappings": [], - "unit": "currencyJPY" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 16, - "y": 1 - }, - "id": 17, - "options": { - "displayLabels": [ - "percent" - ], - "legend": { - "displayMode": "list", - "placement": "right", - "values": [] - }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": true - }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum_over_time(namespace:container_cpu_usage_costs_hourly:sum_rate{}[30d]) + \n sum_over_time(namespace:container_memory_usage_costs_hourly:sum_rate{}[30d])\n) by (namespace) * ($Discount/100.0)", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "title": "Last-30day-costs-by-namespace", - "type": "piechart" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 9 - }, - "id": 2, - "panels": [], - "title": "Namespace-hourly-resource-usage-cost-trends", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "description": "Total Resource Usage Hourly Cost by Namespace", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 10 - }, - "hiddenSeries": false, - "id": 8, - 
"legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": true, - "expr": "sum (\nsum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) * on (node) group_left()\n avg(avg_over_time(node_cpu_hourly_cost[1h])) by (node)\n \n+\n\nsum(label_replace(avg_over_time(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 * on (node) group_left()\n avg(avg_over_time(node_ram_hourly_cost[1h])) by (node)\n) by (namespace) * ($Discount/100.0)", - "interval": "1h", - "legendFormat": "{{namespace}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Resource Usage Hourly Cost", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:3848", - "format": "currencyJPY", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:3849", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "description": "Ram Resource Usage Houly Cost by Namespace", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 
12, - "x": 12, - "y": 10 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": true, - "expr": "sum(\n sum(label_replace(avg_over_time(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 * on (node) group_left()\n avg(avg_over_time(node_ram_hourly_cost[1h])) by (node)\n) by (namespace) * ($Discount/100.0)", - "interval": "1h", - "legendFormat": "{{namespace}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Ram Resource Usage Houly Cost", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:3783", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:3784", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "description": "CPU Resource Usage Hourly cost by namespace", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 18 - }, - "hiddenSeries": false, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - 
"linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "8.3.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": true, - "expr": "sum(\n\nsum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) * on (node) group_left()\n avg(avg_over_time(node_cpu_hourly_cost[1h])) by (node)\n\n\n ) by (namespace) * ($Discount/100.0)", - "interval": "1h", - "legendFormat": "{{namespace}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "CPU Resource Usage Hourly Cost", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:3675", - "format": "currencyJPY", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:3676", - "format": "currencyJPY", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "description": "Estimated Namespace Monthly Cost according latest hourly cost", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - } - }, - "mappings": [ - { - "options": { - "": { - "text": "" - } - }, - "type": "value" - } - ], - "unit": "currencyJPY" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 18 - }, - "id": 10, - "options": { - "displayLabels": [], - "legend": { - "displayMode": "list", - "placement": "right", - "values": [] - }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - 
"fields": "", - "values": true - }, - "text": {}, - "tooltip": { - "mode": "single" - } - }, - "pluginVersion": "7.5.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum (\nsum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) * on (node) group_left()\n avg(avg_over_time(node_cpu_hourly_cost[1h])) by (node)\n\n+\n\nsum(label_replace(avg_over_time(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 * on (node) group_left()\n avg(avg_over_time(node_ram_hourly_cost[1h])) by (node)\n) by (namespace) * 730 * ($Discount/100.0)", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "{{namespace}}", - "refId": "A" - } - ], - "title": "Estimated Namespace Resource Usage Monthly Cost", - "type": "piechart" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 26 - }, - "id": 14, - "panels": [], - "title": "Containers", - "type": "row" - }, - { - "description": "Top K container costs of latest hour", - "fieldConfig": { - "defaults": { - "custom": { - "align": "auto", - "displayMode": "auto" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Value #total-costs" - }, - "properties": [ - { - "id": "displayName", - "value": "total-costs" - }, - { - "id": "unit", - "value": "currencyJPY" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #cpu-usage-costs" - }, - "properties": [ - { - "id": "displayName", - "value": "cpu-usage-costs" - }, - { - "id": "unit", - 
"value": "currencyJPY" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #ram-usage-costs" - }, - "properties": [ - { - "id": "unit", - "value": "currencyJPY" - }, - { - "id": "displayName", - "value": "ram-usage-costs" - } - ] - } - ] - }, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 27 - }, - "id": 23, - "options": { - "footer": { - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true, - "sortBy": [ - { - "desc": true, - "displayName": "total-costs" - } - ] - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "topk(20, sum(\n sum_over_time(namespace:container_cpu_usage_costs_hourly:sum_rate{}[$__range]) + \n sum_over_time(namespace:container_memory_usage_costs_hourly:sum_rate{}[$__range])\n) by (container,namespace,pod) * ($Discount/100.0)\n) ", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "total-costs" - } - ], - "title": "TopK Container Total Usage Costs{${__from:date:YYYY-MM-DD-hh}->${__to:date:YYYY-MM-DD-hh}}", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "container", - "namespace", - "node", - "pod", - "Value #cpu-usage-costs", - "Value #ram-usage-costs", - "Value #total-costs" - ] - } - } - }, - { - "id": "merge", - "options": {} - } - ], - "type": "table" - }, - { - "description": "Top K container costs of latest hour", - "fieldConfig": { - "defaults": { - "custom": { - "align": "auto", - "displayMode": "auto" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Value #total-costs" - }, - "properties": [ - { - "id": "displayName", - "value": "total-costs" - }, - { - "id": "unit", - "value": 
"currencyJPY" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #cpu-usage-costs" - }, - "properties": [ - { - "id": "displayName", - "value": "cpu-usage-costs" - }, - { - "id": "unit", - "value": "currencyJPY" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #ram-usage-costs" - }, - "properties": [ - { - "id": "unit", - "value": "currencyJPY" - }, - { - "id": "displayName", - "value": "ram-usage-costs" - } - ] - } - ] - }, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 36 - }, - "id": 12, - "options": { - "footer": { - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true, - "sortBy": [ - { - "desc": true, - "displayName": "total-costs" - } - ] - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "topk(20, sum (\nsum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) * on (node) group_left()\n avg(avg_over_time(node_cpu_hourly_cost[1h])) by (node)\n\n+\n\nsum(label_replace(avg_over_time(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 * on (node) group_left()\n avg(avg_over_time(node_ram_hourly_cost[1h])) by (node)\n) by (container, pod, node, namespace)) * ($Discount/100.0)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "total-costs" - } - ], - "title": "TopK Container Total Usage Costs Latest 1 Hour", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "container", - "namespace", - "node", - "pod", - "Value #cpu-usage-costs", - "Value #ram-usage-costs", - "Value #total-costs" - ] - } - } - }, 
- { - "id": "merge", - "options": {} - } - ], - "type": "table" - }, - { - "description": "Top K container cpu usage costs of latest hour", - "fieldConfig": { - "defaults": { - "custom": { - "align": "auto", - "displayMode": "auto" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Value #total-costs" - }, - "properties": [ - { - "id": "displayName", - "value": "total-costs" - }, - { - "id": "unit", - "value": "currencyJPY" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #cpu-usage-costs" - }, - "properties": [ - { - "id": "displayName", - "value": "cpu-usage-costs" - }, - { - "id": "unit", - "value": "currencyJPY" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #ram-usage-costs" - }, - "properties": [ - { - "id": "unit", - "value": "currencyJPY" - }, - { - "id": "displayName", - "value": "ram-usage-costs" - } - ] - } - ] - }, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 45 - }, - "id": 21, - "options": { - "footer": { - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "topk(20, sum (\nsum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) * on (node) group_left()\n avg(avg_over_time(node_cpu_hourly_cost[1h])) by (node)\n) by (container, pod, node, namespace) * ($Discount/100.0) )", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "cpu-usage-costs" - } - ], - "title": "TopK Container Cpu Usage Costs Latest 1 Hour", - "transformations": [ - { - 
"id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "container", - "namespace", - "node", - "pod", - "Value #cpu-usage-costs", - "Value #ram-usage-costs", - "Value #total-costs" - ] - } - } - }, - { - "id": "merge", - "options": {} - } - ], - "type": "table" - }, - { - "description": "Top K container costs of latest hour", - "fieldConfig": { - "defaults": { - "custom": { - "align": "auto", - "displayMode": "auto" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Value #total-costs" - }, - "properties": [ - { - "id": "displayName", - "value": "total-costs" - }, - { - "id": "unit", - "value": "currencyJPY" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #cpu-usage-costs" - }, - "properties": [ - { - "id": "displayName", - "value": "cpu-usage-costs" - }, - { - "id": "unit", - "value": "currencyJPY" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #ram-usage-costs" - }, - "properties": [ - { - "id": "unit", - "value": "currencyJPY" - }, - { - "id": "displayName", - "value": "ram-usage-costs" - } - ] - } - ] - }, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 54 - }, - "id": 22, - "options": { - "footer": { - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true, - "sortBy": [ - { - "desc": true, - "displayName": "ram-usage-costs" - } - ] - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "topk(20, sum (\nsum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) * on (node) group_left()\n avg(avg_over_time(node_cpu_hourly_cost[1h])) by 
(node)\n\n+\n\nsum(label_replace(avg_over_time(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 * on (node) group_left()\n avg(avg_over_time(node_ram_hourly_cost[1h])) by (node)\n) by (container, pod, node, namespace) * ($Discount/100.0))", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "ram-usage-costs" - } - ], - "title": "TopK Container Mem Usage Costs Latest 1 Hour", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "container", - "namespace", - "node", - "pod", - "Value #ram-usage-costs" - ] - } - } - }, - { - "id": "merge", - "options": {} - } - ], - "type": "table" - } - ], - "schemaVersion": 34, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "100", - "value": "100" - }, - "hide": 0, - "name": "Discount", - "options": [ - { - "selected": true, - "text": "100", - "value": "100" - } - ], - "query": "100", - "skipUrlSync": false, - "type": "textbox" - } - ] - }, - "time": { - "from": "now-24h", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "Costs by Dimension", - "uid": "Pq1y8i07z", - "version": 2, - "weekStart": "" -} \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/grafana/dashboards/namespace-costs.json b/deploy/helm/fadvisor/charts/grafana/dashboards/namespace-costs.json deleted file mode 100644 index 41776f8..0000000 --- a/deploy/helm/fadvisor/charts/grafana/dashboards/namespace-costs.json +++ /dev/null @@ -1,445 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": 
"dashboard" - } - ] - }, - "description": "", - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": 6, - "iteration": 1641389211629, - "links": [], - "liveNow": false, - "panels": [ - { - "description": "cpu requests and usage costs given namespace ${namespace}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "currencyJPY" - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 0 - }, - "id": 2, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" - }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum(kube_pod_container_resource_requests{resource=\"cpu\", unit=\"core\", namespace=\"$namespace\"}) by (container, pod, node, namespace) \n * on (node) group_left()\n avg(\n avg_over_time(node_cpu_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\",label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node)\n) * ($Discount/100.)", - "instant": false, - "interval": "1h", - "legendFormat": "requests-costs", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - 
"exemplar": false, - "expr": "sum(\n sum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\",namespace=\"$namespace\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) \n * on (node) group_left()\n avg(\n avg_over_time(node_cpu_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\",label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node) \n ) by (node)\n) * ($Discount/100.)", - "hide": false, - "interval": "1h", - "legendFormat": "usage-costs", - "refId": "B" - } - ], - "title": "CPU Hourly Costs Over Time By Namespace ${namespace}", - "type": "timeseries" - }, - { - "description": "ram requests and usage costs given namespace ${namespace}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "currencyJPY" - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 0 - }, - "id": 6, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" - }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n 
sum(label_replace(avg_over_time(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\",namespace=\"$namespace\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 \n * on (node) group_left()\n avg(\n avg_over_time(node_ram_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node)\n) * ($Discount/100.)", - "instant": false, - "interval": "1h", - "legendFormat": "requests-costs", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum(kube_pod_container_resource_requests{resource=\"memory\", unit=\"byte\", namespace=\"$namespace\"} / 1024./ 1024. / 1024.) by (container, pod, node, namespace) * on (node) group_left()\n avg(\n avg_over_time(node_ram_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node) \n) * ($Discount/100.)", - "hide": false, - "interval": "1h", - "legendFormat": "usage-costs", - "refId": "B" - } - ], - "title": "RAM Hourly Costs Over Time By Namespace ${namespace}", - "type": "timeseries" - }, - { - "description": "requests and usage costs given namespace ${namespace}", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - 
"mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "currencyJPY" - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 9 - }, - "id": 3, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" - }, - "tooltip": { - "mode": "single" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum(kube_pod_container_resource_requests{resource=\"cpu\", unit=\"core\", namespace=\"$namespace\"}) by (container, pod, node, namespace) \n * on (node) group_left()\n avg(\n avg_over_time(node_cpu_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\",label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node)\n) * ($Discount/100.)\n\n+\n\nsum(\n sum(kube_pod_container_resource_requests{resource=\"memory\", unit=\"byte\", namespace=\"$namespace\"} / 1024./ 1024. / 1024.) 
by (container, pod, node, namespace) * on (node) group_left()\n avg(\n avg_over_time(node_ram_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node) \n) * ($Discount/100.)", - "instant": false, - "interval": "1h", - "legendFormat": "requests-costs", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum(label_replace(irate(container_cpu_usage_seconds_total{container!=\"POD\", container!=\"\",image!=\"\",namespace=\"$namespace\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) \n * on (node) group_left()\n avg(\n avg_over_time(node_cpu_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\",label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node) \n ) by (node)\n) * ($Discount/100.)\n+\nsum(\n sum(label_replace(avg_over_time(container_memory_working_set_bytes{container!=\"POD\",container!=\"\",image!=\"\",namespace=\"$namespace\"}[1h]), \"node\", \"$1\", \"instance\", \"(.*)\")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 \n * on (node) group_left()\n avg(\n avg_over_time(node_ram_hourly_cost[1h]) * on (node) group_left() max(kube_node_labels{label_beta_kubernetes_io_instance_type!~\"eklet\", label_node_kubernetes_io_instance_type!~\"eklet\"}\n ) by (node)\n ) by (node) \n) * ($Discount/100.)", - "hide": false, - "interval": "1h", - "legendFormat": "usage-costs", - "refId": "B" - } - ], - "title": "Hourly Costs Over Time By Namespace ${namespace}", - "type": "timeseries" - }, - { - "description": "", - "fieldConfig": { - "defaults": { - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "currencyJPY" - }, - "overrides": 
[] - }, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 9 - }, - "id": 5, - "options": { - "colorMode": "background", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "PBFA97CFB590B2093" - }, - "exemplar": false, - "expr": "sum(\n sum_over_time(namespace:container_cpu_usage_costs_hourly:sum_rate{namespace=\"$namespace\"}[$__range]) + \n sum_over_time(namespace:container_memory_usage_costs_hourly:sum_rate{namespace=\"$namespace\"}[$__range])\n) by (namespace) * ($Discount/100.0)", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "title": "namespace(${namespace}) total usage costs", - "type": "stat" - } - ], - "refresh": "", - "schemaVersion": 34, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "test", - "value": "test" - }, - "definition": "label_values(kube_namespace_created{},namespace)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "namespace", - "options": [], - "query": { - "query": "label_values(kube_namespace_created{},namespace)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - }, - { - "current": { - "selected": true, - "text": "100", - "value": "100" - }, - "hide": 0, - "name": "Discount", - "options": [ - { - "selected": true, - "text": "100", - "value": "100" - } - ], - "query": "100", - "skipUrlSync": false, - "type": "textbox" - } - ] - }, - "time": { - "from": "now-12h", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "Namespace Costs", - "uid": "X78y9yAnk", - "version": 6, - "weekStart": "" -} \ No newline at end of file diff --git 
a/deploy/helm/fadvisor/charts/grafana/templates/NOTES.txt b/deploy/helm/fadvisor/charts/grafana/templates/NOTES.txt deleted file mode 100644 index 1fc8436..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/NOTES.txt +++ /dev/null @@ -1,54 +0,0 @@ -1. Get your '{{ .Values.adminUser }}' user password by running: - - kubectl get secret --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo - -2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: - - {{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}.svc.cluster.local -{{ if .Values.ingress.enabled }} - If you bind grafana to 80, please update values in values.yaml and reinstall: - ``` - securityContext: - runAsUser: 0 - runAsGroup: 0 - fsGroup: 0 - - command: - - "setcap" - - "'cap_net_bind_service=+ep'" - - "/usr/sbin/grafana-server &&" - - "sh" - - "/run.sh" - ``` - Details refer to https://grafana.com/docs/installation/configuration/#http-port. - Or grafana would always crash. - - From outside the cluster, the server URL(s) are: -{{- range .Values.ingress.hosts }} - http://{{ . }} -{{- end }} -{{ else }} - Get the Grafana URL to visit by running these commands in the same shell: -{{ if contains "NodePort" .Values.service.type -}} - export NODE_PORT=$(kubectl get --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{ else if contains "LoadBalancer" .Values.service.type -}} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ template "grafana.namespace" . 
}} -w {{ template "grafana.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - http://$SERVICE_IP:{{ .Values.service.port -}} -{{ else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ template "grafana.namespace" . }} -l "app.kubernetes.io/name={{ template "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ template "grafana.namespace" . }} port-forward $POD_NAME 3000 -{{- end }} -{{- end }} - -3. Login with the password from step 1 and the username: {{ .Values.adminUser }} - -{{- if not .Values.persistence.enabled }} -################################################################################# -###### WARNING: Persistence is disabled!!! You will lose your data when ##### -###### the Grafana pod is terminated. ##### -################################################################################# -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/_helpers.tpl b/deploy/helm/fadvisor/charts/grafana/templates/_helpers.tpl deleted file mode 100644 index f0c06aa..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/_helpers.tpl +++ /dev/null @@ -1,163 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "grafana.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "grafana.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "grafana.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create the name of the service account -*/}} -{{- define "grafana.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{- define "grafana.serviceAccountNameTest" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} -{{- else -}} - {{ default "default" .Values.serviceAccount.nameTest }} -{{- end -}} -{{- end -}} - -{{/* -Allow the release namespace to be overridden for multi-namespace deployments in combined charts -*/}} -{{- define "grafana.namespace" -}} - {{- if .Values.namespaceOverride -}} - {{- .Values.namespaceOverride -}} - {{- else -}} - {{- .Release.Namespace -}} - {{- end -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "grafana.labels" -}} -helm.sh/chart: {{ include "grafana.chart" . }} -{{ include "grafana.selectorLabels" . 
}} -{{- if or .Chart.AppVersion .Values.image.tag }} -app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- if .Values.extraLabels }} -{{ toYaml .Values.extraLabels }} -{{- end }} -{{- end -}} - -{{/* -Selector labels -*/}} -{{- define "grafana.selectorLabels" -}} -app.kubernetes.io/name: {{ include "grafana.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "grafana.imageRenderer.labels" -}} -helm.sh/chart: {{ include "grafana.chart" . }} -{{ include "grafana.imageRenderer.selectorLabels" . }} -{{- if or .Chart.AppVersion .Values.image.tag }} -app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Selector labels ImageRenderer -*/}} -{{- define "grafana.imageRenderer.selectorLabels" -}} -app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} - -{{/* -Looks if there's an existing secret and reuse its password. If not it generates -new password and use it. -*/}} -{{- define "grafana.password" -}} -{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) -}} - {{- if $secret -}} - {{- index $secret "data" "admin-password" -}} - {{- else -}} - {{- (randAlphaNum 40) | b64enc | quote -}} - {{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for rbac. -*/}} -{{- define "grafana.rbac.apiVersion" -}} - {{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} - {{- print "rbac.authorization.k8s.io/v1" -}} - {{- else -}} - {{- print "rbac.authorization.k8s.io/v1beta1" -}} - {{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for ingress. 
-*/}} -{{- define "grafana.ingress.apiVersion" -}} - {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) -}} - {{- print "networking.k8s.io/v1" -}} - {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} - {{- print "networking.k8s.io/v1beta1" -}} - {{- else -}} - {{- print "extensions/v1beta1" -}} - {{- end -}} -{{- end -}} - -{{/* -Return if ingress is stable. -*/}} -{{- define "grafana.ingress.isStable" -}} - {{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" -}} -{{- end -}} - -{{/* -Return if ingress supports ingressClassName. -*/}} -{{- define "grafana.ingress.supportsIngressClassName" -}} - {{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}} -{{- end -}} - -{{/* -Return if ingress supports pathType. -*/}} -{{- define "grafana.ingress.supportsPathType" -}} - {{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/_pod.tpl b/deploy/helm/fadvisor/charts/grafana/templates/_pod.tpl deleted file mode 100644 index c73d682..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/_pod.tpl +++ /dev/null @@ -1,560 +0,0 @@ - -{{- define "grafana.pod" -}} -{{- if .Values.schedulerName }} -schedulerName: "{{ .Values.schedulerName }}" -{{- end }} -serviceAccountName: {{ template "grafana.serviceAccountName" . 
}} -automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} -{{- if .Values.securityContext }} -securityContext: -{{ toYaml .Values.securityContext | indent 2 }} -{{- end }} -{{- if .Values.hostAliases }} -hostAliases: -{{ toYaml .Values.hostAliases | indent 2 }} -{{- end }} -{{- if .Values.priorityClassName }} -priorityClassName: {{ .Values.priorityClassName }} -{{- end }} -{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.notifiers.enabled .Values.extraInitContainers) }} -initContainers: -{{- end }} -{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} - - name: init-chown-data - {{- if .Values.initChownData.image.sha }} - image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" - {{- else }} - image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} - securityContext: - runAsNonRoot: false - runAsUser: 0 - command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }}", "/var/lib/grafana"] - resources: -{{ toYaml .Values.initChownData.resources | indent 6 }} - volumeMounts: - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} -{{- end }} -{{- if .Values.dashboards }} - - name: download-dashboards - {{- if .Values.downloadDashboardsImage.sha }} - image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" - {{- else }} - image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} - command: ["/bin/sh"] - args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh 
-x /etc/grafana/download_dashboards.sh" ] - resources: -{{ toYaml .Values.downloadDashboards.resources | indent 6 }} - env: -{{- range $key, $value := .Values.downloadDashboards.env }} - - name: "{{ $key }}" - value: "{{ $value }}" -{{- end }} -{{- if .Values.downloadDashboards.envFromSecret }} - envFrom: - - secretRef: - name: {{ tpl .Values.downloadDashboards.envFromSecret . }} -{{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/download_dashboards.sh" - subPath: download_dashboards.sh - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- end }} -{{- end }} -{{- if .Values.sidecar.notifiers.enabled }} - - name: {{ template "grafana.name" . }}-sc-notifiers - {{- if .Values.sidecar.image.sha }} - image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - - name: METHOD - value: LIST - - name: LABEL - value: "{{ .Values.sidecar.notifiers.label }}" - - name: FOLDER - value: "/etc/grafana/provisioning/notifiers" - - name: RESOURCE - value: {{ quote .Values.sidecar.notifiers.resource }} - {{- if .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ .Values.sidecar.enableUniqueFilenames }}" - {{- end }} - {{- if .Values.sidecar.notifiers.searchNamespace }} - - name: NAMESPACE - value: "{{ .Values.sidecar.notifiers.searchNamespace | join "," }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - resources: -{{ toYaml .Values.sidecar.resources | indent 6 }} -{{- if 
.Values.sidecar.securityContext }} - securityContext: -{{- toYaml .Values.sidecar.securityContext | nindent 6 }} -{{- end }} - volumeMounts: - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" -{{- end}} -{{- if .Values.extraInitContainers }} -{{ toYaml .Values.extraInitContainers | indent 2 }} -{{- end }} -{{- if .Values.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end}} -{{- end }} -{{- if not .Values.enableKubeBackwardCompatibility }} -enableServiceLinks: {{ .Values.enableServiceLinks }} -{{- end }} -containers: -{{- if .Values.sidecar.dashboards.enabled }} - - name: {{ template "grafana.name" . }}-sc-dashboard - {{- if .Values.sidecar.image.sha }} - image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - - name: METHOD - value: {{ .Values.sidecar.dashboards.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.dashboards.label }}" - {{- if .Values.sidecar.dashboards.labelValue }} - - name: LABEL_VALUE - value: {{ quote .Values.sidecar.dashboards.labelValue }} - {{- end }} - - name: FOLDER - value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }}" - - name: RESOURCE - value: {{ quote .Values.sidecar.dashboards.resource }} - {{- if .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ .Values.sidecar.enableUniqueFilenames }}" - {{- end }} - {{- if .Values.sidecar.dashboards.searchNamespace }} - - name: NAMESPACE - value: "{{ .Values.sidecar.dashboards.searchNamespace | join "," }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - {{- if .Values.sidecar.dashboards.folderAnnotation }} - - name: FOLDER_ANNOTATION - value: "{{ .Values.sidecar.dashboards.folderAnnotation }}" - {{- end }} - {{- if .Values.sidecar.dashboards.script }} - - name: SCRIPT - value: "{{ .Values.sidecar.dashboards.script }}" - {{- end }} - resources: -{{ toYaml .Values.sidecar.resources | indent 6 }} -{{- if .Values.sidecar.securityContext }} - securityContext: -{{- toYaml .Values.sidecar.securityContext | nindent 6 }} -{{- end }} - volumeMounts: - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} - {{- if .Values.sidecar.dashboards.extraMounts }} - {{- toYaml .Values.sidecar.dashboards.extraMounts | trim | nindent 6}} - {{- end }} -{{- end}} -{{- if .Values.sidecar.datasources.enabled }} - - name: {{ template "grafana.name" . 
}}-sc-datasources - {{- if .Values.sidecar.image.sha }} - image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - - name: METHOD - value: {{ .Values.sidecar.datasources.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.datasources.label }}" - {{- if .Values.sidecar.datasources.labelValue }} - - name: LABEL_VALUE - value: {{ quote .Values.sidecar.datasources.labelValue }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/datasources" - - name: RESOURCE - value: {{ quote .Values.sidecar.datasources.resource }} - {{- if .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ .Values.sidecar.enableUniqueFilenames }}" - {{- end }} - {{- if .Values.sidecar.datasources.searchNamespace }} - - name: NAMESPACE - value: "{{ .Values.sidecar.datasources.searchNamespace | join "," }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) 
}} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - - name: REQ_URL - value: {{ .Values.sidecar.datasources.reloadURL }} - - name: REQ_METHOD - value: POST - resources: -{{ toYaml .Values.sidecar.resources | indent 6 }} -{{- if .Values.sidecar.securityContext }} - securityContext: -{{- toYaml .Values.sidecar.securityContext | nindent 6 }} -{{- end }} - volumeMounts: - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} - - name: {{ .Chart.Name }} - {{- if .Values.image.sha }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}" - {{- else }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.command }} - command: - {{- range .Values.command }} - - {{ . }} - {{- end }} - {{- end}} -{{- if .Values.containerSecurityContext }} - securityContext: -{{- toYaml .Values.containerSecurityContext | nindent 6 }} -{{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/grafana.ini" - subPath: grafana.ini - {{- if .Values.ldap.enabled }} - - name: ldap - mountPath: "/etc/grafana/ldap.toml" - subPath: ldap.toml - {{- end }} - {{- range .Values.extraConfigmapMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - - name: storage - mountPath: "/var/lib/grafana" -{{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} -{{- end }} -{{- if .Values.dashboards }} -{{- range $provider, $dashboards := .Values.dashboards }} -{{- range $key, $value := $dashboards }} -{{- if (or (hasKey $value "json") (hasKey $value "file")) }} - - name: dashboards-{{ $provider }} - mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" - subPath: "{{ $key }}.json" -{{- end }} -{{- end }} -{{- end }} -{{- end -}} -{{- if .Values.dashboardsConfigMaps }} 
-{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }} - - name: dashboards-{{ . }} - mountPath: "/var/lib/grafana/dashboards/{{ . }}" -{{- end }} -{{- end }} -{{- if .Values.datasources }} -{{- range (keys .Values.datasources | sortAlpha) }} - - name: config - mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" - subPath: {{ . | quote }} -{{- end }} -{{- end }} -{{- if .Values.notifiers }} -{{- range (keys .Values.notifiers | sortAlpha) }} - - name: config - mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" - subPath: {{ . | quote }} -{{- end }} -{{- end }} -{{- if .Values.dashboardProviders }} -{{- range (keys .Values.dashboardProviders | sortAlpha) }} - - name: config - mountPath: "/etc/grafana/provisioning/dashboards/{{ . }}" - subPath: {{ . | quote }} -{{- end }} -{{- end }} -{{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} -{{ if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" - subPath: provider.yaml -{{- end}} -{{- end}} -{{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} -{{- if .Values.sidecar.notifiers.enabled }} - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" -{{- end}} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - subPath: {{ .subPath | default "" }} - {{- end }} - {{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - {{- end }} - ports: - - name: {{ .Values.service.portName }} - containerPort: {{ .Values.service.port }} - protocol: TCP 
- - name: {{ .Values.podPortName }} - containerPort: 3000 - protocol: TCP - env: - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: GF_SECURITY_ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: GF_SECURITY_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if .Values.plugins }} - - name: GF_INSTALL_PLUGINS - valueFrom: - configMapKeyRef: - name: {{ template "grafana.fullname" . }} - key: plugins - {{- end }} - {{- if .Values.smtp.existingSecret }} - - name: GF_SMTP_USER - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.userKey | default "user" }} - - name: GF_SMTP_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.passwordKey | default "password" }} - {{- end }} - {{- if .Values.imageRenderer.enabled }} - - name: GF_RENDERING_SERVER_URL - value: http://{{ template "grafana.fullname" . }}-image-renderer.{{ template "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render - - name: GF_RENDERING_CALLBACK_URL - value: {{ .Values.imageRenderer.grafanaProtocol }}://{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} - {{- end }} - - name: GF_PATHS_DATA - value: {{ (get .Values "grafana.ini").paths.data }} - - name: GF_PATHS_LOGS - value: {{ (get .Values "grafana.ini").paths.logs }} - - name: GF_PATHS_PLUGINS - value: {{ (get .Values "grafana.ini").paths.plugins }} - - name: GF_PATHS_PROVISIONING - value: {{ (get .Values "grafana.ini").paths.provisioning }} - {{- range $key, $value := .Values.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: -{{ toYaml $value | indent 10 }} - {{- end }} -{{- range $key, $value := .Values.env }} - - name: "{{ tpl $key $ }}" - value: "{{ tpl (print $value) $ }}" -{{- end }} - {{- if or .Values.envFromSecret (or .Values.envRenderSecret .Values.envFromSecrets) }} - envFrom: - {{- if .Values.envFromSecret }} - - secretRef: - name: {{ tpl .Values.envFromSecret . }} - {{- end }} - {{- if .Values.envRenderSecret }} - - secretRef: - name: {{ template "grafana.fullname" . }}-env - {{- end }} - {{- range .Values.envFromSecrets }} - - secretRef: - name: {{ .name }} - optional: {{ .optional | default false }} - {{- end }} - {{- end }} - livenessProbe: -{{ toYaml .Values.livenessProbe | indent 6 }} - readinessProbe: -{{ toYaml .Values.readinessProbe | indent 6 }} - resources: -{{ toYaml .Values.resources | indent 6 }} -{{- with .Values.extraContainers }} -{{ tpl . $ | indent 2 }} -{{- end }} -{{- with .Values.nodeSelector }} -nodeSelector: -{{ toYaml . | indent 2 }} -{{- end }} -{{- with .Values.affinity }} -affinity: -{{ toYaml . | indent 2 }} -{{- end }} -{{- with .Values.tolerations }} -tolerations: -{{ toYaml . | indent 2 }} -{{- end }} -volumes: - - name: config - configMap: - name: {{ template "grafana.fullname" . }} -{{- range .Values.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} -{{- end }} - {{- if .Values.dashboards }} - {{- range (keys .Values.dashboards | sortAlpha) }} - - name: dashboards-{{ . 
}} - configMap: - name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} - {{- end }} - {{- end }} - {{- if .Values.dashboardsConfigMaps }} - {{ $root := . }} - {{- range $provider, $name := .Values.dashboardsConfigMaps }} - - name: dashboards-{{ $provider }} - configMap: - name: {{ tpl $name $root }} - {{- end }} - {{- end }} - {{- if .Values.ldap.enabled }} - - name: ldap - secret: - {{- if .Values.ldap.existingSecret }} - secretName: {{ .Values.ldap.existingSecret }} - {{- else }} - secretName: {{ template "grafana.fullname" . }} - {{- end }} - items: - - key: ldap-toml - path: ldap.toml - {{- end }} -{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} - - name: storage - persistentVolumeClaim: - claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }} -{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }} -# nothing -{{- else }} - - name: storage -{{- if .Values.persistence.inMemory.enabled }} - emptyDir: - medium: Memory -{{- if .Values.persistence.inMemory.sizeLimit }} - sizeLimit: {{ .Values.persistence.inMemory.sizeLimit }} -{{- end -}} -{{- else }} - emptyDir: {} -{{- end -}} -{{- end -}} -{{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - emptyDir: {} -{{- if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - configMap: - name: {{ template "grafana.fullname" . 
}}-config-dashboards -{{- end }} -{{- end }} -{{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - emptyDir: {} -{{- end -}} -{{- if .Values.sidecar.notifiers.enabled }} - - name: sc-notifiers-volume - emptyDir: {} -{{- end -}} -{{- range .Values.extraSecretMounts }} -{{- if .secretName }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - defaultMode: {{ .defaultMode }} -{{- else if .projected }} - - name: {{ .name }} - projected: {{- toYaml .projected | nindent 6 }} -{{- else if .csi }} - - name: {{ .name }} - csi: {{- toYaml .csi | nindent 6 }} -{{- end }} -{{- end }} -{{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - {{- if .existingClaim }} - persistentVolumeClaim: - claimName: {{ .existingClaim }} - {{- else if .hostPath }} - hostPath: - path: {{ .hostPath }} - {{- else }} - emptyDir: {} - {{- end }} -{{- end }} -{{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - emptyDir: {} -{{- end -}} -{{- if .Values.extraContainerVolumes }} -{{ toYaml .Values.extraContainerVolumes | indent 2 }} -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/clusterrole.yaml b/deploy/helm/fadvisor/charts/grafana/templates/clusterrole.yaml deleted file mode 100644 index f09e065..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/clusterrole.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if and .Values.rbac.create (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} - name: {{ template "grafana.fullname" . 
}}-clusterrole -{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }} -rules: -{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} -- apiGroups: [""] # "" indicates the core API group - resources: ["configmaps", "secrets"] - verbs: ["get", "watch", "list"] -{{- end}} -{{- with .Values.rbac.extraClusterRoleRules }} -{{ toYaml . | indent 0 }} -{{- end}} -{{- else }} -rules: [] -{{- end}} -{{- end}} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/clusterrolebinding.yaml b/deploy/helm/fadvisor/charts/grafana/templates/clusterrolebinding.yaml deleted file mode 100644 index 4accbfa..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "grafana.fullname" . }}-clusterrolebinding - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -subjects: - - kind: ServiceAccount - name: {{ template "grafana.serviceAccountName" . }} - namespace: {{ template "grafana.namespace" . }} -roleRef: - kind: ClusterRole -{{- if (not .Values.rbac.useExistingRole) }} - name: {{ template "grafana.fullname" . 
}}-clusterrole -{{- else }} - name: {{ .Values.rbac.useExistingRole }} -{{- end }} - apiGroup: rbac.authorization.k8s.io -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/configmap-dashboard-provider.yaml b/deploy/helm/fadvisor/charts/grafana/templates/configmap-dashboard-provider.yaml deleted file mode 100644 index 65d7385..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/configmap-dashboard-provider.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if .Values.sidecar.dashboards.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} - name: {{ template "grafana.fullname" . }}-config-dashboards - namespace: {{ template "grafana.namespace" . }} -data: - provider.yaml: |- - apiVersion: 1 - providers: - - name: '{{ .Values.sidecar.dashboards.provider.name }}' - orgId: {{ .Values.sidecar.dashboards.provider.orgid }} - {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} - folder: '{{ .Values.sidecar.dashboards.provider.folder }}' - {{- end}} - type: {{ .Values.sidecar.dashboards.provider.type }} - disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} - allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} - updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} - options: - foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} - path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }} -{{- end}} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/configmap.yaml b/deploy/helm/fadvisor/charts/grafana/templates/configmap.yaml deleted file mode 100644 index c72219f..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/configmap.yaml +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -data: -{{- if .Values.plugins }} - plugins: {{ join "," .Values.plugins }} -{{- end }} - grafana.ini: | -{{- range $key, $value := index .Values "grafana.ini" }} - [{{ $key }}] - {{- range $elem, $elemVal := $value }} - {{- if kindIs "invalid" $elemVal }} - {{ $elem }} = - {{- else if kindIs "string" $elemVal }} - {{ $elem }} = {{ tpl $elemVal $ }} - {{- else }} - {{ $elem }} = {{ $elemVal }} - {{- end }} - {{- end }} -{{- end }} - -{{- if .Values.datasources }} -{{ $root := . 
}} - {{- range $key, $value := .Values.datasources }} - {{ $key }}: | -{{ tpl (toYaml $value | indent 4) $root }} - {{- end -}} -{{- end -}} - -{{- if .Values.notifiers }} - {{- range $key, $value := .Values.notifiers }} - {{ $key }}: | -{{ toYaml $value | indent 4 }} - {{- end -}} -{{- end -}} - -{{- if .Values.dashboardProviders }} - {{- range $key, $value := .Values.dashboardProviders }} - {{ $key }}: | -{{ toYaml $value | indent 4 }} - {{- end -}} -{{- end -}} - -{{- if .Values.dashboards }} - download_dashboards.sh: | - #!/usr/bin/env sh - set -euf - {{- if .Values.dashboardProviders }} - {{- range $key, $value := .Values.dashboardProviders }} - {{- range $value.providers }} - mkdir -p {{ .options.path }} - {{- end }} - {{- end }} - {{- end }} - - {{- range $provider, $dashboards := .Values.dashboards }} - {{- range $key, $value := $dashboards }} - {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} - curl -skf \ - --connect-timeout 60 \ - --max-time 60 \ - {{- if not $value.b64content }} - -H "Accept: application/json" \ - {{- if $value.token }} - -H "Authorization: token {{ $value.token }}" \ - {{- end }} - -H "Content-Type: application/json;charset=UTF-8" \ - {{ end }} - {{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! 
s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \ - > "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" - {{- end -}} - {{- end }} - {{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/dashboards-json-configmap.yaml b/deploy/helm/fadvisor/charts/grafana/templates/dashboards-json-configmap.yaml deleted file mode 100644 index 59e0be6..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/dashboards-json-configmap.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if .Values.dashboards }} -{{ $files := .Files }} -{{- range $provider, $dashboards := .Values.dashboards }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }} - namespace: {{ template "grafana.namespace" $ }} - labels: - {{- include "grafana.labels" $ | nindent 4 }} - dashboard-provider: {{ $provider }} -{{- if $dashboards }} -data: -{{- $dashboardFound := false }} -{{- range $key, $value := $dashboards }} -{{- if (or (hasKey $value "json") (hasKey $value "file")) }} -{{- $dashboardFound = true }} -{{ print $key | indent 2 }}.json: -{{- if hasKey $value "json" }} - |- -{{ $value.json | indent 6 }} -{{- end }} -{{- if hasKey $value "file" }} -{{ toYaml ( $files.Get $value.file ) | indent 4}} -{{- end }} -{{- end }} -{{- end }} -{{- if not $dashboardFound }} - {} -{{- end }} -{{- end }} ---- -{{- end }} - -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/deployment.yaml b/deploy/helm/fadvisor/charts/grafana/templates/deployment.yaml deleted file mode 100644 index 8dbe5e1..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/deployment.yaml +++ /dev/null @@ -1,50 +0,0 @@ -{{ if (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc")) }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ template "grafana.namespace" . 
}} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- if .Values.labels }} -{{ toYaml .Values.labels | indent 4 }} -{{- end }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: - {{- if and (not .Values.autoscaling.enabled) (.Values.replicas) }} - replicas: {{ .Values.replicas }} - {{- end }} - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} -{{- with .Values.deploymentStrategy }} - strategy: -{{ toYaml . | trim | indent 4 }} -{{- end }} - template: - metadata: - labels: - {{- include "grafana.selectorLabels" . | nindent 8 }} -{{- with .Values.podLabels }} -{{ toYaml . | indent 8 }} -{{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} - checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} -{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} -{{- end }} -{{- if .Values.envRenderSecret }} - checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} -{{- end }} -{{- with .Values.podAnnotations }} -{{ toYaml . | indent 8 }} -{{- end }} - spec: - {{- include "grafana.pod" . 
| nindent 6 }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/headless-service.yaml b/deploy/helm/fadvisor/charts/grafana/templates/headless-service.yaml deleted file mode 100644 index 1df42e9..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/headless-service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset"))}} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "grafana.fullname" . }}-headless - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: - clusterIP: None - selector: - {{- include "grafana.selectorLabels" . | nindent 4 }} - type: ClusterIP - ports: - - protocol: TCP - port: 3000 - targetPort: 3000 -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/hpa.yaml b/deploy/helm/fadvisor/charts/grafana/templates/hpa.yaml deleted file mode 100644 index 9c186d7..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/hpa.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ template "grafana.fullname" . }} - labels: - app.kubernetes.io/name: {{ template "grafana.name" . }} - helm.sh/chart: {{ template "grafana.chart" . }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ template "grafana.fullname" . 
}} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: -{{ toYaml .Values.autoscaling.metrics | indent 4 }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/image-renderer-deployment.yaml b/deploy/helm/fadvisor/charts/grafana/templates/image-renderer-deployment.yaml deleted file mode 100644 index 3976995..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/image-renderer-deployment.yaml +++ /dev/null @@ -1,119 +0,0 @@ -{{ if .Values.imageRenderer.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "grafana.fullname" . }}-image-renderer - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} -{{- if .Values.imageRenderer.labels }} -{{ toYaml .Values.imageRenderer.labels | indent 4 }} -{{- end }} -{{- with .Values.imageRenderer.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: - replicas: {{ .Values.imageRenderer.replicas }} - revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} - selector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} -{{- with .Values.imageRenderer.deploymentStrategy }} - strategy: -{{ toYaml . | trim | indent 4 }} -{{- end }} - template: - metadata: - labels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} -{{- with .Values.imageRenderer.podLabels }} -{{ toYaml . | indent 8 }} -{{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} -{{- with .Values.imageRenderer.podAnnotations }} -{{ toYaml . 
| indent 8 }} -{{- end }} - spec: - - {{- if .Values.imageRenderer.schedulerName }} - schedulerName: "{{ .Values.imageRenderer.schedulerName }}" - {{- end }} - {{- if .Values.imageRenderer.serviceAccountName }} - serviceAccountName: "{{ .Values.imageRenderer.serviceAccountName }}" - {{- end }} - {{- if .Values.imageRenderer.securityContext }} - securityContext: - {{- toYaml .Values.imageRenderer.securityContext | nindent 8 }} - {{- end }} - {{- if .Values.imageRenderer.hostAliases }} - hostAliases: - {{- toYaml .Values.imageRenderer.hostAliases | nindent 8 }} - {{- end }} - {{- if .Values.imageRenderer.priorityClassName }} - priorityClassName: {{ .Values.imageRenderer.priorityClassName }} - {{- end }} - {{- if .Values.imageRenderer.image.pullSecrets }} - imagePullSecrets: - {{- range .Values.imageRenderer.image.pullSecrets }} - - name: {{ . }} - {{- end}} - {{- end }} - containers: - - name: {{ .Chart.Name }}-image-renderer - {{- if .Values.imageRenderer.image.sha }} - image: "{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" - {{- else }} - image: "{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} - {{- if .Values.imageRenderer.command }} - command: - {{- range .Values.imageRenderer.command }} - - {{ . 
}} - {{- end }} - {{- end}} - ports: - - name: {{ .Values.imageRenderer.service.portName }} - containerPort: {{ .Values.imageRenderer.service.port }} - protocol: TCP - livenessProbe: - httpGet: - path: / - port: {{ .Values.imageRenderer.service.portName }} - env: - - name: HTTP_PORT - value: {{ .Values.imageRenderer.service.port | quote }} - {{- range $key, $value := .Values.imageRenderer.env }} - - name: {{ $key | quote }} - value: {{ $value | quote }} - {{- end }} - securityContext: - capabilities: - drop: ['all'] - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - volumeMounts: - - mountPath: /tmp - name: image-renderer-tmpfs - {{- with .Values.imageRenderer.resources }} - resources: -{{ toYaml . | indent 12 }} - {{- end }} - {{- with .Values.imageRenderer.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.imageRenderer.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.imageRenderer.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} - {{- end }} - volumes: - - name: image-renderer-tmpfs - emptyDir: {} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/image-renderer-network-policy.yaml b/deploy/helm/fadvisor/charts/grafana/templates/image-renderer-network-policy.yaml deleted file mode 100644 index f8ca73a..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/image-renderer-network-policy.yaml +++ /dev/null @@ -1,76 +0,0 @@ -{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitIngress) }} ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ template "grafana.fullname" . }}-image-renderer-ingress - namespace: {{ template "grafana.namespace" . }} - annotations: - comment: Limit image-renderer ingress traffic from grafana -spec: - podSelector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . 
| nindent 6 }} - {{- if .Values.imageRenderer.podLabels }} - {{ toYaml .Values.imageRenderer.podLabels | nindent 6 }} - {{- end }} - - policyTypes: - - Ingress - ingress: - - ports: - - port: {{ .Values.imageRenderer.service.port }} - protocol: TCP - from: - - namespaceSelector: - matchLabels: - name: {{ template "grafana.namespace" . }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 14 }} - {{- if .Values.podLabels }} - {{ toYaml .Values.podLabels | nindent 14 }} - {{- end }} -{{ end }} - -{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitEgress) }} ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ template "grafana.fullname" . }}-image-renderer-egress - namespace: {{ template "grafana.namespace" . }} - annotations: - comment: Limit image-renderer egress traffic to grafana -spec: - podSelector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - {{- if .Values.imageRenderer.podLabels }} - {{ toYaml .Values.imageRenderer.podLabels | nindent 6 }} - {{- end }} - - policyTypes: - - Egress - egress: - # allow dns resolution - - ports: - - port: 53 - protocol: UDP - - port: 53 - protocol: TCP - # talk only to grafana - - ports: - - port: {{ .Values.service.port }} - protocol: TCP - to: - - namespaceSelector: - matchLabels: - name: {{ template "grafana.namespace" . }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . 
| nindent 14 }} - {{- if .Values.podLabels }} - {{ toYaml .Values.podLabels | nindent 14 }} - {{- end }} -{{ end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/image-renderer-service.yaml b/deploy/helm/fadvisor/charts/grafana/templates/image-renderer-service.yaml deleted file mode 100644 index f29586c..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/image-renderer-service.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{ if .Values.imageRenderer.enabled }} -{{ if .Values.imageRenderer.service.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "grafana.fullname" . }}-image-renderer - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} -{{- if .Values.imageRenderer.service.labels }} -{{ toYaml .Values.imageRenderer.service.labels | indent 4 }} -{{- end }} -{{- with .Values.imageRenderer.service.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: - type: ClusterIP - {{- if .Values.imageRenderer.service.clusterIP }} - clusterIP: {{ .Values.imageRenderer.service.clusterIP }} - {{end}} - ports: - - name: {{ .Values.imageRenderer.service.portName }} - port: {{ .Values.imageRenderer.service.port }} - protocol: TCP - targetPort: {{ .Values.imageRenderer.service.targetPort }} - selector: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} -{{ end }} -{{ end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/ingress.yaml b/deploy/helm/fadvisor/charts/grafana/templates/ingress.yaml deleted file mode 100644 index 7699cec..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/ingress.yaml +++ /dev/null @@ -1,78 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} -{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) 
"true" -}} -{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} -{{- $fullName := include "grafana.fullname" . -}} -{{- $servicePort := .Values.service.port -}} -{{- $ingressPath := .Values.ingress.path -}} -{{- $ingressPathType := .Values.ingress.pathType -}} -{{- $extraPaths := .Values.ingress.extraPaths -}} -apiVersion: {{ include "grafana.ingress.apiVersion" . }} -kind: Ingress -metadata: - name: {{ $fullName }} - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- if .Values.ingress.labels }} -{{ toYaml .Values.ingress.labels | indent 4 }} -{{- end }} - {{- if .Values.ingress.annotations }} - annotations: - {{- range $key, $value := .Values.ingress.annotations }} - {{ $key }}: {{ tpl $value $ | quote }} - {{- end }} - {{- end }} -spec: - {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} - ingressClassName: {{ .Values.ingress.ingressClassName }} - {{- end -}} -{{- if .Values.ingress.tls }} - tls: -{{ tpl (toYaml .Values.ingress.tls) $ | indent 4 }} -{{- end }} - rules: - {{- if .Values.ingress.hosts }} - {{- range .Values.ingress.hosts }} - - host: {{ tpl . 
$}} - http: - paths: -{{- if $extraPaths }} -{{ toYaml $extraPaths | indent 10 }} -{{- end }} - - path: {{ $ingressPath }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $fullName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- end }} - {{- else }} - - http: - paths: - - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $fullName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- if $ingressPath }} - path: {{ $ingressPath }} - {{- end }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - {{- end -}} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/networkpolicy.yaml b/deploy/helm/fadvisor/charts/grafana/templates/networkpolicy.yaml deleted file mode 100644 index fc24382..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/networkpolicy.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{{- if .Values.networkPolicy.enabled }} -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- if .Values.labels }} -{{ toYaml .Values.labels | indent 4 }} -{{- end }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - ingress: - - ports: - - port: {{ .Values.service.targetPort }} - {{- if not .Values.networkPolicy.allowExternal }} - from: - - podSelector: - matchLabels: - {{ template "grafana.fullname" . 
}}-client: "true" - {{- if .Values.networkPolicy.explicitNamespacesSelector }} - namespaceSelector: - {{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} - {{- end }} - - podSelector: - matchLabels: - {{- include "grafana.labels" . | nindent 14 }} - role: read - {{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/poddisruptionbudget.yaml b/deploy/helm/fadvisor/charts/grafana/templates/poddisruptionbudget.yaml deleted file mode 100644 index 61813a4..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.podDisruptionBudget }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- if .Values.labels }} -{{ toYaml .Values.labels | indent 4 }} -{{- end }} -spec: -{{- if .Values.podDisruptionBudget.minAvailable }} - minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} -{{- end }} -{{- if .Values.podDisruptionBudget.maxUnavailable }} - maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} -{{- end }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/podsecuritypolicy.yaml b/deploy/helm/fadvisor/charts/grafana/templates/podsecuritypolicy.yaml deleted file mode 100644 index 7de6c02..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/podsecuritypolicy.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- if .Values.rbac.pspEnabled }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "grafana.fullname" . }} - labels: - {{- include "grafana.labels" . 
| nindent 4 }} - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - {{- if .Values.rbac.pspUseAppArmor }} - apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' - apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - {{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - # Default set from Docker, with DAC_OVERRIDE and CHOWN - - ALL - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'csi' - - 'secret' - - 'downwardAPI' - - 'persistentVolumeClaim' - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - readOnlyRootFilesystem: false -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/pvc.yaml b/deploy/helm/fadvisor/charts/grafana/templates/pvc.yaml deleted file mode 100644 index 8d93f5c..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/pvc.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.persistence.annotations }} - annotations: -{{ toYaml . | indent 4 }} - {{- end }} - {{- with .Values.persistence.finalizers }} - finalizers: -{{ toYaml . | indent 4 }} - {{- end }} -spec: - accessModes: - {{- range .Values.persistence.accessModes }} - - {{ . 
| quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.persistence.size | quote }} - {{- if .Values.persistence.storageClassName }} - storageClassName: {{ .Values.persistence.storageClassName }} - {{- end -}} - {{- with .Values.persistence.selectorLabels }} - selector: - matchLabels: -{{ toYaml . | indent 6 }} - {{- end }} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/role.yaml b/deploy/helm/fadvisor/charts/grafana/templates/role.yaml deleted file mode 100644 index 6a1890f..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/role.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} -apiVersion: {{ template "grafana.rbac.apiVersion" . }} -kind: Role -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }} -rules: -{{- if .Values.rbac.pspEnabled }} -- apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ template "grafana.fullname" . }}] -{{- end }} -{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }} -- apiGroups: [""] # "" indicates the core API group - resources: ["configmaps", "secrets"] - verbs: ["get", "watch", "list"] -{{- end }} -{{- with .Values.rbac.extraRoleRules }} -{{ toYaml . 
| indent 0 }} -{{- end}} -{{- else }} -rules: [] -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/rolebinding.yaml b/deploy/helm/fadvisor/charts/grafana/templates/rolebinding.yaml deleted file mode 100644 index e010725..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/rolebinding.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: {{ template "grafana.rbac.apiVersion" . }} -kind: RoleBinding -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role -{{- if (not .Values.rbac.useExistingRole) }} - name: {{ template "grafana.fullname" . }} -{{- else }} - name: {{ .Values.rbac.useExistingRole }} -{{- end }} -subjects: -- kind: ServiceAccount - name: {{ template "grafana.serviceAccountName" . }} - namespace: {{ template "grafana.namespace" . }} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/secret-env.yaml b/deploy/helm/fadvisor/charts/grafana/templates/secret-env.yaml deleted file mode 100644 index 5c09313..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/secret-env.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.envRenderSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "grafana.fullname" . }}-env - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . 
| nindent 4 }} -type: Opaque -data: -{{- range $key, $val := .Values.envRenderSecret }} - {{ $key }}: {{ $val | b64enc | quote }} -{{- end -}} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/secret.yaml b/deploy/helm/fadvisor/charts/grafana/templates/secret.yaml deleted file mode 100644 index 6d06cf5..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/secret.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -type: Opaque -data: - {{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} - admin-user: {{ .Values.adminUser | b64enc | quote }} - {{- if .Values.adminPassword }} - admin-password: {{ .Values.adminPassword | b64enc | quote }} - {{- else }} - admin-password: {{ template "grafana.password" . }} - {{- end }} - {{- end }} - {{- if not .Values.ldap.existingSecret }} - ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} - {{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/service.yaml b/deploy/helm/fadvisor/charts/grafana/templates/service.yaml deleted file mode 100644 index ba84ef9..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/service.yaml +++ /dev/null @@ -1,51 +0,0 @@ -{{ if .Values.service.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ template "grafana.namespace" . 
}} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- if .Values.service.labels }} -{{ toYaml .Values.service.labels | indent 4 }} -{{- end }} -{{- with .Values.service.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: -{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} - type: ClusterIP - {{- if .Values.service.clusterIP }} - clusterIP: {{ .Values.service.clusterIP }} - {{end}} -{{- else if eq .Values.service.type "LoadBalancer" }} - type: {{ .Values.service.type }} - {{- if .Values.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.service.loadBalancerIP }} - {{- end }} - {{- if .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: -{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} - {{- end -}} -{{- else }} - type: {{ .Values.service.type }} -{{- end }} -{{- if .Values.service.externalIPs }} - externalIPs: -{{ toYaml .Values.service.externalIPs | indent 4 }} -{{- end }} - ports: - - name: {{ .Values.service.portName }} - port: {{ .Values.service.port }} - protocol: TCP - targetPort: {{ .Values.service.targetPort }} -{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} - nodePort: {{.Values.service.nodePort}} -{{ end }} - {{- if .Values.extraExposePorts }} - {{- tpl (toYaml .Values.extraExposePorts) . | indent 4 }} - {{- end }} - selector: - {{- include "grafana.selectorLabels" . | nindent 4 }} -{{ end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/serviceaccount.yaml b/deploy/helm/fadvisor/charts/grafana/templates/serviceaccount.yaml deleted file mode 100644 index 7576eee..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- with .Values.serviceAccount.annotations }} - annotations: -{{ toYaml . 
| indent 4 }} -{{- end }} - name: {{ template "grafana.serviceAccountName" . }} - namespace: {{ template "grafana.namespace" . }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/servicemonitor.yaml b/deploy/helm/fadvisor/charts/grafana/templates/servicemonitor.yaml deleted file mode 100644 index 2328852..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/servicemonitor.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.serviceMonitor.enabled }} ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "grafana.fullname" . }} - {{- if .Values.serviceMonitor.namespace }} - namespace: {{ .Values.serviceMonitor.namespace }} - {{- end }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- if .Values.serviceMonitor.labels }} - {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} - {{- end }} -spec: - endpoints: - - interval: {{ .Values.serviceMonitor.interval }} - {{- if .Values.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} - {{- end }} - honorLabels: true - port: {{ .Values.service.portName }} - path: {{ .Values.serviceMonitor.path }} - scheme: {{ .Values.serviceMonitor.scheme }} - {{- if .Values.serviceMonitor.tlsConfig }} - tlsConfig: - {{- toYaml .Values.serviceMonitor.tlsConfig | nindent 6 }} - {{- end }} - {{- if .Values.serviceMonitor.relabelings }} - relabelings: - {{- toYaml .Values.serviceMonitor.relabelings | nindent 4 }} - {{- end }} - jobLabel: "{{ .Release.Name }}" - selector: - matchLabels: - {{- include "grafana.selectorLabels" . 
| nindent 8 }} - namespaceSelector: - matchNames: - - {{ .Release.Namespace }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/statefulset.yaml b/deploy/helm/fadvisor/charts/grafana/templates/statefulset.yaml deleted file mode 100644 index ad3dd06..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/statefulset.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "grafana.fullname" . }} - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -{{- with .Values.annotations }} - annotations: -{{ toYaml . | indent 4 }} -{{- end }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - serviceName: {{ template "grafana.fullname" . }}-headless - template: - metadata: - labels: - {{- include "grafana.selectorLabels" . | nindent 8 }} -{{- with .Values.podLabels }} -{{ toYaml . | indent 8 }} -{{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} - checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} - {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} -{{- end }} -{{- with .Values.podAnnotations }} -{{ toYaml . 
| indent 8 }} -{{- end }} - spec: - {{- include "grafana.pod" . | nindent 6 }} - volumeClaimTemplates: - - metadata: - name: storage - spec: - accessModes: {{ .Values.persistence.accessModes }} - storageClassName: {{ .Values.persistence.storageClassName }} - resources: - requests: - storage: {{ .Values.persistence.size }} - {{- with .Values.persistence.selectorLabels }} - selector: - matchLabels: -{{ toYaml . | indent 10 }} - {{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/tests/test-configmap.yaml b/deploy/helm/fadvisor/charts/grafana/templates/tests/test-configmap.yaml deleted file mode 100644 index ff53aaf..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/tests/test-configmap.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if .Values.testFramework.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "grafana.fullname" . }}-test - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -data: - run.sh: |- - @test "Test Health" { - url="http://{{ template "grafana.fullname" . }}/api/health" - - code=$(wget --server-response --spider --timeout 10 --tries 1 ${url} 2>&1 | awk '/^ HTTP/{print $2}') - [ "$code" == "200" ] - } -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/tests/test-podsecuritypolicy.yaml b/deploy/helm/fadvisor/charts/grafana/templates/tests/test-podsecuritypolicy.yaml deleted file mode 100644 index 58b4649..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/tests/test-podsecuritypolicy.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "grafana.fullname" . }}-test - labels: - {{- include "grafana.labels" . 
| nindent 4 }} -spec: - allowPrivilegeEscalation: true - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - fsGroup: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - runAsUser: - rule: RunAsAny - volumes: - - configMap - - downwardAPI - - emptyDir - - projected - - csi - - secret -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/tests/test-role.yaml b/deploy/helm/fadvisor/charts/grafana/templates/tests/test-role.yaml deleted file mode 100644 index 6b10677..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/tests/test-role.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ template "grafana.fullname" . }}-test - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ template "grafana.fullname" . }}-test] -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/tests/test-rolebinding.yaml b/deploy/helm/fadvisor/charts/grafana/templates/tests/test-rolebinding.yaml deleted file mode 100644 index 58fa5e7..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/tests/test-rolebinding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ template "grafana.fullname" . }}-test - namespace: {{ template "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "grafana.fullname" . }}-test -subjects: -- kind: ServiceAccount - name: {{ template "grafana.serviceAccountNameTest" . }} - namespace: {{ template "grafana.namespace" . 
}} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/tests/test-serviceaccount.yaml b/deploy/helm/fadvisor/charts/grafana/templates/tests/test-serviceaccount.yaml deleted file mode 100644 index 5c33507..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/tests/test-serviceaccount.yaml +++ /dev/null @@ -1,9 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - name: {{ template "grafana.serviceAccountNameTest" . }} - namespace: {{ template "grafana.namespace" . }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/templates/tests/test.yaml b/deploy/helm/fadvisor/charts/grafana/templates/tests/test.yaml deleted file mode 100644 index cdc86e5..0000000 --- a/deploy/helm/fadvisor/charts/grafana/templates/tests/test.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{{- if .Values.testFramework.enabled }} -apiVersion: v1 -kind: Pod -metadata: - name: {{ template "grafana.fullname" . }}-test - labels: - {{- include "grafana.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test-success - namespace: {{ template "grafana.namespace" . }} -spec: - serviceAccountName: {{ template "grafana.serviceAccountNameTest" . }} - {{- if .Values.testFramework.securityContext }} - securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }} - {{- end }} - {{- if .Values.image.pullSecrets }} - imagePullSecrets: - {{- range .Values.image.pullSecrets }} - - name: {{ . }} - {{- end}} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 4 }} - {{- end }} - {{- with .Values.affinity }} - affinity: -{{ toYaml . | indent 4 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: -{{ toYaml . 
| indent 4 }} - {{- end }} - containers: - - name: {{ .Release.Name }}-test - image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" - imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" - command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] - volumeMounts: - - mountPath: /tests - name: tests - readOnly: true - volumes: - - name: tests - configMap: - name: {{ template "grafana.fullname" . }}-test - restartPolicy: Never -{{- end }} diff --git a/deploy/helm/fadvisor/charts/grafana/values.yaml b/deploy/helm/fadvisor/charts/grafana/values.yaml deleted file mode 100644 index dd09cae..0000000 --- a/deploy/helm/fadvisor/charts/grafana/values.yaml +++ /dev/null @@ -1,797 +0,0 @@ -fullnameOverride: grafana - -rbac: - create: true - ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) - # useExistingRole: name-of-some-(cluster)role - pspEnabled: true - pspUseAppArmor: true - namespaced: false - extraRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] - extraClusterRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] -serviceAccount: - create: true - name: - nameTest: -# annotations: -# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here - autoMount: true - -replicas: 1 - -## Create a headless service for the deployment -headlessService: false - -## Create HorizontalPodAutoscaler object for deployment type -# -autoscaling: - enabled: false -# minReplicas: 1 -# maxReplicas: 10 -# metrics: -# - type: Resource -# resource: -# name: cpu -# targetAverageUtilization: 60 -# - type: Resource -# resource: -# name: memory -# targetAverageUtilization: 60 - -## See `kubectl explain poddisruptionbudget.spec` for more -## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ -podDisruptionBudget: {} -# minAvailable: 1 -# maxUnavailable: 1 - -## See `kubectl explain deployment.spec.strategy` for more -## ref: 
https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy -deploymentStrategy: - type: RollingUpdate - -readinessProbe: - httpGet: - path: /api/health - port: 3000 - -livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 60 - timeoutSeconds: 30 - failureThreshold: 10 - -## Use an alternate scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: "default-scheduler" - -image: - repository: grafana/grafana - tag: 8.3.3 - sha: "" - pullPolicy: IfNotPresent - - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistrKeySecretName - -testFramework: - enabled: true - image: "bats/bats" - tag: "v1.4.1" - imagePullPolicy: IfNotPresent - securityContext: {} - -securityContext: - runAsUser: 472 - runAsGroup: 472 - fsGroup: 472 - -containerSecurityContext: - {} - -extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # subPath: certificates.crt # (optional) - # configMap: certs-configmap - # readOnly: true - - -extraEmptyDirMounts: [] - # - name: provisioning-notifiers - # mountPath: /etc/grafana/provisioning/notifiers - - -# Apply extra labels to common labels. -extraLabels: {} - -## Assign a PriorityClassName to pods if set -# priorityClassName: - -downloadDashboardsImage: - repository: curlimages/curl - tag: 7.73.0 - sha: "" - pullPolicy: IfNotPresent - -downloadDashboards: - env: {} - envFromSecret: "" - resources: {} - -## Pod Annotations -# podAnnotations: {} - -## Pod Labels -# podLabels: {} - -podPortName: grafana - -## Deployment annotations -# annotations: {} - -## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). -## or access it from within the cluster (ClusterIP service). 
Set the service type and the port to serve it. -## ref: http://kubernetes.io/docs/user-guide/services/ -## -service: - enabled: true - type: NodePort - nodePort: 31112 - port: 80 - targetPort: 3000 - # targetPort: 4181 To be used with a proxy extraContainer - annotations: {} - labels: {} - portName: service - -serviceMonitor: - ## If true, a ServiceMonitor CRD is created for a prometheus operator - ## https://github.com/coreos/prometheus-operator - ## - enabled: false - path: /metrics - # namespace: monitoring (defaults to use the namespace this chart is deployed to) - labels: {} - interval: 1m - scheme: http - tlsConfig: {} - scrapeTimeout: 30s - relabelings: [] - -extraExposePorts: [] - # - name: keycloak - # port: 8080 - # targetPort: 8080 - # type: ClusterIP - -# overrides pod.spec.hostAliases in the grafana deployment's pods -hostAliases: [] - # - ip: "1.2.3.4" - # hostnames: - # - "my.host.com" - -ingress: - enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - # Values can be templated - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - labels: {} - path: / - - # pathType is only for k8s >= 1.1= - pathType: Prefix - - hosts: - - chart-example.local - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - ## Or for k8s > 1.19 - # - path: /* - # pathType: Prefix - # backend: - # service: - # name: ssl-redirect - # port: - # name: use-annotation - - - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi - -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -# -nodeSelector: {} - -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -extraInitContainers: [] - -## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod -extraContainers: "" -# extraContainers: | -# - name: proxy -# image: quay.io/gambol99/keycloak-proxy:latest -# args: -# - -provider=github -# - -client-id= -# - -client-secret= -# - -github-org= -# - -email-domain=* -# - -cookie-secret= -# - -http-address=http://0.0.0.0:4181 -# - -upstream-url=http://127.0.0.1:3000 -# ports: -# - name: proxy-web -# containerPort: 4181 - -## Volumes that can be used in init containers that will not be mounted to deployment pods -extraContainerVolumes: [] -# - name: volume-from-secret -# secret: -# secretName: secret-to-mount -# - name: empty-dir-volume -# emptyDir: {} - -## Enable persistence using Persistent Volume Claims -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## -persistence: - type: pvc - enabled: false - # storageClassName: default - accessModes: - - ReadWriteOnce - size: 10Gi - # annotations: {} - finalizers: - - kubernetes.io/pvc-protection - # selectorLabels: {} - # subPath: "" - # existingClaim: - - ## If 
persistence is not enabled, this allows to mount the - ## local storage in-memory to improve performance - ## - inMemory: - enabled: false - ## The maximum usage on memory medium EmptyDir would be - ## the minimum value between the SizeLimit specified - ## here and the sum of memory limits of all containers in a pod - ## - # sizeLimit: 300Mi - -initChownData: - ## If false, data ownership will not be reset at startup - ## This allows the prometheus-server to be run with an arbitrary user - ## - enabled: true - - ## initChownData container image - ## - image: - repository: busybox - tag: "1.31.1" - sha: "" - pullPolicy: IfNotPresent - - ## initChownData resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - - -# Administrator credentials when not using an existing secret (see below) -adminUser: admin -adminPassword: admin - -# Use an existing secret for the admin user. -admin: - existingSecret: "" - userKey: admin-user - passwordKey: admin-password - -## Define command to be executed at startup by grafana container -## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) -## Default is "run.sh" as defined in grafana's Dockerfile -# command: -# - "sh" -# - "/run.sh" - -## Use an alternate scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: - -## Extra environment variables that will be pass onto deployment pods -## -## to provide grafana with access to CloudWatch on AWS EKS: -## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) -## 2. 
edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the -## same oidc eks provider as noted before (same as the existing line) -## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name -## -## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", -## -## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess -## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) -## -## env: -## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here -## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token -## AWS_REGION: us-east-1 -## -## 5. uncomment the EKS section in extraSecretMounts: below -## 6. uncomment the annotation section in the serviceAccount: above -## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn - -env: {} - -## "valueFrom" environment variable references that will be added to deployment pods -## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core -## Renders in container spec as: -## env: -## ... -## - name: -## valueFrom: -## -envValueFrom: {} - -## The name of a secret in the same kubernetes namespace which contain values to be added to the environment -## This can be useful for auth tokens, etc. Value is templated. -envFromSecret: "" - -## Sensible environment variables that will be rendered as new secret object -## This can be useful for auth tokens, etc -envRenderSecret: {} - -## The names of secrets in the same kubernetes namespace which contain values to be added to the environment -## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. 
-envFromSecrets: [] -## - name: secret-name -## optional: true - -# Inject Kubernetes services as environment variables. -# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables -enableServiceLinks: true - -## Additional grafana server secret mounts -# Defines additional mounts with secrets. Secrets must be manually created in the namespace. -extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # secretName: grafana-secret-files - # readOnly: true - # subPath: "" - # - # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) - # - name: aws-iam-token - # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount - # readOnly: true - # projected: - # defaultMode: 420 - # sources: - # - serviceAccountToken: - # audience: sts.amazonaws.com - # expirationSeconds: 86400 - # path: token - # - # for CSI e.g. Azure Key Vault use the following - # - name: secrets-store-inline - # mountPath: /run/secrets - # readOnly: true - # csi: - # driver: secrets-store.csi.k8s.io - # readOnly: true - # volumeAttributes: - # secretProviderClass: "akv-grafana-spc" - # nodePublishSecretRef: # Only required when using service principal mode - # name: grafana-akv-creds # Only required when using service principal mode - -## Additional grafana server volume mounts -# Defines additional volume mounts. -extraVolumeMounts: [] - # - name: extra-volume-0 - # mountPath: /mnt/volume0 - # readOnly: true - # existingClaim: volume-claim - # - name: extra-volume-1 - # mountPath: /mnt/volume1 - # readOnly: true - # hostPath: /usr/shared/ - -## Pass the plugins you want installed as a list. 
-## -plugins: [] - # - digrich-bubblechart-panel - # - grafana-clock-panel - -## Configure grafana datasources -## ref: http://docs.grafana.org/administration/provisioning/#datasources -## -datasources: - datasources.yaml: - apiVersion: 1 - datasources: - - name: Prometheus - type: prometheus - url: http://fadvisor-prometheus-server.crane-system.svc.cluster.local -# url: http://kvass-thanos.kube-system:9090 - access: proxy - isDefault: true - -## Configure notifiers -## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels -## -notifiers: {} -# notifiers.yaml: -# notifiers: -# - name: email-notifier -# type: email -# uid: email1 -# # either: -# org_id: 1 -# # or -# org_name: Main Org. -# is_default: true -# settings: -# addresses: an_email_address@example.com -# delete_notifiers: - -## Configure grafana dashboard providers -## ref: http://docs.grafana.org/administration/provisioning/#dashboards -## -## `path` must be /var/lib/grafana/dashboards/ -## -dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'default' - orgId: 1 - folder: '' - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/default - -## Configure grafana dashboard to import -## NOTE: To use dashboards you must also enable/configure dashboardProviders -## ref: https://grafana.com/dashboards -## -## dashboards per provider, use provider name as key. 
-## -dashboards: - default: -# some-dashboard: -# json: | -# $RAW_JSON - cluster-costs: - file: dashboards/cluster-costs.json - costs-dimension: - file: dashboards/costs-dimension.json - namespace-costs: - file: dashboards/namespace-costs.json - -# prometheus-stats: -# gnetId: 2 -# revision: 2 -# datasource: Prometheus -# local-dashboard: -# url: https://example.com/repository/test.json -# token: '' -# local-dashboard-base64: -# url: https://example.com/repository/test-b64.json -# token: '' -# b64content: true - -## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. -## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. -## ConfigMap data example: -## -## data: -## example-dashboard.json: | -## RAW_JSON -## -dashboardsConfigMaps: {} -# default: "" - -## Grafana's primary configuration -## NOTE: values in map will be converted to ini format -## ref: http://docs.grafana.org/installation/configuration/ -## -grafana.ini: - paths: - data: /var/lib/grafana/ - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: true - log: - mode: console - grafana_net: - url: https://grafana.net -## grafana Authentication can be enabled with the following values on grafana.ini - # server: - # The full public facing url you use in browser, used for redirects and emails - # root_url: - # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana - # auth.github: - # enabled: false - # allow_sign_up: false - # scopes: user:email,read:org - # auth_url: https://github.com/login/oauth/authorize - # token_url: https://github.com/login/oauth/access_token - # api_url: https://api.github.com/user - # team_ids: - # allowed_organizations: - # client_id: - # client_secret: -## LDAP Authentication can be enabled with the following values on grafana.ini -## NOTE: Grafana will fail to start if the value for 
ldap.toml is invalid - # auth.ldap: - # enabled: true - # allow_sign_up: true - # config_file: /etc/grafana/ldap.toml - -## Grafana's LDAP configuration -## Templated by the template in _helpers.tpl -## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled -## ref: http://docs.grafana.org/installation/configuration/#auth-ldap -## ref: http://docs.grafana.org/installation/ldap/#configuration -ldap: - enabled: false - # `existingSecret` is a reference to an existing secret containing the ldap configuration - # for Grafana in a key `ldap-toml`. - existingSecret: "" - # `config` is the content of `ldap.toml` that will be stored in the created secret - config: "" - # config: |- - # verbose_logging = true - - # [[servers]] - # host = "my-ldap-server" - # port = 636 - # use_ssl = true - # start_tls = false - # ssl_skip_verify = false - # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" - -## Grafana's SMTP configuration -## NOTE: To enable, grafana.ini must be configured with smtp.enabled -## ref: http://docs.grafana.org/installation/configuration/#smtp -smtp: - # `existingSecret` is a reference to an existing secret containing the smtp configuration - # for Grafana. 
- existingSecret: "" - userKey: "user" - passwordKey: "password" - -## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders -## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards -sidecar: - image: - repository: quay.io/kiwigrid/k8s-sidecar - tag: 1.14.2 - sha: "" - imagePullPolicy: IfNotPresent - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - securityContext: {} - # skipTlsVerify Set to true to skip tls verification for kube api calls - # skipTlsVerify: true - enableUniqueFilenames: false - dashboards: - enabled: true - SCProvider: true - # label that the configmaps with dashboards are marked with - label: grafana_dashboard - # value of label that the configmaps with dashboards are set to - labelValue: null - # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) - folder: /tmp/dashboards - # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead - defaultFolderName: null - # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces. - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # If specified, the sidecar will look for annotation with this name to create folder and put graph here. - # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. 
- folderAnnotation: null - # Absolute path to shell script to execute after a configmap got reloaded - script: null - # provider configuration that lets grafana manage the dashboards - provider: - # name of the provider, should be unique - name: sidecarProvider - # orgid as configured in grafana - orgid: 1 - # folder in which the dashboards should be imported in grafana - folder: '' - # type of the provider - type: file - # disableDelete to activate a import-only behaviour - disableDelete: false - # allow updating provisioned dashboards from the UI - allowUiUpdates: false - # allow Grafana to replicate dashboard structure from filesystem - foldersFromFilesStructure: false - # Additional dashboard sidecar volume mounts - extraMounts: [] - datasources: - enabled: false - # label that the configmaps with datasources are marked with - label: grafana_datasource - # value of label that the configmaps with datasources are set to - labelValue: null - # If specified, the sidecar will search for datasource config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # Endpoint to send request to reload datasources - reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" - notifiers: - enabled: false - # label that the configmaps with notifiers are marked with - label: grafana_notifier - # If specified, the sidecar will search for notifier config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. 
- # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # search in configmap, secret or both - resource: both - -## Override the deployment namespace -## -namespaceOverride: "" - -## Number of old ReplicaSets to retain -## -revisionHistoryLimit: 10 - -## Add a seperate remote image renderer deployment/service -imageRenderer: - # Enable the image-renderer deployment & service - enabled: false - replicas: 1 - image: - # image-renderer Image repository - repository: grafana/grafana-image-renderer - # image-renderer Image tag - tag: latest - # image-renderer Image sha (optional) - sha: "" - # image-renderer ImagePullPolicy - pullPolicy: Always - # extra environment variables - env: - HTTP_HOST: "0.0.0.0" - # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 - # RENDERING_MODE: clustered - # IGNORE_HTTPS_ERRORS: true - # image-renderer deployment serviceAccount - serviceAccountName: "" - # image-renderer deployment securityContext - securityContext: {} - # image-renderer deployment Host Aliases - hostAliases: [] - # image-renderer deployment priority class - priorityClassName: '' - service: - # Enable the image-renderer service - enabled: true - # image-renderer service port name - portName: 'http' - # image-renderer service port used by both service and deployment - port: 8081 - targetPort: 8081 - # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana - grafanaProtocol: http - # In case a sub_path is used this needs to be added to the image renderer callback - grafanaSubPath: "" - # name of the image-renderer port on the pod - podPortName: http - # number of image-renderer replica sets to keep - revisionHistoryLimit: 10 - networkPolicy: - # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods - limitIngress: true - # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods - limitEgress: false - 
resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - -networkPolicy: - ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. - ## - enabled: false - ## @param networkPolicy.allowExternal Don't require client label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to grafana port defined. - ## When true, grafana will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed - ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the grafana. - ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. - ## - ## Example: - ## explicitNamespacesSelector: - ## matchLabels: - ## role: frontend - ## matchExpressions: - ## - {key: role, operator: In, values: [frontend]} - ## - explicitNamespacesSelector: {} - -# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option -enableKubeBackwardCompatibility: false diff --git a/deploy/helm/fadvisor/charts/kube-state-metrics/.helmignore b/deploy/helm/fadvisor/charts/kube-state-metrics/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/deploy/helm/fadvisor/charts/kube-state-metrics/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. 
-# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/deploy/helm/fadvisor/charts/kube-state-metrics/Chart.yaml b/deploy/helm/fadvisor/charts/kube-state-metrics/Chart.yaml deleted file mode 100644 index 99f83e5..0000000 --- a/deploy/helm/fadvisor/charts/kube-state-metrics/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: kube-state-metrics -description: A Helm chart for kube-state-metrics - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. 
-appVersion: "1.16.0" diff --git a/deploy/helm/fadvisor/charts/kube-state-metrics/templates/_helpers.tpl b/deploy/helm/fadvisor/charts/kube-state-metrics/templates/_helpers.tpl deleted file mode 100644 index 42af234..0000000 --- a/deploy/helm/fadvisor/charts/kube-state-metrics/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "prometheus.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "prometheus.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "prometheus.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "prometheus.labels" -}} -helm.sh/chart: {{ include "prometheus.chart" . }} -{{ include "prometheus.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "prometheus.selectorLabels" -}} -app.kubernetes.io/name: {{ include "prometheus.name" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "prometheus.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "prometheus.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/kube-state-metrics/templates/deployment.yaml b/deploy/helm/fadvisor/charts/kube-state-metrics/templates/deployment.yaml deleted file mode 100644 index 0a7b4c3..0000000 --- a/deploy/helm/fadvisor/charts/kube-state-metrics/templates/deployment.yaml +++ /dev/null @@ -1,54 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: {{ .Values.KubeStateMetricsName }} - name: {{ .Values.KubeStateMetricsName }} - namespace: {{ .Release.Namespace }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: {{ .Values.KubeStateMetricsName }} - template: - metadata: - labels: - app: {{ .Values.KubeStateMetricsName }} - spec: - serviceAccountName: {{ .Values.KubeStateMetricsName }} - containers: - - name: {{ .Values.KubeStateMetricsName }} - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} - livenessProbe: - httpGet: - path: /healthz - port: 8080 - initialDelaySeconds: 5 - timeoutSeconds: 5 - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 8081 - name: telemetry - readinessProbe: - httpGet: - path: / - port: 8081 - initialDelaySeconds: 5 - timeoutSeconds: 5 - {{- with .Values.resources }} - resources: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} diff --git a/deploy/helm/fadvisor/charts/kube-state-metrics/templates/rbac.yaml b/deploy/helm/fadvisor/charts/kube-state-metrics/templates/rbac.yaml deleted file mode 100644 index c730980..0000000 --- a/deploy/helm/fadvisor/charts/kube-state-metrics/templates/rbac.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ .Values.KubeStateMetricsName }} - namespace: {{ .Release.Namespace }} - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ .Values.KubeStateMetricsName }} -rules: - - apiGroups: [""] - resources: - - configmaps - - secrets - - nodes - - pods - - services - - resourcequotas - - replicationcontrollers - - limitranges - - persistentvolumeclaims - - persistentvolumes - - namespaces - - endpoints - verbs: ["list", "watch"] - - apiGroups: ["extensions"] - resources: - - daemonsets - - deployments - - replicasets - verbs: ["list", "watch"] - - apiGroups: ["apps"] - resources: - - statefulsets - verbs: ["list", "watch"] - - apiGroups: ["batch"] - resources: - - cronjobs - - jobs - verbs: ["list", "watch"] - - apiGroups: ["autoscaling"] - resources: - - horizontalpodautoscalers - verbs: ["list", "watch"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ .Values.KubeStateMetricsName }} -subjects: - - kind: ServiceAccount - name: {{ .Values.KubeStateMetricsName }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ .Values.KubeStateMetricsName }} - apiGroup: rbac.authorization.k8s.io diff --git a/deploy/helm/fadvisor/charts/kube-state-metrics/templates/service.yaml b/deploy/helm/fadvisor/charts/kube-state-metrics/templates/service.yaml deleted file mode 100644 index 47ce26f..0000000 --- a/deploy/helm/fadvisor/charts/kube-state-metrics/templates/service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - annotations: - prometheus.io/scrape: 'true' - 
prometheus.io/port: '{{ .Values.service.port }}' - labels: - app: {{ .Values.KubeStateMetricsName }} - name: {{ .Values.KubeStateMetricsName }} - namespace: {{ .Release.Namespace }} -spec: - type: {{ .Values.service.type }} - ports: - - name: http-metrics - protocol: TCP - port: {{ .Values.service.port }} - targetPort: http-metrics - {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} - nodePort: {{ .Values.service.nodePort }} - {{- end }} - selector: - app: {{ .Values.KubeStateMetricsName }} \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/kube-state-metrics/values.yaml b/deploy/helm/fadvisor/charts/kube-state-metrics/values.yaml deleted file mode 100644 index 05ed298..0000000 --- a/deploy/helm/fadvisor/charts/kube-state-metrics/values.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# Default values for prometheus. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -KubeStateMetricsName: "kube-state-metrics" - -pvc: - enable: false - -replicaCount: 1 - -# image for prometheus deployment -image: - repository: "quay.io/coreos/kube-state-metrics" - pullPolicy: Always - tag: "v1.8.0" - -service: - type: NodePort - port: 8080 - nodePort: 31113 - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - -tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/node-exporter/.helmignore b/deploy/helm/fadvisor/charts/node-exporter/.helmignore deleted file mode 100644 index 0e8a0eb..0000000 --- a/deploy/helm/fadvisor/charts/node-exporter/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. 
-# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/deploy/helm/fadvisor/charts/node-exporter/Chart.yaml b/deploy/helm/fadvisor/charts/node-exporter/Chart.yaml deleted file mode 100644 index 4058a80..0000000 --- a/deploy/helm/fadvisor/charts/node-exporter/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: node-exporter -description: A Helm chart for node-exporter - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. 
-appVersion: "1.16.0" diff --git a/deploy/helm/fadvisor/charts/node-exporter/templates/_helpers.tpl b/deploy/helm/fadvisor/charts/node-exporter/templates/_helpers.tpl deleted file mode 100644 index 42af234..0000000 --- a/deploy/helm/fadvisor/charts/node-exporter/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "prometheus.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "prometheus.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "prometheus.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "prometheus.labels" -}} -helm.sh/chart: {{ include "prometheus.chart" . }} -{{ include "prometheus.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "prometheus.selectorLabels" -}} -app.kubernetes.io/name: {{ include "prometheus.name" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "prometheus.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "prometheus.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/node-exporter/templates/daemonset.yaml b/deploy/helm/fadvisor/charts/node-exporter/templates/daemonset.yaml deleted file mode 100644 index f2be27f..0000000 --- a/deploy/helm/fadvisor/charts/node-exporter/templates/daemonset.yaml +++ /dev/null @@ -1,69 +0,0 @@ -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - app: {{ .Values.NodeExporterName }} - name: {{ .Values.NodeExporterName }} - namespace: {{ .Release.Namespace }} -spec: - selector: - matchLabels: - app: {{ .Values.NodeExporterName }} - template: - metadata: - labels: - app: {{ .Values.NodeExporterName }} - spec: - hostPID: true - hostIPC: true - hostNetwork: true - containers: - - name: {{ .Values.NodeExporterName }} - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} - args: - - --web.listen-address=$(HOSTIP):9100 - - --path.procfs=/host/proc - - --path.sysfs=/host/sys - - --path.rootfs=/host/root - - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) - - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ - ports: - - containerPort: 9100 - env: - - name: HOSTIP - valueFrom: - fieldRef: - fieldPath: status.hostIP - {{- with .Values.resources }} - resources: - {{- toYaml . 
| nindent 8 }} - {{- end }} - securityContext: - runAsNonRoot: true - runAsUser: 65534 - volumeMounts: - - name: proc - mountPath: /host/proc - - name: sys - mountPath: /host/sys - - name: root - mountPath: /host/root - mountPropagation: HostToContainer - readOnly: true - volumes: - - name: proc - hostPath: - path: /proc - - name: dev - hostPath: - path: /dev - - name: sys - hostPath: - path: /sys - - name: root - hostPath: - path: / - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/node-exporter/templates/service.yaml b/deploy/helm/fadvisor/charts/node-exporter/templates/service.yaml deleted file mode 100644 index 4e08a9b..0000000 --- a/deploy/helm/fadvisor/charts/node-exporter/templates/service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '{{ .Values.service.port }}' - labels: - app: {{ .Values.NodeExporterName }} - name: {{ .Values.NodeExporterName }} - namespace: {{ .Release.Namespace }} -spec: - type: {{ .Values.service.type }} - ports: - - name: {{ .Values.NodeExporterName }} - protocol: TCP - port: {{ .Values.service.port }} - targetPort: 9100 - {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} - nodePort: {{ .Values.service.nodePort }} - {{- end }} - selector: - app: {{ .Values.NodeExporterName }} \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/node-exporter/values.yaml b/deploy/helm/fadvisor/charts/node-exporter/values.yaml deleted file mode 100644 index 10f33ca..0000000 --- a/deploy/helm/fadvisor/charts/node-exporter/values.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# Default values for prometheus. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -NodeExporterName: "node-exporter" - -image: - repository: "prom/node-exporter" - pullPolicy: Always - tag: "v1.1.1" - -# -#resources: -# requests: -# cpu: 150m -# memory: 180Mi -# limits: -# cpu: 150m -# memory: 180Mi - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - -tolerations: - - operator: "Exists" - -service: - type: NodePort - port: 9100 diff --git a/deploy/helm/fadvisor/charts/prometheus/.helmignore b/deploy/helm/fadvisor/charts/prometheus/.helmignore deleted file mode 100644 index 825c007..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj - -OWNERS diff --git a/deploy/helm/fadvisor/charts/prometheus/Chart.yaml b/deploy/helm/fadvisor/charts/prometheus/Chart.yaml deleted file mode 100644 index 0296e2a..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/Chart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v2 -name: prometheus -appVersion: 2.31.1 -version: 15.0.2 -description: Prometheus is a monitoring system and time series database. 
-home: https://prometheus.io/ -icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png -sources: - - https://github.com/prometheus/alertmanager - - https://github.com/prometheus/prometheus - - https://github.com/prometheus/pushgateway \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/prometheus/README.md b/deploy/helm/fadvisor/charts/prometheus/README.md deleted file mode 100644 index d8a1e9a..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/README.md +++ /dev/null @@ -1,226 +0,0 @@ -# Prometheus - -[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. - -This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. - -## Prerequisites - -- Kubernetes 1.16+ -- Helm 3+ - -## Get Repo Info - -```console -helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -helm repo update -``` - -_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ - -## Install Chart - -```console -helm install [RELEASE_NAME] prometheus-community/prometheus -``` - -_See [configuration](#configuration) below._ - -_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ - -## Dependencies - -By default this chart installs additional, dependent charts: - -- [kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics) - -To disable the dependency during installation, set `kubeStateMetrics.enabled` to `false`. 
- -_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._ - -## Uninstall Chart - -```console -helm uninstall [RELEASE_NAME] -``` - -This removes all the Kubernetes components associated with the chart and deletes the release. - -_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ - -## Upgrading Chart - -```console -helm upgrade [RELEASE_NAME] [CHART] --install -``` - -_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ - -### To 15.0 - -Version 15.0.0 changes the relabeling config, aligning it with the [Prometheus community conventions](https://github.com/prometheus/prometheus/pull/9832). If you've made manual changes to the relabeling config, you have to adapt your changes. - -Before you update please execute the following command, to be able to update kube-state-metrics: - -```bash -kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus,app.kubernetes.io/name=kube-state-metrics --cascade=orphan -``` - -### To 9.0 - -Version 9.0 adds a new option to enable or disable the Prometheus Server. This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. To install the server `server.enabled` must be set to `true`. - -### To 5.0 - -As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. - -Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/). 
- -Users of this chart will need to update their alerting rules to the new format before they can upgrade. - -### Example Migration - -Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following: - -1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: - - ```yaml - alertmanager: - enabled: false - alertmanagerFiles: - alertmanager.yml: "" - kubeStateMetrics: - enabled: false - nodeExporter: - enabled: false - pushgateway: - enabled: false - server: - extraArgs: - storage.local.retention: 720h - serverFiles: - alerts: "" - prometheus.yml: "" - rules: "" - ``` - -1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target. - - ```yaml - prometheus.yml: - ... - remote_read: - - url: http://prometheus-old/api/v1/read - ... - ``` - - Old data will be available when you query the new prometheus instance. - -## Configuration - -See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: - -```console -helm show values prometheus-community/prometheus -``` - -You may similarly use the above configuration commands on each chart [dependency](#dependencies) to see it's configurations. - -### Scraping Pod Metrics via Annotations - -This chart uses a default configuration that causes prometheus to scrape a variety of kubernetes resource types, provided they have the correct annotations. 
In this section we describe how to configure pods to be scraped; for information on how other resource types can be scraped you can do a `helm template` to get the kubernetes resource definitions, and then reference the prometheus configuration in the ConfigMap against the prometheus documentation for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config). - -In order to get prometheus to scrape pods, you must add annotations to the the pods as below: - -```yaml -metadata: - annotations: - prometheus.io/scrape: "true" - prometheus.io/path: /metrics - prometheus.io/port: "8080" -``` - -You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. `prometheus.io/port` should be set to the port that your pod serves metrics from. Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be enclosed in double quotes. - -### Sharing Alerts Between Services - -Note that when [installing](#install-chart) or [upgrading](#upgrading-chart) you may use multiple values override files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example, - -```yaml -# values.yaml -# ... - -# service1-alert.yaml -serverFiles: - alerts: - service1: - - alert: anAlert - # ... - -# service2-alert.yaml -serverFiles: - alerts: - service2: - - alert: anAlert - # ... -``` - -```console -helm install [RELEASE_NAME] prometheus-community/prometheus -f values.yaml -f service1-alert.yaml -f service2-alert.yaml -``` - -### RBAC Configuration - -Roles and RoleBindings resources will be created automatically for `server` service. 
- -To manually setup RBAC you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account. - -> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own. - -### ConfigMap Files - -AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod. - -Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. - -### Ingress TLS - -If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. - -To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. 
Then create a TLS secret in the namespace: - -```console -kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key -``` - -Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: - -```yaml -server: - ingress: - ## If true, Prometheus server Ingress will be created - ## - enabled: true - - ## Prometheus server Ingress hostnames - ## Must be provided if Ingress is enabled - ## - hosts: - - prometheus.domain.com - - ## Prometheus server Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: - - secretName: prometheus-server-tls - hosts: - - prometheus.domain.com -``` - -### NetworkPolicy - -Enabling Network Policy for Prometheus will secure connections to Alert Manager and Kube State Metrics by only accepting connections from Prometheus Server. All inbound connections to Prometheus Server are still allowed. - -To enable network policy for Prometheus, install a networking plugin that implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. - -If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need to manually create a networkpolicy which allows it. diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/NOTES.txt b/deploy/helm/fadvisor/charts/prometheus/templates/NOTES.txt deleted file mode 100644 index b6de39f..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/NOTES.txt +++ /dev/null @@ -1,103 +0,0 @@ -{{- if .Values.server.enabled -}} -The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: -{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - -{{ if .Values.server.ingress.enabled -}} -From outside the cluster, the server URL(s) are: -{{- range .Values.server.ingress.hosts }} -http://{{ . 
}} -{{- end }} -{{- else }} -Get the Prometheus server URL by running these commands in the same shell: -{{- if contains "NodePort" .Values.server.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.server.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }} -{{- else if contains "ClusterIP" .Values.server.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090 -{{- end }} -{{- end }} - -{{- if .Values.server.persistentVolume.enabled }} -{{- else }} -################################################################################# -###### WARNING: Persistence is disabled!!! You will lose your data when ##### -###### the Server pod is terminated. 
##### -################################################################################# -{{- end }} -{{- end }} - -{{ if .Values.alertmanager.enabled }} -The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster: -{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - -{{ if .Values.alertmanager.ingress.enabled -}} -From outside the cluster, the alertmanager URL(s) are: -{{- range .Values.alertmanager.ingress.hosts }} -http://{{ . }} -{{- end }} -{{- else }} -Get the Alertmanager URL by running these commands in the same shell: -{{- if contains "NodePort" .Values.alertmanager.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }} -{{- else if contains "ClusterIP" .Values.alertmanager.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . 
}},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093 -{{- end }} -{{- end }} - -{{- if .Values.alertmanager.persistentVolume.enabled }} -{{- else }} -################################################################################# -###### WARNING: Persistence is disabled!!! You will lose your data when ##### -###### the AlertManager pod is terminated. ##### -################################################################################# -{{- end }} -{{- end }} - - -{{ if .Values.pushgateway.enabled }} -The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster: -{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - -{{ if .Values.pushgateway.ingress.enabled -}} -From outside the cluster, the pushgateway URL(s) are: -{{- range .Values.pushgateway.ingress.hosts }} -http://{{ . }} -{{- end }} -{{- else }} -Get the PushGateway URL by running these commands in the same shell: -{{- if contains "NodePort" .Values.pushgateway.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }} -{{- else if contains "ClusterIP" .Values.pushgateway.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 -{{- end }} -{{- end }} -{{- end }} - -For more information on running Prometheus, visit: -https://prometheus.io/ diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/_helpers.tpl b/deploy/helm/fadvisor/charts/prometheus/templates/_helpers.tpl deleted file mode 100644 index 065065c..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/_helpers.tpl +++ /dev/null @@ -1,282 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "prometheus.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "prometheus.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create unified labels for prometheus components -*/}} -{{- define "prometheus.common.matchLabels" -}} -app: {{ template "prometheus.name" . }} -release: {{ .Release.Name }} -{{- end -}} - -{{- define "prometheus.common.metaLabels" -}} -chart: {{ template "prometheus.chart" . }} -heritage: {{ .Release.Service }} -{{- end -}} - -{{- define "prometheus.alertmanager.labels" -}} -{{ include "prometheus.alertmanager.matchLabels" . }} -{{ include "prometheus.common.metaLabels" . }} -{{- end -}} - -{{- define "prometheus.alertmanager.matchLabels" -}} -component: {{ .Values.alertmanager.name | quote }} -{{ include "prometheus.common.matchLabels" . 
}} -{{- end -}} - -{{- define "prometheus.nodeExporter.labels" -}} -{{ include "prometheus.nodeExporter.matchLabels" . }} -{{ include "prometheus.common.metaLabels" . }} -{{- end -}} - -{{- define "prometheus.nodeExporter.matchLabels" -}} -component: {{ .Values.nodeExporter.name | quote }} -{{ include "prometheus.common.matchLabels" . }} -{{- end -}} - -{{- define "prometheus.pushgateway.labels" -}} -{{ include "prometheus.pushgateway.matchLabels" . }} -{{ include "prometheus.common.metaLabels" . }} -{{- end -}} - -{{- define "prometheus.pushgateway.matchLabels" -}} -component: {{ .Values.pushgateway.name | quote }} -{{ include "prometheus.common.matchLabels" . }} -{{- end -}} - -{{- define "prometheus.server.labels" -}} -{{ include "prometheus.server.matchLabels" . }} -{{ include "prometheus.common.metaLabels" . }} -{{- end -}} - -{{- define "prometheus.server.matchLabels" -}} -component: {{ .Values.server.name | quote }} -{{ include "prometheus.common.matchLabels" . }} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "prometheus.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified alertmanager name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-*/}} - -{{- define "prometheus.alertmanager.fullname" -}} -{{- if .Values.alertmanager.fullnameOverride -}} -{{- .Values.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified node-exporter name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "prometheus.nodeExporter.fullname" -}} -{{- if .Values.nodeExporter.fullnameOverride -}} -{{- .Values.nodeExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified Prometheus server name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "prometheus.server.fullname" -}} -{{- if .Values.server.fullnameOverride -}} -{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified pushgateway name. 
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "prometheus.pushgateway.fullname" -}} -{{- if .Values.pushgateway.fullnameOverride -}} -{{- .Values.pushgateway.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Get KubeVersion removing pre-release information. -*/}} -{{- define "prometheus.kubeVersion" -}} - {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for deployment. -*/}} -{{- define "prometheus.deployment.apiVersion" -}} -{{- print "apps/v1" -}} -{{- end -}} -{{/* -Return the appropriate apiVersion for daemonset. -*/}} -{{- define "prometheus.daemonset.apiVersion" -}} -{{- print "apps/v1" -}} -{{- end -}} -{{/* -Return the appropriate apiVersion for networkpolicy. -*/}} -{{- define "prometheus.networkPolicy.apiVersion" -}} -{{- print "networking.k8s.io/v1" -}} -{{- end -}} -{{/* -Return the appropriate apiVersion for podsecuritypolicy. -*/}} -{{- define "prometheus.podSecurityPolicy.apiVersion" -}} -{{- print "policy/v1beta1" -}} -{{- end -}} -{{/* -Return the appropriate apiVersion for rbac. -*/}} -{{- define "rbac.apiVersion" -}} -{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} -{{- print "rbac.authorization.k8s.io/v1" -}} -{{- else -}} -{{- print "rbac.authorization.k8s.io/v1beta1" -}} -{{- end -}} -{{- end -}} -{{/* -Return the appropriate apiVersion for ingress. 
-*/}} -{{- define "ingress.apiVersion" -}} - {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "prometheus.kubeVersion" .)) -}} - {{- print "networking.k8s.io/v1" -}} - {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} - {{- print "networking.k8s.io/v1beta1" -}} - {{- else -}} - {{- print "extensions/v1beta1" -}} - {{- end -}} -{{- end -}} - -{{/* -Return if ingress is stable. -*/}} -{{- define "ingress.isStable" -}} - {{- eq (include "ingress.apiVersion" .) "networking.k8s.io/v1" -}} -{{- end -}} - -{{/* -Return if ingress supports ingressClassName. -*/}} -{{- define "ingress.supportsIngressClassName" -}} - {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} -{{- end -}} -{{/* -Return if ingress supports pathType. -*/}} -{{- define "ingress.supportsPathType" -}} - {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} -{{- end -}} - -{{/* -Create the name of the service account to use for the alertmanager component -*/}} -{{- define "prometheus.serviceAccountName.alertmanager" -}} -{{- if .Values.serviceAccounts.alertmanager.create -}} - {{ default (include "prometheus.alertmanager.fullname" .) .Values.serviceAccounts.alertmanager.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.alertmanager.name }} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use for the nodeExporter component -*/}} -{{- define "prometheus.serviceAccountName.nodeExporter" -}} -{{- if .Values.serviceAccounts.nodeExporter.create -}} - {{ default (include "prometheus.nodeExporter.fullname" .) 
.Values.serviceAccounts.nodeExporter.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.nodeExporter.name }} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use for the pushgateway component -*/}} -{{- define "prometheus.serviceAccountName.pushgateway" -}} -{{- if .Values.serviceAccounts.pushgateway.create -}} - {{ default (include "prometheus.pushgateway.fullname" .) .Values.serviceAccounts.pushgateway.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.pushgateway.name }} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use for the server component -*/}} -{{- define "prometheus.serviceAccountName.server" -}} -{{- if .Values.serviceAccounts.server.create -}} - {{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.server.name }} -{{- end -}} -{{- end -}} - -{{/* -Define the prometheus.namespace template if set with forceNamespace or .Release.Namespace is set -*/}} -{{- define "prometheus.namespace" -}} -{{- if .Values.forceNamespace -}} -{{ printf "namespace: %s" .Values.forceNamespace }} -{{- else -}} -{{ printf "namespace: %s" .Release.Namespace }} -{{- end -}} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/clusterrole.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/clusterrole.yaml deleted file mode 100644 index c732ff4..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/clusterrole.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.alertmanager.useClusterRole (not .Values.alertmanager.useExistingRole) -}} -apiVersion: {{ template "rbac.apiVersion" . }} -kind: ClusterRole -metadata: - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . 
}} -rules: -{{- if .Values.podSecurityPolicy.enabled }} - - apiGroups: - - extensions - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - {{ template "prometheus.alertmanager.fullname" . }} -{{- else }} - [] -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml deleted file mode 100644 index 6f13e98..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.alertmanager.useClusterRole -}} -apiVersion: {{ template "rbac.apiVersion" . }} -kind: ClusterRoleBinding -metadata: - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.alertmanager" . }} -{{ include "prometheus.namespace" . | indent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole -{{- if (not .Values.alertmanager.useExistingRole) }} - name: {{ template "prometheus.alertmanager.fullname" . }} -{{- else }} - name: {{ .Values.alertmanager.useExistingRole }} -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/cm.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/cm.yaml deleted file mode 100644 index cb09bf0..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/cm.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and .Values.alertmanager.enabled (and (empty .Values.alertmanager.configMapOverrideName) (empty .Values.alertmanager.configFromSecret)) -}} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . 
}} -{{ include "prometheus.namespace" . | indent 2 }} -data: -{{- $root := . -}} -{{- range $key, $value := .Values.alertmanagerFiles }} - {{- if $key | regexMatch ".*\\.ya?ml$" }} - {{ $key }}: | -{{ toYaml $value | default "{}" | indent 4 }} - {{- else }} - {{ $key }}: {{ toYaml $value | indent 4 }} - {{- end }} -{{- end -}} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/deploy.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/deploy.yaml deleted file mode 100644 index d52ca44..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/deploy.yaml +++ /dev/null @@ -1,172 +0,0 @@ -{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.statefulSet.enabled) -}} -apiVersion: {{ template "prometheus.deployment.apiVersion" . }} -kind: Deployment -metadata: -{{- if .Values.alertmanager.deploymentAnnotations }} - annotations: - {{ toYaml .Values.alertmanager.deploymentAnnotations | nindent 4 }} -{{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} -spec: - selector: - matchLabels: - {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} - replicas: {{ .Values.alertmanager.replicaCount }} - {{- if .Values.alertmanager.strategy }} - strategy: -{{ toYaml .Values.alertmanager.strategy | trim | indent 4 }} - {{ if eq .Values.alertmanager.strategy.type "Recreate" }}rollingUpdate: null{{ end }} -{{- end }} - template: - metadata: - {{- if .Values.alertmanager.podAnnotations }} - annotations: - {{ toYaml .Values.alertmanager.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . 
| nindent 8 }} - {{- if .Values.alertmanager.podLabels}} - {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} - {{- end}} - spec: -{{- if .Values.alertmanager.schedulerName }} - schedulerName: "{{ .Values.alertmanager.schedulerName }}" -{{- end }} - serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} - {{- if .Values.alertmanager.extraInitContainers }} - initContainers: -{{ toYaml .Values.alertmanager.extraInitContainers | indent 8 }} - {{- end }} -{{- if .Values.alertmanager.priorityClassName }} - priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" -{{- end }} - containers: - - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }} - image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" - imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" - env: - {{- range $key, $value := .Values.alertmanager.extraEnv }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - - name: POD_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - args: - - --config.file=/etc/config/{{ .Values.alertmanager.configFileName }} - - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} - {{- if .Values.alertmanager.service.enableMeshPeer }} - - --cluster.listen-address=0.0.0.0:6783 - - --cluster.advertise-address=[$(POD_IP)]:6783 - {{- else }} - - --cluster.listen-address= - {{- end }} - {{- range $key, $value := .Values.alertmanager.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- if .Values.alertmanager.baseURL }} - - --web.external-url={{ .Values.alertmanager.baseURL }} - {{- end }} - {{- range .Values.alertmanager.clusterPeers }} - - --cluster.peer={{ . 
}} - {{- end }} - - ports: - - containerPort: 9093 - readinessProbe: - httpGet: - path: {{ .Values.alertmanager.prefixURL }}/-/ready - port: 9093 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: -{{ toYaml .Values.alertmanager.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - - name: storage-volume - mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" - subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" - {{- range .Values.alertmanager.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - - {{- if .Values.configmapReload.alertmanager.enabled }} - - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} - image: "{{ .Values.configmapReload.alertmanager.image.repository }}:{{ .Values.configmapReload.alertmanager.image.tag }}" - imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" - args: - - --volume-dir=/etc/config - - --webhook-url=http://127.0.0.1:9093{{ .Values.alertmanager.prefixURL }}/-/reload - {{- range $key, $value := .Values.configmapReload.alertmanager.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - resources: -{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - readOnly: true - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.nodeSelector }} - nodeSelector: -{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} - {{- end }} - {{- with .Values.alertmanager.dnsConfig }} - dnsConfig: -{{ toYaml . 
| indent 8 }} - {{- end }} - {{- if .Values.alertmanager.securityContext }} - securityContext: -{{ toYaml .Values.alertmanager.securityContext | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.tolerations }} - tolerations: -{{ toYaml .Values.alertmanager.tolerations | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.affinity }} - affinity: -{{ toYaml .Values.alertmanager.affinity | indent 8 }} - {{- end }} - volumes: - - name: config-volume - {{- if empty .Values.alertmanager.configFromSecret }} - configMap: - name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} - {{- else }} - secret: - secretName: {{ .Values.alertmanager.configFromSecret }} - {{- end }} - {{- range .Values.alertmanager.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- with .optional }} - optional: {{ . }} - {{- end }} - {{- end }} - - name: storage-volume - {{- if .Values.alertmanager.persistentVolume.enabled }} - persistentVolumeClaim: - claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . 
}}{{- end }} - {{- else }} - emptyDir: - {{- if .Values.alertmanager.emptyDir.sizeLimit }} - sizeLimit: {{ .Values.alertmanager.emptyDir.sizeLimit }} - {{- else }} - {} - {{- end -}} - {{- end -}} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/headless-svc.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/headless-svc.yaml deleted file mode 100644 index 8c402c4..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/headless-svc.yaml +++ /dev/null @@ -1,31 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.alertmanager.statefulSet.headless.annotations }} - annotations: -{{ toYaml .Values.alertmanager.statefulSet.headless.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} -{{- if .Values.alertmanager.statefulSet.headless.labels }} -{{ toYaml .Values.alertmanager.statefulSet.headless.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.alertmanager.fullname" . }}-headless -{{ include "prometheus.namespace" . | indent 2 }} -spec: - clusterIP: None - ports: - - name: http - port: {{ .Values.alertmanager.statefulSet.headless.servicePort }} - protocol: TCP - targetPort: 9093 -{{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} - - name: meshpeer - port: 6783 - protocol: TCP - targetPort: 6783 -{{- end }} - selector: - {{- include "prometheus.alertmanager.matchLabels" . 
| nindent 4 }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/ingress.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/ingress.yaml deleted file mode 100644 index 2a7b67c..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/ingress.yaml +++ /dev/null @@ -1,57 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}} -{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} -{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} -{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} -{{- $releaseName := .Release.Name -}} -{{- $serviceName := include "prometheus.alertmanager.fullname" . }} -{{- $servicePort := .Values.alertmanager.service.servicePort -}} -{{- $ingressPath := .Values.alertmanager.ingress.path -}} -{{- $ingressPathType := .Values.alertmanager.ingress.pathType -}} -{{- $extraPaths := .Values.alertmanager.ingress.extraPaths -}} -apiVersion: {{ template "ingress.apiVersion" . }} -kind: Ingress -metadata: -{{- if .Values.alertmanager.ingress.annotations }} - annotations: -{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} -{{- range $key, $value := .Values.alertmanager.ingress.extraLabels }} - {{ $key }}: {{ $value }} -{{- end }} - name: {{ template "prometheus.alertmanager.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} -spec: - {{- if and $ingressSupportsIngressClassName .Values.alertmanager.ingress.ingressClassName }} - ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName }} - {{- end }} - rules: - {{- range .Values.alertmanager.ingress.hosts }} - {{- $url := splitList "/" . 
}} - - host: {{ first $url }} - http: - paths: -{{ if $extraPaths }} -{{ toYaml $extraPaths | indent 10 }} -{{- end }} - - path: {{ $ingressPath }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $serviceName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $serviceName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- end -}} -{{- if .Values.alertmanager.ingress.tls }} - tls: -{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} - {{- end -}} -{{- end -}} \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/netpol.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/netpol.yaml deleted file mode 100644 index e44ade6..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/netpol.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.networkPolicy.enabled -}} -apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} -kind: NetworkPolicy -metadata: - name: {{ template "prometheus.alertmanager.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: - {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} - ingress: - - from: - - podSelector: - matchLabels: - {{- include "prometheus.server.matchLabels" . 
| nindent 12 }} - - ports: - - port: 9093 -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/pdb.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/pdb.yaml deleted file mode 100644 index 41a92f3..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/pdb.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.alertmanager.podDisruptionBudget.enabled }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "prometheus.alertmanager.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} -spec: - maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} - selector: - matchLabels: - {{- include "prometheus.alertmanager.labels" . | nindent 6 }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/psp.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/psp.yaml deleted file mode 100644 index 64fb130..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/psp.yaml +++ /dev/null @@ -1,46 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} -apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "prometheus.alertmanager.fullname" . }} - labels: - {{- include "prometheus.alertmanager.labels" . 
| nindent 4 }} - annotations: -{{- if .Values.alertmanager.podSecurityPolicy.annotations }} -{{ toYaml .Values.alertmanager.podSecurityPolicy.annotations | indent 4 }} -{{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - volumes: - - 'configMap' - - 'persistentVolumeClaim' - - 'emptyDir' - - 'secret' - allowedHostPaths: - - pathPrefix: /etc - readOnly: true - - pathPrefix: {{ .Values.alertmanager.persistentVolume.mountPath }} - hostNetwork: false - hostPID: false - hostIPC: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - readOnlyRootFilesystem: true -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/pvc.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/pvc.yaml deleted file mode 100644 index 8c0b130..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/pvc.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{{- if not .Values.alertmanager.statefulSet.enabled -}} -{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}} -{{- if not .Values.alertmanager.persistentVolume.existingClaim -}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - {{- if .Values.alertmanager.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }} - {{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" . }} -{{ include "prometheus.namespace" . 
| indent 2 }} -spec: - accessModes: -{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }} -{{- if .Values.alertmanager.persistentVolume.storageClass }} -{{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} - storageClassName: "" -{{- else }} - storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" -{{- end }} -{{- end }} -{{- if .Values.alertmanager.persistentVolume.volumeBindingMode }} - volumeBindingModeName: "{{ .Values.alertmanager.persistentVolume.volumeBindingMode }}" -{{- end }} - resources: - requests: - storage: "{{ .Values.alertmanager.persistentVolume.size }}" -{{- if .Values.alertmanager.persistentVolume.selector }} - selector: - {{- toYaml .Values.alertmanager.persistentVolume.selector | nindent 4 }} -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/role.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/role.yaml deleted file mode 100644 index ce60eaf..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/role.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.rbac.create (eq .Values.alertmanager.useClusterRole false) (not .Values.alertmanager.useExistingRole) -}} -{{- range $.Values.alertmanager.namespaces }} -apiVersion: {{ template "rbac.apiVersion" . }} -kind: Role -metadata: - labels: - {{- include "prometheus.alertmanager.labels" $ | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" $ }} - namespace: {{ . 
}} -rules: -{{- if $.Values.podSecurityPolicy.enabled }} - - apiGroups: - - extensions - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - {{ template "prometheus.alertmanager.fullname" $ }} -{{- else }} - [] -{{- end }} -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/rolebinding.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/rolebinding.yaml deleted file mode 100644 index 906d652..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/rolebinding.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.rbac.create (eq .Values.alertmanager.useClusterRole false) -}} -{{ range $.Values.alertmanager.namespaces }} -apiVersion: {{ template "rbac.apiVersion" . }} -kind: RoleBinding -metadata: - labels: - {{- include "prometheus.alertmanager.labels" $ | nindent 4 }} - name: {{ template "prometheus.alertmanager.fullname" $ }} - namespace: {{ . }} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.alertmanager" $ }} -{{ include "prometheus.namespace" $ | indent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role -{{- if (not $.Values.alertmanager.useExistingRole) }} - name: {{ template "prometheus.alertmanager.fullname" $ }} -{{- else }} - name: {{ $.Values.alertmanager.useExistingRole }} -{{- end }} -{{- end }} -{{ end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/service.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/service.yaml deleted file mode 100644 index 9edc9ac..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/service.yaml +++ /dev/null @@ -1,53 +0,0 @@ -{{- if .Values.alertmanager.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.alertmanager.service.annotations }} - annotations: -{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} -{{- end }} - labels: - {{- 
include "prometheus.alertmanager.labels" . | nindent 4 }} -{{- if .Values.alertmanager.service.labels }} -{{ toYaml .Values.alertmanager.service.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.alertmanager.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} -spec: -{{- if .Values.alertmanager.service.clusterIP }} - clusterIP: {{ .Values.alertmanager.service.clusterIP }} -{{- end }} -{{- if .Values.alertmanager.service.externalIPs }} - externalIPs: -{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} -{{- end }} -{{- if .Values.alertmanager.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} -{{- end }} -{{- if .Values.alertmanager.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} -{{- end }} - ports: - - name: http - port: {{ .Values.alertmanager.service.servicePort }} - protocol: TCP - targetPort: 9093 - {{- if .Values.alertmanager.service.nodePort }} - nodePort: {{ .Values.alertmanager.service.nodePort }} - {{- end }} -{{- if .Values.alertmanager.service.enableMeshPeer }} - - name: meshpeer - port: 6783 - protocol: TCP - targetPort: 6783 -{{- end }} - selector: - {{- include "prometheus.alertmanager.matchLabels" . 
| nindent 4 }} -{{- if .Values.alertmanager.service.sessionAffinity }} - sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }} -{{- end }} - type: "{{ .Values.alertmanager.service.type }}" -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/serviceaccount.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/serviceaccount.yaml deleted file mode 100644 index a5d996a..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/serviceaccount.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.serviceAccounts.alertmanager.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - name: {{ template "prometheus.serviceAccountName.alertmanager" . }} -{{ include "prometheus.namespace" . | indent 2 }} - annotations: -{{ toYaml .Values.serviceAccounts.alertmanager.annotations | indent 4 }} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/sts.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/sts.yaml deleted file mode 100644 index 4f247dc..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/alertmanager/sts.yaml +++ /dev/null @@ -1,181 +0,0 @@ -{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} -apiVersion: apps/v1 -kind: StatefulSet -metadata: -{{- if .Values.alertmanager.statefulSet.annotations }} - annotations: - {{ toYaml .Values.alertmanager.statefulSet.annotations | nindent 4 }} -{{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 4 }} - {{- if .Values.alertmanager.statefulSet.labels}} - {{ toYaml .Values.alertmanager.statefulSet.labels | nindent 4 }} - {{- end}} - name: {{ template "prometheus.alertmanager.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} -spec: - serviceName: {{ template "prometheus.alertmanager.fullname" . 
}}-headless - selector: - matchLabels: - {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} - replicas: {{ .Values.alertmanager.replicaCount }} - podManagementPolicy: {{ .Values.alertmanager.statefulSet.podManagementPolicy }} - template: - metadata: - {{- if .Values.alertmanager.podAnnotations }} - annotations: - {{ toYaml .Values.alertmanager.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "prometheus.alertmanager.labels" . | nindent 8 }} - {{- if .Values.alertmanager.podLabels}} - {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} - {{- end}} - spec: -{{- if .Values.alertmanager.affinity }} - affinity: -{{ toYaml .Values.alertmanager.affinity | indent 8 }} -{{- end }} -{{- if .Values.alertmanager.schedulerName }} - schedulerName: "{{ .Values.alertmanager.schedulerName }}" -{{- end }} - serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} -{{- if .Values.alertmanager.priorityClassName }} - priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" -{{- end }} - containers: - - name: {{ template "prometheus.name" . 
}}-{{ .Values.alertmanager.name }} - image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" - imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" - env: - {{- range $key, $value := .Values.alertmanager.extraEnv }} - - name: {{ $key }} - value: {{ $value }} - {{- end }} - - name: POD_IP - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.podIP - args: - - --config.file=/etc/config/alertmanager.yml - - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} - {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} - - --cluster.advertise-address=[$(POD_IP)]:6783 - - --cluster.listen-address=0.0.0.0:6783 - {{- range $n := until (.Values.alertmanager.replicaCount | int) }} - - --cluster.peer={{ template "prometheus.alertmanager.fullname" $ }}-{{ $n }}.{{ template "prometheus.alertmanager.fullname" $ }}-headless:6783 - {{- end }} - {{- else }} - - --cluster.listen-address= - {{- end }} - {{- range $key, $value := .Values.alertmanager.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- if .Values.alertmanager.baseURL }} - - --web.external-url={{ .Values.alertmanager.baseURL }} - {{- end }} - - ports: - - containerPort: 9093 - {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} - - containerPort: 6783 - {{- end }} - readinessProbe: - httpGet: - path: {{ .Values.alertmanager.prefixURL }}/#/status - port: 9093 - initialDelaySeconds: 30 - timeoutSeconds: 30 - resources: -{{ toYaml .Values.alertmanager.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - - name: storage-volume - mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" - subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" - {{- range .Values.alertmanager.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- if .Values.configmapReload.alertmanager.enabled }} 
- - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} - image: "{{ .Values.configmapReload.alertmanager.image.repository }}:{{ .Values.configmapReload.alertmanager.image.tag }}" - imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" - args: - - --volume-dir=/etc/config - - --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload - resources: -{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - readOnly: true - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.nodeSelector }} - nodeSelector: -{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.securityContext }} - securityContext: -{{ toYaml .Values.alertmanager.securityContext | indent 8 }} - {{- end }} - {{- if .Values.alertmanager.tolerations }} - tolerations: -{{ toYaml .Values.alertmanager.tolerations | indent 8 }} - {{- end }} - volumes: - - name: config-volume - {{- if empty .Values.alertmanager.configFromSecret }} - configMap: - name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} - {{- else }} - secret: - secretName: {{ .Values.alertmanager.configFromSecret }} - {{- end }} - {{- range .Values.alertmanager.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- with .optional }} - optional: {{ . 
}} - {{- end }} - {{- end }} -{{- if .Values.alertmanager.persistentVolume.enabled }} - volumeClaimTemplates: - - metadata: - name: storage-volume - {{- if .Values.alertmanager.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 10 }} - {{- end }} - spec: - accessModes: -{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 10 }} - resources: - requests: - storage: "{{ .Values.alertmanager.persistentVolume.size }}" - {{- if .Values.server.persistentVolume.storageClass }} - {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} - storageClassName: "" - {{- else }} - storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" - {{- end }} - {{- end }} -{{- else }} - - name: storage-volume - emptyDir: - {{- if .Values.alertmanager.emptyDir.sizeLimit }} - sizeLimit: {{ .Values.alertmanager.emptyDir.sizeLimit }} - {{- else }} - {} - {{- end -}} -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/clusterrole.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/clusterrole.yaml deleted file mode 100644 index 76ecf05..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/clusterrole.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} -apiVersion: {{ template "rbac.apiVersion" . }} -kind: ClusterRole -metadata: - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }} -rules: -{{- if .Values.podSecurityPolicy.enabled }} - - apiGroups: - - extensions - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - {{ template "prometheus.pushgateway.fullname" . 
}} -{{- else }} - [] -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml deleted file mode 100644 index 15770ee..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} -apiVersion: {{ template "rbac.apiVersion" . }} -kind: ClusterRoleBinding -metadata: - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.pushgateway" . }} -{{ include "prometheus.namespace" . | indent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "prometheus.pushgateway.fullname" . }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/deploy.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/deploy.yaml deleted file mode 100644 index ffdbfcc..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/deploy.yaml +++ /dev/null @@ -1,119 +0,0 @@ -{{- if .Values.pushgateway.enabled -}} -apiVersion: {{ template "prometheus.deployment.apiVersion" . }} -kind: Deployment -metadata: -{{- if .Values.pushgateway.deploymentAnnotations }} - annotations: - {{ toYaml .Values.pushgateway.deploymentAnnotations | nindent 4 }} -{{- end }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} -spec: - selector: - {{- if .Values.schedulerName }} - schedulerName: "{{ .Values.schedulerName }}" - {{- end }} - matchLabels: - {{- include "prometheus.pushgateway.matchLabels" . 
| nindent 6 }} - replicas: {{ .Values.pushgateway.replicaCount }} - {{- if .Values.pushgateway.strategy }} - strategy: -{{ toYaml .Values.pushgateway.strategy | trim | indent 4 }} - {{ if eq .Values.pushgateway.strategy.type "Recreate" }}rollingUpdate: null{{ end }} -{{- end }} - template: - metadata: - {{- if .Values.pushgateway.podAnnotations }} - annotations: - {{ toYaml .Values.pushgateway.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 8 }} - {{- if .Values.pushgateway.podLabels }} - {{ toYaml .Values.pushgateway.podLabels | nindent 8 }} - {{- end }} - spec: - serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . }} - {{- if .Values.pushgateway.extraInitContainers }} - initContainers: -{{ toYaml .Values.pushgateway.extraInitContainers | indent 8 }} - {{- end }} -{{- if .Values.pushgateway.priorityClassName }} - priorityClassName: "{{ .Values.pushgateway.priorityClassName }}" -{{- end }} - containers: - - name: {{ template "prometheus.name" . 
}}-{{ .Values.pushgateway.name }} - image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}" - imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}" - args: - {{- range $key, $value := .Values.pushgateway.extraArgs }} - {{- $stringvalue := toString $value }} - {{- if eq $stringvalue "true" }} - - --{{ $key }} - {{- else }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- end }} - ports: - - containerPort: 9091 - livenessProbe: - httpGet: - {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} - path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/healthy - {{- else }} - path: /-/healthy - {{- end }} - port: 9091 - initialDelaySeconds: 10 - timeoutSeconds: 10 - readinessProbe: - httpGet: - {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} - path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/ready - {{- else }} - path: /-/ready - {{- end }} - port: 9091 - initialDelaySeconds: 10 - timeoutSeconds: 10 - resources: -{{ toYaml .Values.pushgateway.resources | indent 12 }} - {{- if .Values.pushgateway.persistentVolume.enabled }} - volumeMounts: - - name: storage-volume - mountPath: "{{ .Values.pushgateway.persistentVolume.mountPath }}" - subPath: "{{ .Values.pushgateway.persistentVolume.subPath }}" - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - {{- if .Values.pushgateway.nodeSelector }} - nodeSelector: -{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }} - {{- end }} - {{- with .Values.pushgateway.dnsConfig }} - dnsConfig: -{{ toYaml . 
| indent 8 }} - {{- end }} - {{- if .Values.pushgateway.securityContext }} - securityContext: -{{ toYaml .Values.pushgateway.securityContext | indent 8 }} - {{- end }} - {{- if .Values.pushgateway.tolerations }} - tolerations: -{{ toYaml .Values.pushgateway.tolerations | indent 8 }} - {{- end }} - {{- if .Values.pushgateway.affinity }} - affinity: -{{ toYaml .Values.pushgateway.affinity | indent 8 }} - {{- end }} - {{- if .Values.pushgateway.persistentVolume.enabled }} - volumes: - - name: storage-volume - persistentVolumeClaim: - claimName: {{ if .Values.pushgateway.persistentVolume.existingClaim }}{{ .Values.pushgateway.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.pushgateway.fullname" . }}{{- end }} - {{- end -}} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/ingress.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/ingress.yaml deleted file mode 100644 index 2ff72ab..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/ingress.yaml +++ /dev/null @@ -1,54 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}} -{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} -{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} -{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} -{{- $releaseName := .Release.Name -}} -{{- $serviceName := include "prometheus.pushgateway.fullname" . }} -{{- $servicePort := .Values.pushgateway.service.servicePort -}} -{{- $ingressPath := .Values.pushgateway.ingress.path -}} -{{- $ingressPathType := .Values.pushgateway.ingress.pathType -}} -{{- $extraPaths := .Values.pushgateway.ingress.extraPaths -}} -apiVersion: {{ template "ingress.apiVersion" . 
}} -kind: Ingress -metadata: -{{- if .Values.pushgateway.ingress.annotations }} - annotations: -{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}} -{{- end }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} -spec: - {{- if and $ingressSupportsIngressClassName .Values.pushgateway.ingress.ingressClassName }} - ingressClassName: {{ .Values.pushgateway.ingress.ingressClassName }} - {{- end }} - rules: - {{- range .Values.pushgateway.ingress.hosts }} - {{- $url := splitList "/" . }} - - host: {{ first $url }} - http: - paths: -{{ if $extraPaths }} -{{ toYaml $extraPaths | indent 10 }} -{{- end }} - - path: {{ $ingressPath }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $serviceName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $serviceName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- end -}} -{{- if .Values.pushgateway.ingress.tls }} - tls: -{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }} - {{- end -}} -{{- end -}} \ No newline at end of file diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/netpol.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/netpol.yaml deleted file mode 100644 index c8d1fb3..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/netpol.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.networkPolicy.enabled -}} -apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} -kind: NetworkPolicy -metadata: - name: {{ template "prometheus.pushgateway.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} - labels: - {{- include "prometheus.pushgateway.labels" . 
| nindent 4 }} -spec: - podSelector: - matchLabels: - {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} - ingress: - - from: - - podSelector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 12 }} - - ports: - - port: 9091 -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/pdb.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/pdb.yaml deleted file mode 100644 index 50beb48..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/pdb.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.pushgateway.podDisruptionBudget.enabled }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "prometheus.pushgateway.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} -spec: - maxUnavailable: {{ .Values.pushgateway.podDisruptionBudget.maxUnavailable }} - selector: - matchLabels: - {{- include "prometheus.pushgateway.labels" . | nindent 6 }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/psp.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/psp.yaml deleted file mode 100644 index 1ca3267..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/psp.yaml +++ /dev/null @@ -1,42 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} -apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "prometheus.pushgateway.fullname" . }} - labels: - {{- include "prometheus.pushgateway.labels" . 
| nindent 4 }} - annotations: -{{- if .Values.pushgateway.podSecurityPolicy.annotations }} -{{ toYaml .Values.pushgateway.podSecurityPolicy.annotations | indent 4 }} -{{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - - ALL - volumes: - - 'persistentVolumeClaim' - - 'secret' - allowedHostPaths: - - pathPrefix: {{ .Values.pushgateway.persistentVolume.mountPath }} - hostNetwork: false - hostPID: false - hostIPC: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - readOnlyRootFilesystem: true -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/pvc.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/pvc.yaml deleted file mode 100644 index 227e7a9..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/pvc.yaml +++ /dev/null @@ -1,31 +0,0 @@ -{{- if .Values.pushgateway.persistentVolume.enabled -}} -{{- if not .Values.pushgateway.persistentVolume.existingClaim -}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - {{- if .Values.pushgateway.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.pushgateway.persistentVolume.annotations | indent 4 }} - {{- end }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }} -{{ include "prometheus.namespace" . 
| indent 2 }} -spec: - accessModes: -{{ toYaml .Values.pushgateway.persistentVolume.accessModes | indent 4 }} -{{- if .Values.pushgateway.persistentVolume.storageClass }} -{{- if (eq "-" .Values.pushgateway.persistentVolume.storageClass) }} - storageClassName: "" -{{- else }} - storageClassName: "{{ .Values.pushgateway.persistentVolume.storageClass }}" -{{- end }} -{{- end }} -{{- if .Values.pushgateway.persistentVolume.volumeBindingMode }} - volumeBindingModeName: "{{ .Values.pushgateway.persistentVolume.volumeBindingMode }}" -{{- end }} - resources: - requests: - storage: "{{ .Values.pushgateway.persistentVolume.size }}" -{{- end -}} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/service.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/service.yaml deleted file mode 100644 index f05f17c..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/service.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- if .Values.pushgateway.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.pushgateway.service.annotations }} - annotations: -{{ toYaml .Values.pushgateway.service.annotations | indent 4}} -{{- end }} - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} -{{- if .Values.pushgateway.service.labels }} -{{ toYaml .Values.pushgateway.service.labels | indent 4}} -{{- end }} - name: {{ template "prometheus.pushgateway.fullname" . }} -{{ include "prometheus.namespace" . 
| indent 2 }} -spec: -{{- if .Values.pushgateway.service.clusterIP }} - clusterIP: {{ .Values.pushgateway.service.clusterIP }} -{{- end }} -{{- if .Values.pushgateway.service.externalIPs }} - externalIPs: -{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }} -{{- end }} -{{- if .Values.pushgateway.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }} -{{- end }} -{{- if .Values.pushgateway.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} -{{- end }} - ports: - - name: http - port: {{ .Values.pushgateway.service.servicePort }} - protocol: TCP - targetPort: 9091 - selector: - {{- include "prometheus.pushgateway.matchLabels" . | nindent 4 }} - type: "{{ .Values.pushgateway.service.type }}" -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/serviceaccount.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/serviceaccount.yaml deleted file mode 100644 index 8c0b876..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/serviceaccount.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if and .Values.pushgateway.enabled .Values.serviceAccounts.pushgateway.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.serviceAccountName.pushgateway" . }} -{{ include "prometheus.namespace" . 
| indent 2 }} - annotations: -{{ toYaml .Values.serviceAccounts.pushgateway.annotations | indent 4 }} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/vpa.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/vpa.yaml deleted file mode 100644 index 0ac54f9..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/pushgateway/vpa.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if .Values.pushgateway.enabled -}} -{{- if .Values.pushgateway.verticalAutoscaler.enabled -}} -apiVersion: autoscaling.k8s.io/v1beta2 -kind: VerticalPodAutoscaler -metadata: - labels: - {{- include "prometheus.pushgateway.labels" . | nindent 4 }} - name: {{ template "prometheus.pushgateway.fullname" . }}-vpa -{{ include "prometheus.namespace" . | indent 2 }} -spec: - targetRef: - apiVersion: "apps/v1" - kind: Deployment - name: {{ template "prometheus.pushgateway.fullname" . }} - updatePolicy: - updateMode: {{ .Values.pushgateway.verticalAutoscaler.updateMode | default "Off" | quote }} - resourcePolicy: - containerPolicies: {{ .Values.pushgateway.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} -{{- end -}} {{/* if .Values.pushgateway.verticalAutoscaler.enabled */}} -{{- end -}} {{/* .Values.pushgateway.enabled */}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/clusterrole.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/clusterrole.yaml deleted file mode 100644 index 2520235..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/clusterrole.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{{- if and .Values.server.enabled .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}} -apiVersion: {{ template "rbac.apiVersion" . }} -kind: ClusterRole -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . 
}} -rules: -{{- if .Values.podSecurityPolicy.enabled }} - - apiGroups: - - extensions - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - {{ template "prometheus.server.fullname" . }} -{{- end }} - - apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - nodes/metrics - - services - - endpoints - - pods - - ingresses - - configmaps - verbs: - - get - - list - - watch - - apiGroups: - - "extensions" - - "networking.k8s.io" - resources: - - ingresses/status - - ingresses - verbs: - - get - - list - - watch - - nonResourceURLs: - - "/metrics" - verbs: - - get -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/clusterrolebinding.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/clusterrolebinding.yaml deleted file mode 100644 index 5a79611..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/clusterrolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.server.enabled .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}} -apiVersion: {{ template "rbac.apiVersion" . }} -kind: ClusterRoleBinding -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.server" . }} -{{ include "prometheus.namespace" . | indent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "prometheus.server.fullname" . 
}} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/cm.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/cm.yaml deleted file mode 100644 index e012694..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/cm.yaml +++ /dev/null @@ -1,82 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if (empty .Values.server.configMapOverrideName) -}} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} -data: -{{- $root := . -}} -{{- range $key, $value := .Values.serverFiles }} - {{ $key }}: | -{{- if eq $key "prometheus.yml" }} - global: -{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} -{{- if $root.Values.server.remoteWrite }} - remote_write: -{{ $root.Values.server.remoteWrite | toYaml | indent 4 }} -{{- end }} -{{- if $root.Values.server.remoteRead }} - remote_read: -{{ $root.Values.server.remoteRead | toYaml | indent 4 }} -{{- end }} -{{- end }} -{{- if eq $key "alerts" }} -{{- if and (not (empty $value)) (empty $value.groups) }} - groups: -{{- range $ruleKey, $ruleValue := $value }} - - name: {{ $ruleKey -}}.rules - rules: -{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} -{{- end }} -{{- else }} -{{ toYaml $value | indent 4 }} -{{- end }} -{{- else }} -{{ toYaml $value | default "{}" | indent 4 }} -{{- end }} -{{- if eq $key "prometheus.yml" -}} -{{- if $root.Values.extraScrapeConfigs }} -{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} -{{- end -}} -{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }} - alerting: -{{- if $root.Values.alertRelabelConfigs }} -{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} -{{- end }} - alertmanagers: -{{- if $root.Values.server.alertmanagers }} -{{ toYaml $root.Values.server.alertmanagers | indent 8 }} -{{- else }} - - 
kubernetes_sd_configs: - - role: pod - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - {{- if $root.Values.alertmanager.prefixURL }} - path_prefix: {{ $root.Values.alertmanager.prefixURL }} - {{- end }} - relabel_configs: - - source_labels: [__meta_kubernetes_namespace] - regex: {{ $root.Release.Namespace }} - action: keep - - source_labels: [__meta_kubernetes_pod_label_app] - regex: {{ template "prometheus.name" $root }} - action: keep - - source_labels: [__meta_kubernetes_pod_label_component] - regex: alertmanager - action: keep - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe] - regex: {{ index $root.Values.alertmanager.podAnnotations "prometheus.io/probe" | default ".*" }} - action: keep - - source_labels: [__meta_kubernetes_pod_container_port_number] - regex: "9093" - action: keep -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/deploy.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/deploy.yaml deleted file mode 100644 index b7c3ffb..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/deploy.yaml +++ /dev/null @@ -1,304 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if not .Values.server.statefulSet.enabled -}} -apiVersion: {{ template "prometheus.deployment.apiVersion" . }} -kind: Deployment -metadata: -{{- if .Values.server.deploymentAnnotations }} - annotations: - {{ toYaml .Values.server.deploymentAnnotations | nindent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} -spec: - selector: - matchLabels: - {{- include "prometheus.server.matchLabels" . 
| nindent 6 }} - replicas: {{ .Values.server.replicaCount }} - {{- if .Values.server.strategy }} - strategy: -{{ toYaml .Values.server.strategy | trim | indent 4 }} - {{ if eq .Values.server.strategy.type "Recreate" }}rollingUpdate: null{{ end }} -{{- end }} - template: - metadata: - {{- if .Values.server.podAnnotations }} - annotations: - {{ toYaml .Values.server.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 8 }} - {{- if .Values.server.podLabels}} - {{ toYaml .Values.server.podLabels | nindent 8 }} - {{- end}} - spec: -{{- if .Values.server.priorityClassName }} - priorityClassName: "{{ .Values.server.priorityClassName }}" -{{- end }} -{{- if .Values.server.schedulerName }} - schedulerName: "{{ .Values.server.schedulerName }}" -{{- end }} -{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} - {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} - enableServiceLinks: true - {{- else }} - enableServiceLinks: false - {{- end }} -{{- end }} - serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} - {{- if .Values.server.extraInitContainers }} - initContainers: -{{ toYaml .Values.server.extraInitContainers | indent 8 }} - {{- end }} - containers: - {{- if .Values.configmapReload.prometheus.enabled }} - - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} - image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" - imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" - args: - - --volume-dir=/etc/config - - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload - {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} - - --volume-dir={{ . }} - {{- end }} - resources: -{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - readOnly: true - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- end }} - - - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} - image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" - imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" - {{- if .Values.server.env }} - env: -{{ toYaml .Values.server.env | indent 12}} - {{- end }} - args: - {{- if .Values.server.prefixURL }} - - --web.route-prefix={{ .Values.server.prefixURL }} - {{- end }} - {{- if .Values.server.retention }} - - --storage.tsdb.retention.time={{ .Values.server.retention }} - {{- end }} - - --config.file={{ .Values.server.configPath }} - {{- if .Values.server.storagePath }} - - --storage.tsdb.path={{ .Values.server.storagePath }} - {{- else }} - - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} - {{- end }} - - --web.console.libraries=/etc/prometheus/console_libraries - - --web.console.templates=/etc/prometheus/consoles - {{- range .Values.server.extraFlags }} - - --{{ . 
}} - {{- end }} - {{- range $key, $value := .Values.server.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- if .Values.server.baseURL }} - - --web.external-url={{ .Values.server.baseURL }} - {{- end }} - ports: - - containerPort: 9090 - readinessProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/ready - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- if .Values.server.probeHeaders }} - httpHeaders: - {{- range .Values.server.probeHeaders}} - - name: {{ .name }} - value: {{ .value }} - {{- end }} - {{- end }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} - periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} - timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} - failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} - successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} - livenessProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/healthy - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- if .Values.server.probeHeaders }} - httpHeaders: - {{- range .Values.server.probeHeaders}} - - name: {{ .name }} - value: {{ .value }} - {{- end }} - {{- end }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} - periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} - timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} - failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} - successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} - {{- if .Values.server.startupProbe.enabled }} - startupProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/healthy - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- if 
.Values.server.probeHeaders }} - httpHeaders: - {{- range .Values.server.probeHeaders}} - - name: {{ .name }} - value: {{ .value }} - {{- end }} - {{- end }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - failureThreshold: {{ .Values.server.startupProbe.failureThreshold }} - periodSeconds: {{ .Values.server.startupProbe.periodSeconds }} - timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }} - {{- end }} - resources: -{{ toYaml .Values.server.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - - name: storage-volume - mountPath: {{ .Values.server.persistentVolume.mountPath }} - subPath: "{{ .Values.server.persistentVolume.subPath }}" - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- if .Values.server.extraVolumeMounts }} - {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} - {{- end }} - {{- if .Values.server.sidecarContainers }} - {{- range $name, $spec := .Values.server.sidecarContainers }} - - name: {{ $name }} - {{- if kindIs "string" $spec }} - {{- tpl $spec $ | nindent 10 }} - {{- else }} - {{- toYaml $spec | nindent 10 }} - {{- end }} - {{- end }} - {{- end }} - hostNetwork: {{ .Values.server.hostNetwork }} - {{- if .Values.server.dnsPolicy }} - dnsPolicy: {{ .Values.server.dnsPolicy }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - {{- if .Values.server.nodeSelector }} - nodeSelector: -{{ toYaml .Values.server.nodeSelector | 
indent 8 }} - {{- end }} - {{- if .Values.server.hostAliases }} - hostAliases: -{{ toYaml .Values.server.hostAliases | indent 8 }} - {{- end }} - {{- if .Values.server.dnsConfig }} - dnsConfig: -{{ toYaml .Values.server.dnsConfig | indent 8 }} - {{- end }} - {{- if .Values.server.securityContext }} - securityContext: -{{ toYaml .Values.server.securityContext | indent 8 }} - {{- end }} - {{- if .Values.server.tolerations }} - tolerations: -{{ toYaml .Values.server.tolerations | indent 8 }} - {{- end }} - {{- if .Values.server.affinity }} - affinity: -{{ toYaml .Values.server.affinity | indent 8 }} - {{- end }} - terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} - volumes: - - name: config-volume - configMap: - name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - hostPath: - path: {{ .hostPath }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- with .optional }} - optional: {{ . }} - {{- end }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} - {{- with .optional }} - optional: {{ . 
}} - {{- end }} - {{- end }} -{{- if .Values.server.extraVolumes }} -{{ toYaml .Values.server.extraVolumes | indent 8}} -{{- end }} - - name: storage-volume - {{- if .Values.server.persistentVolume.enabled }} - persistentVolumeClaim: - claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} - {{- else }} - emptyDir: - {{- if .Values.server.emptyDir.sizeLimit }} - sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} - {{- else }} - {} - {{- end -}} - {{- end -}} -{{- end -}} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/headless-svc.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/headless-svc.yaml deleted file mode 100644 index d519f4e..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/headless-svc.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.server.statefulSet.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.server.statefulSet.headless.annotations }} - annotations: -{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -{{- if .Values.server.statefulSet.headless.labels }} -{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.server.fullname" . }}-headless -{{ include "prometheus.namespace" . 
| indent 2 }} -spec: - clusterIP: None - ports: - - name: http - port: {{ .Values.server.statefulSet.headless.servicePort }} - protocol: TCP - targetPort: 9090 - {{- if .Values.server.statefulSet.headless.gRPC.enabled }} - - name: grpc - port: {{ .Values.server.statefulSet.headless.gRPC.servicePort }} - protocol: TCP - targetPort: 10901 - {{- if .Values.server.statefulSet.headless.gRPC.nodePort }} - nodePort: {{ .Values.server.statefulSet.headless.gRPC.nodePort }} - {{- end }} - {{- end }} - - selector: - {{- include "prometheus.server.matchLabels" . | nindent 4 }} -{{- end -}} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/ingress.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/ingress.yaml deleted file mode 100644 index 000f39c..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/ingress.yaml +++ /dev/null @@ -1,59 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.server.ingress.enabled -}} -{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} -{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} -{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} -{{- $releaseName := .Release.Name -}} -{{- $serviceName := include "prometheus.server.fullname" . }} -{{- $servicePort := .Values.server.service.servicePort -}} -{{- $ingressPath := .Values.server.ingress.path -}} -{{- $ingressPathType := .Values.server.ingress.pathType -}} -{{- $extraPaths := .Values.server.ingress.extraPaths -}} -apiVersion: {{ template "ingress.apiVersion" . }} -kind: Ingress -metadata: -{{- if .Values.server.ingress.annotations }} - annotations: -{{ toYaml .Values.server.ingress.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . 
| nindent 4 }} -{{- range $key, $value := .Values.server.ingress.extraLabels }} - {{ $key }}: {{ $value }} -{{- end }} - name: {{ template "prometheus.server.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} -spec: - {{- if and $ingressSupportsIngressClassName .Values.server.ingress.ingressClassName }} - ingressClassName: {{ .Values.server.ingress.ingressClassName }} - {{- end }} - rules: - {{- range .Values.server.ingress.hosts }} - {{- $url := splitList "/" . }} - - host: {{ first $url }} - http: - paths: -{{ if $extraPaths }} -{{ toYaml $extraPaths | indent 10 }} -{{- end }} - - path: {{ $ingressPath }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $serviceName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $serviceName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- end -}} -{{- if .Values.server.ingress.tls }} - tls: -{{ toYaml .Values.server.ingress.tls | indent 4 }} - {{- end -}} -{{- end -}} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/netpol.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/netpol.yaml deleted file mode 100644 index c8870e9..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/netpol.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.networkPolicy.enabled }} -apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} -kind: NetworkPolicy -metadata: - name: {{ template "prometheus.server.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: - {{- include "prometheus.server.matchLabels" . 
| nindent 6 }} - ingress: - - ports: - - port: 9090 -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/pdb.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/pdb.yaml deleted file mode 100644 index 364cb5b..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/pdb.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.server.podDisruptionBudget.enabled }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "prometheus.server.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -spec: - maxUnavailable: {{ .Values.server.podDisruptionBudget.maxUnavailable }} - selector: - matchLabels: - {{- include "prometheus.server.labels" . | nindent 6 }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/psp.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/psp.yaml deleted file mode 100644 index e2b885f..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/psp.yaml +++ /dev/null @@ -1,51 +0,0 @@ -{{- if and .Values.server.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} -apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} -kind: PodSecurityPolicy -metadata: - name: {{ template "prometheus.server.fullname" . }} - labels: - {{- include "prometheus.server.labels" . 
| nindent 4 }} - annotations: -{{- if .Values.server.podSecurityPolicy.annotations }} -{{ toYaml .Values.server.podSecurityPolicy.annotations | indent 4 }} -{{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - allowedCapabilities: - - 'CHOWN' - volumes: - - 'configMap' - - 'persistentVolumeClaim' - - 'emptyDir' - - 'secret' - - 'hostPath' - allowedHostPaths: - - pathPrefix: /etc - readOnly: true - - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} - {{- range .Values.server.extraHostPathMounts }} - - pathPrefix: {{ .hostPath }} - readOnly: {{ .readOnly }} - {{- end }} - hostNetwork: false - hostPID: false - hostIPC: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - readOnlyRootFilesystem: false -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/pvc.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/pvc.yaml deleted file mode 100644 index 60ebc6d..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/pvc.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if not .Values.server.statefulSet.enabled -}} -{{- if .Values.server.persistentVolume.enabled -}} -{{- if not .Values.server.persistentVolume.existingClaim -}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - {{- if .Values.server.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} - {{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }} -{{ include "prometheus.namespace" . 
| indent 2 }} -spec: - accessModes: -{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} -{{- if .Values.server.persistentVolume.storageClass }} -{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} - storageClassName: "" -{{- else }} - storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" -{{- end }} -{{- end }} -{{- if .Values.server.persistentVolume.volumeBindingMode }} - volumeBindingModeName: "{{ .Values.server.persistentVolume.volumeBindingMode }}" -{{- end }} - resources: - requests: - storage: "{{ .Values.server.persistentVolume.size }}" -{{- if .Values.server.persistentVolume.selector }} - selector: - {{- toYaml .Values.server.persistentVolume.selector | nindent 4 }} -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/rolebinding.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/rolebinding.yaml deleted file mode 100644 index 93ce3ee..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/rolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and .Values.server.enabled .Values.rbac.create .Values.server.useExistingClusterRoleName .Values.server.namespaces -}} -{{ range $.Values.server.namespaces -}} ---- -apiVersion: {{ template "rbac.apiVersion" $ }} -kind: RoleBinding -metadata: - labels: - {{- include "prometheus.server.labels" $ | nindent 4 }} - name: {{ template "prometheus.server.fullname" $ }} - namespace: {{ . 
}} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.server" $ }} -{{ include "prometheus.namespace" $ | indent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ $.Values.server.useExistingClusterRoleName }} -{{ end -}} -{{ end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/service.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/service.yaml deleted file mode 100644 index 68f9889..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/service.yaml +++ /dev/null @@ -1,60 +0,0 @@ -{{- if .Values.server.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.server.service.annotations }} - annotations: -{{ toYaml .Values.server.service.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -{{- if .Values.server.service.labels }} -{{ toYaml .Values.server.service.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.server.fullname" . }} -{{ include "prometheus.namespace" . 
| indent 2 }} -spec: -{{- if .Values.server.service.clusterIP }} - clusterIP: {{ .Values.server.service.clusterIP }} -{{- end }} -{{- if .Values.server.service.externalIPs }} - externalIPs: -{{ toYaml .Values.server.service.externalIPs | indent 4 }} -{{- end }} -{{- if .Values.server.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} -{{- end }} -{{- if .Values.server.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} -{{- end }} - ports: - - name: http - port: {{ .Values.server.service.servicePort }} - protocol: TCP - targetPort: 9090 - {{- if .Values.server.service.nodePort }} - nodePort: {{ .Values.server.service.nodePort }} - {{- end }} - {{- if .Values.server.service.gRPC.enabled }} - - name: grpc - port: {{ .Values.server.service.gRPC.servicePort }} - protocol: TCP - targetPort: 10901 - {{- if .Values.server.service.gRPC.nodePort }} - nodePort: {{ .Values.server.service.gRPC.nodePort }} - {{- end }} - {{- end }} - selector: - {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} - statefulset.kubernetes.io/pod-name: {{ template "prometheus.server.fullname" . }}-{{ .Values.server.service.statefulsetReplica.replica }} - {{- else -}} - {{- include "prometheus.server.matchLabels" . 
| nindent 4 }} -{{- if .Values.server.service.sessionAffinity }} - sessionAffinity: {{ .Values.server.service.sessionAffinity }} -{{- end }} - {{- end }} - type: "{{ .Values.server.service.type }}" -{{- end -}} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/serviceaccount.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/serviceaccount.yaml deleted file mode 100644 index 9c0502a..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.serviceAccounts.server.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.serviceAccountName.server" . }} -{{ include "prometheus.namespace" . | indent 2 }} - annotations: -{{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }} -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/sts.yaml b/deploy/helm/fadvisor/charts/prometheus/templates/server/sts.yaml deleted file mode 100644 index e6fa710..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/sts.yaml +++ /dev/null @@ -1,286 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.server.statefulSet.enabled -}} -apiVersion: apps/v1 -kind: StatefulSet -metadata: -{{- if .Values.server.statefulSet.annotations }} - annotations: - {{ toYaml .Values.server.statefulSet.annotations | nindent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - {{- if .Values.server.statefulSet.labels}} - {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} - {{- end}} - name: {{ template "prometheus.server.fullname" . }} -{{ include "prometheus.namespace" . | indent 2 }} -spec: - serviceName: {{ template "prometheus.server.fullname" . }}-headless - selector: - matchLabels: - {{- include "prometheus.server.matchLabels" . 
| nindent 6 }} - replicas: {{ .Values.server.replicaCount }} - podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} - template: - metadata: - {{- if .Values.server.podAnnotations }} - annotations: - {{ toYaml .Values.server.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 8 }} - {{- if .Values.server.podLabels}} - {{ toYaml .Values.server.podLabels | nindent 8 }} - {{- end}} - spec: -{{- if .Values.server.priorityClassName }} - priorityClassName: "{{ .Values.server.priorityClassName }}" -{{- end }} -{{- if .Values.server.schedulerName }} - schedulerName: "{{ .Values.server.schedulerName }}" -{{- end }} -{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} - {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} - enableServiceLinks: true - {{- else }} - enableServiceLinks: false - {{- end }} -{{- end }} - serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} - {{- if .Values.server.extraInitContainers }} - initContainers: -{{ toYaml .Values.server.extraInitContainers | indent 8 }} - {{- end }} - containers: - {{- if .Values.configmapReload.prometheus.enabled }} - - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} - image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" - imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" - args: - - --volume-dir=/etc/config - - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload - {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} - - --volume-dir={{ . 
}} - {{- end }} - resources: -{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - readOnly: true - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- end }} - - - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} - image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag }}" - imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" - {{- if .Values.server.env }} - env: -{{ toYaml .Values.server.env | indent 12}} - {{- end }} - args: - {{- if .Values.server.prefixURL }} - - --web.route-prefix={{ .Values.server.prefixURL }} - {{- end }} - {{- if .Values.server.retention }} - - --storage.tsdb.retention.time={{ .Values.server.retention }} - {{- end }} - - --config.file={{ .Values.server.configPath }} - {{- if .Values.server.storagePath }} - - --storage.tsdb.path={{ .Values.server.storagePath }} - {{- else }} - - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} - {{- end }} - - --web.console.libraries=/etc/prometheus/console_libraries - - --web.console.templates=/etc/prometheus/consoles - {{- range .Values.server.extraFlags }} - - --{{ . 
}} - {{- end }} - {{- range $key, $value := .Values.server.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- if .Values.server.baseURL }} - - --web.external-url={{ .Values.server.baseURL }} - {{- end }} - ports: - - containerPort: 9090 - readinessProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/ready - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} - periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} - timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} - failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} - successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} - livenessProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/healthy - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} - periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} - timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} - failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} - successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} - resources: -{{ toYaml .Values.server.resources | indent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - - name: storage-volume - mountPath: {{ .Values.server.persistentVolume.mountPath }} - subPath: "{{ .Values.server.persistentVolume.subPath }}" - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath 
}} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- if .Values.server.extraVolumeMounts }} - {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} - {{- end }} - {{- if .Values.server.sidecarContainers }} - {{- range $name, $spec := .Values.server.sidecarContainers }} - - name: {{ $name }} - {{- if kindIs "string" $spec }} - {{- tpl $spec $ | nindent 10 }} - {{- else }} - {{- toYaml $spec | nindent 10 }} - {{- end }} - {{- end }} - {{- end }} - hostNetwork: {{ .Values.server.hostNetwork }} - {{- if .Values.server.dnsPolicy }} - dnsPolicy: {{ .Values.server.dnsPolicy }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - {{- if .Values.server.nodeSelector }} - nodeSelector: -{{ toYaml .Values.server.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.server.hostAliases }} - hostAliases: -{{ toYaml .Values.server.hostAliases | indent 8 }} - {{- end }} - {{- if .Values.server.dnsConfig }} - dnsConfig: -{{ toYaml .Values.server.dnsConfig | indent 8 }} - {{- end }} - {{- if .Values.server.securityContext }} - securityContext: -{{ toYaml .Values.server.securityContext | indent 8 }} - {{- end }} - {{- if .Values.server.tolerations }} - tolerations: -{{ toYaml .Values.server.tolerations | indent 8 }} - {{- end }} - {{- if .Values.server.affinity }} - affinity: -{{ toYaml .Values.server.affinity | indent 8 }} - {{- end }} - terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} - volumes: - - name: config-volume - configMap: - name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . 
}}{{- end }} - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - hostPath: - path: {{ .hostPath }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- with .optional }} - optional: {{ . }} - {{- end }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} - {{- with .optional }} - optional: {{ . }} - {{- end }} - {{- end }} -{{- if .Values.server.extraVolumes }} -{{ toYaml .Values.server.extraVolumes | indent 8}} -{{- end }} -{{- if .Values.server.persistentVolume.enabled }} - volumeClaimTemplates: - - metadata: - name: storage-volume - {{- if .Values.server.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} - {{- end }} - spec: - accessModes: -{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} - resources: - requests: - storage: "{{ .Values.server.persistentVolume.size }}" - {{- if .Values.server.persistentVolume.storageClass }} - {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} - storageClassName: "" - {{- else }} - storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" - {{- end }} - {{- end }} -{{- else }} - - name: storage-volume - emptyDir: - {{- if .Values.server.emptyDir.sizeLimit }} - sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} - {{- else }} - {} - {{- end -}} -{{- end }} -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/charts/prometheus/templates/server/vpa.yaml 
b/deploy/helm/fadvisor/charts/prometheus/templates/server/vpa.yaml deleted file mode 100644 index 981a9b4..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/templates/server/vpa.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.server.enabled -}} -{{- if .Values.server.verticalAutoscaler.enabled -}} -apiVersion: autoscaling.k8s.io/v1beta2 -kind: VerticalPodAutoscaler -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . }}-vpa -{{ include "prometheus.namespace" . | indent 2 }} -spec: - targetRef: - apiVersion: "apps/v1" -{{- if .Values.server.statefulSet.enabled }} - kind: StatefulSet -{{- else }} - kind: Deployment -{{- end }} - name: {{ template "prometheus.server.fullname" . }} - updatePolicy: - updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} - resourcePolicy: - containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} -{{- end -}} {{/* if .Values.server.verticalAutoscaler.enabled */}} -{{- end -}} {{/* .Values.server.enabled */}} diff --git a/deploy/helm/fadvisor/charts/prometheus/values.yaml b/deploy/helm/fadvisor/charts/prometheus/values.yaml deleted file mode 100644 index fb0e3bc..0000000 --- a/deploy/helm/fadvisor/charts/prometheus/values.yaml +++ /dev/null @@ -1,1666 +0,0 @@ -fullnameOverride: prometheus - -rbac: - create: true - -podSecurityPolicy: - enabled: false - -imagePullSecrets: -# - name: "image-pull-secret" - -## Define serviceAccount names for components. Defaults to component's fully qualified name. 
-## -serviceAccounts: - alertmanager: - create: true - name: - annotations: {} - pushgateway: - create: true - name: - annotations: {} - server: - create: true - name: - annotations: {} - -alertmanager: - ## If false, alertmanager will not be installed - ## - enabled: false - - ## Use a ClusterRole (and ClusterRoleBinding) - ## - If set to false - we define a Role and RoleBinding in the defined namespaces ONLY - ## This makes alertmanager work - for users who do not have ClusterAdmin privs, but wants alertmanager to operate on their own namespaces, instead of clusterwide. - useClusterRole: true - - ## Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. - useExistingRole: false - - ## alertmanager container name - ## - name: alertmanager - - ## alertmanager container image - ## - image: - repository: quay.io/prometheus/alertmanager - tag: v0.23.0 - pullPolicy: IfNotPresent - - ## alertmanager priorityClassName - ## - priorityClassName: "" - - ## Additional alertmanager container arguments - ## - extraArgs: {} - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [] - - ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug - ## so that the various internal URLs are still able to access as they are in the default case. - ## (Optional) - prefixURL: "" - - ## External URL which can access alertmanager - baseURL: "http://localhost:9093" - - ## Additional alertmanager container environment variable - ## For instance to add a http_proxy - ## - extraEnv: {} - - ## Additional alertmanager Secret mounts - # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
- extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # subPath: "" - # secretName: alertmanager-secret-files - # readOnly: true - - ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} - ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configMapOverrideName: "" - - ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config - ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configFromSecret: "" - - ## The configuration file name to be loaded to alertmanager - ## Must match the key within configuration loaded from ConfigMap/Secret - ## - configFileName: alertmanager.yml - - ingress: - ## If true, alertmanager Ingress will be created - ## - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - ## alertmanager Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## alertmanager Ingress additional labels - ## - extraLabels: {} - - ## alertmanager Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - alertmanager.domain.com - # - domain.com/alertmanager - - path: / - - # pathType is only for k8s >= 1.18 - pathType: Prefix - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## alertmanager Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-alerts-tls - # hosts: - # - alertmanager.domain.com - - ## Alertmanager Deployment Strategy type - # strategy: - # type: Recreate - - ## Node tolerations for alertmanager scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for alertmanager pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Pod affinity - ## - affinity: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - persistentVolume: - ## If true, alertmanager will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: true - - ## alertmanager data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## alertmanager data Persistent Volume Claim annotations - ## - annotations: {} - - ## alertmanager data Persistent Volume existing claim name - ## Requires alertmanager.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## alertmanager data Persistent Volume mount root path - ## - mountPath: /data - - ## alertmanager data Persistent Volume size - ## - size: 10Gi - - ## alertmanager data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## alertmanager data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. 
- ## - # volumeBindingMode: "" - - ## Subdirectory of alertmanager data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - ## Persistent Volume Claim Selector - ## Useful if Persistent Volumes have been provisioned in advance - ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector - ## - # selector: - # matchLabels: - # release: "stable" - # matchExpressions: - # - { key: environment, operator: In, values: [ dev ] } - - emptyDir: - ## alertmanager emptyDir volume size limit - ## - sizeLimit: "" - - ## Annotations to be added to alertmanager pods - ## - podAnnotations: {} - ## Tell prometheus to use a specific set of alertmanager pods - ## instead of all alertmanager pods found in the same namespace - ## Useful if you deploy multiple releases within the same namespace - ## - ## prometheus.io/probe: alertmanager-teamA - - ## Labels to be added to Prometheus AlertManager pods - ## - podLabels: {} - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) - ## - replicaCount: 1 - - ## Annotations to be added to deployment - ## - deploymentAnnotations: {} - - statefulSet: - ## If true, use a statefulset instead of a deployment for pod management. 
- ## This allows to scale replicas to more than 1 pod - ## - enabled: false - - annotations: {} - labels: {} - podManagementPolicy: OrderedReady - - ## Alertmanager headless service to use for the statefulset - ## - headless: - annotations: {} - labels: {} - - ## Enabling peer mesh service end points for enabling the HA alert manager - ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md - enableMeshPeer: false - - servicePort: 80 - - ## alertmanager resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 10m - # memory: 32Mi - # requests: - # cpu: 10m - # memory: 32Mi - - # Custom DNS configuration to be added to alertmanager pods - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - - ## Security context to be added to alertmanager pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - runAsGroup: 65534 - fsGroup: 65534 - - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Enabling peer mesh service end points for enabling the HA alert manager - ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md - # enableMeshPeer : true - - ## List of IP addresses at which the alertmanager service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 -# nodePort: 30000 - sessionAffinity: None - type: ClusterIP - - ## List of initial peers - ## Ref: https://github.com/prometheus/alertmanager/blob/main/README.md#high-availability - clusterPeers: [] - -## Monitors ConfigMap changes and POSTs to a URL -## Ref: https://github.com/jimmidyson/configmap-reload -## -configmapReload: - prometheus: - ## If false, the configmap-reload container will not be deployed - ## - enabled: 
true - - ## configmap-reload container name - ## - name: configmap-reload - - ## configmap-reload container image - ## - image: - repository: jimmidyson/configmap-reload - tag: v0.5.0 - pullPolicy: IfNotPresent - - ## Additional configmap-reload container arguments - ## - extraArgs: {} - ## Additional configmap-reload volume directories - ## - extraVolumeDirs: [] - - - ## Additional configmap-reload mounts - ## - extraConfigmapMounts: [] - # - name: prometheus-alerts - # mountPath: /etc/alerts.d - # subPath: "" - # configMap: prometheus-alerts - # readOnly: true - - - ## configmap-reload resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - alertmanager: - ## If false, the configmap-reload container will not be deployed - ## - enabled: true - - ## configmap-reload container name - ## - name: configmap-reload - - ## configmap-reload container image - ## - image: - repository: jimmidyson/configmap-reload - tag: v0.5.0 - pullPolicy: IfNotPresent - - ## Additional configmap-reload container arguments - ## - extraArgs: {} - ## Additional configmap-reload volume directories - ## - extraVolumeDirs: [] - - - ## Additional configmap-reload mounts - ## - extraConfigmapMounts: [] - # - name: prometheus-alerts - # mountPath: /etc/alerts.d - # subPath: "" - # configMap: prometheus-alerts - # readOnly: true - - - ## configmap-reload resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - - -server: - ## Prometheus server container name - ## - enabled: true - - ## Use a ClusterRole (and ClusterRoleBinding) - ## - If set to false - we define a RoleBinding in the defined namespaces ONLY - ## - ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. 
- ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. - ## - ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. - ## - # useExistingClusterRoleName: nameofclusterrole - - ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. - # namespaces: - # - yournamespace - - name: server - - # sidecarContainers - add more containers to prometheus server - # Key/Value where Key is the sidecar `- name: ` - # Example: - # sidecarContainers: - # webserver: - # image: nginx - sidecarContainers: [] - - # sidecarTemplateValues - context to be used in template for sidecarContainers - # Example: - # sidecarTemplateValues: *your-custom-globals - # sidecarContainers: - # webserver: |- - # {{ include "webserver-container-template" . }} - # Template for `webserver-container-template` might looks like this: - # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}" - # ... - # - sidecarTemplateValues: {} - - ## Prometheus server container image - ## - image: - repository: quay.io/prometheus/prometheus - tag: v2.31.1 - pullPolicy: IfNotPresent - - ## prometheus server priorityClassName - ## - priorityClassName: "" - - ## EnableServiceLinks indicates whether information about services should be injected - ## into pod's environment variables, matching the syntax of Docker links. - ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. - ## - enableServiceLinks: true - - ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug - ## so that the various internal URLs are still able to access as they are in the default case. 
- ## (Optional) - prefixURL: "" - - ## External URL which can access prometheus - ## Maybe same with Ingress host name - baseURL: "" - - ## Additional server container environment variables - ## - ## You specify this manually like you would a raw deployment manifest. - ## This means you can bind in environment variables from secrets. - ## - ## e.g. static environment variable: - ## - name: DEMO_GREETING - ## value: "Hello from the environment" - ## - ## e.g. secret environment variable: - ## - name: USERNAME - ## valueFrom: - ## secretKeyRef: - ## name: mysecret - ## key: username - env: [] - - extraFlags: - - web.enable-lifecycle - ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as - ## deleting time series. This is disabled by default. - # - web.enable-admin-api - ## - ## storage.tsdb.no-lockfile flag controls BD locking - # - storage.tsdb.no-lockfile - ## - ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) - # - storage.tsdb.wal-compression - - ## Path to a configuration file on prometheus server container FS - configPath: /etc/config/prometheus.yml - - ### The data directory used by prometheus to set --storage.tsdb.path - ### When empty server.persistentVolume.mountPath is used instead - storagePath: "" - - global: - ## How frequently to scrape targets by default - ## - scrape_interval: 1m - ## How long until a scrape request times out - ## - scrape_timeout: 10s - ## How frequently to evaluate rules - ## - evaluation_interval: 1m - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write - ## - remoteWrite: [] - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read - ## - remoteRead: [] - - ## Custom HTTP headers for Liveness/Readiness/Startup Probe - ## - ## Useful for providing HTTP Basic Auth to healthchecks - probeHeaders: [] - - ## Additional Prometheus server container arguments - ## - 
extraArgs: {} - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [] - - ## Additional Prometheus server Volume mounts - ## - extraVolumeMounts: [] - - ## Additional Prometheus server Volumes - ## - extraVolumes: [] - - ## Additional Prometheus server hostPath mounts - ## - extraHostPathMounts: [] - # - name: certs-dir - # mountPath: /etc/kubernetes/certs - # subPath: "" - # hostPath: /etc/kubernetes/certs - # readOnly: true - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /prometheus - # subPath: "" - # configMap: certs-configmap - # readOnly: true - - ## Additional Prometheus server Secret mounts - # Defines additional mounts with secrets. Secrets must be manually created in the namespace. - extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # subPath: "" - # secretName: prom-secret-files - # readOnly: true - - ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} - ## Defining configMapOverrideName will cause templates/server-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configMapOverrideName: "" - - ingress: - ## If true, Prometheus server Ingress will be created - ## - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - ## Prometheus server Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## Prometheus server Ingress additional labels - ## - extraLabels: {} - - ## Prometheus server Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - prometheus.domain.com - # - domain.com/prometheus - - path: / - - # pathType is only for k8s >= 1.18 - pathType: Prefix - - ## Extra paths to 
prepend to every host configuration. This is useful when working with annotation based services. - extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## Prometheus server Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-server-tls - # hosts: - # - prometheus.domain.com - - ## Server Deployment Strategy type - # strategy: - # type: Recreate - - ## hostAliases allows adding entries to /etc/hosts inside the containers - hostAliases: [] - # - ip: "127.0.0.1" - # hostnames: - # - "example.com" - - ## Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for Prometheus server pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Pod affinity - ## - affinity: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - persistentVolume: - ## If true, Prometheus server will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: true - - ## Prometheus server data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## Prometheus server data Persistent Volume annotations - ## - annotations: {} - - ## Prometheus server data Persistent Volume existing claim name - ## Requires server.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## Prometheus server data Persistent Volume mount root path - ## - mountPath: /data - - ## Prometheus server data Persistent Volume size - ## - size: 10Gi - - ## Prometheus server data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## Prometheus server data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. 
- ## - # volumeBindingMode: "" - - ## Subdirectory of Prometheus server data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - ## Persistent Volume Claim Selector - ## Useful if Persistent Volumes have been provisioned in advance - ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector - ## - # selector: - # matchLabels: - # release: "stable" - # matchExpressions: - # - { key: environment, operator: In, values: [ dev ] } - - emptyDir: - ## Prometheus server emptyDir volume size limit - ## - sizeLimit: "" - - ## Annotations to be added to Prometheus server pods - ## - podAnnotations: {} - # iam.amazonaws.com/role: prometheus - - ## Labels to be added to Prometheus server pods - ## - podLabels: {} - - ## Prometheus AlertManager configuration - ## - alertmanagers: [] - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) - ## - replicaCount: 1 - - ## Annotations to be added to deployment - ## - deploymentAnnotations: {} - - statefulSet: - ## If true, use a statefulset instead of a deployment for pod management. 
- ## This allows to scale replicas to more than 1 pod - ## - enabled: false - - annotations: {} - labels: {} - podManagementPolicy: OrderedReady - - ## Alertmanager headless service to use for the statefulset - ## - headless: - annotations: {} - labels: {} - servicePort: 80 - ## Enable gRPC port on service to allow auto discovery with thanos-querier - gRPC: - enabled: false - servicePort: 10901 - # nodePort: 10901 - - ## Prometheus server readiness and liveness probe initial delay and timeout - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## - tcpSocketProbeEnabled: false - probeScheme: HTTP - readinessProbeInitialDelay: 30 - readinessProbePeriodSeconds: 5 - readinessProbeTimeout: 4 - readinessProbeFailureThreshold: 3 - readinessProbeSuccessThreshold: 1 - livenessProbeInitialDelay: 30 - livenessProbePeriodSeconds: 15 - livenessProbeTimeout: 10 - livenessProbeFailureThreshold: 3 - livenessProbeSuccessThreshold: 1 - startupProbe: - enabled: false - periodSeconds: 5 - failureThreshold: 30 - timeoutSeconds: 10 - - ## Prometheus server resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 500m - # memory: 512Mi - # requests: - # cpu: 500m - # memory: 512Mi - - # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), - # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working - ## - hostNetwork: false - - # When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet - dnsPolicy: ClusterFirst - - ## Vertical Pod Autoscaler config - ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler - verticalAutoscaler: - ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) - enabled: false - # updateMode: "Auto" - # 
containerPolicies: - # - containerName: 'prometheus-server' - - # Custom DNS configuration to be added to prometheus server pods - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - ## Security context to be added to server pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - runAsGroup: 65534 - fsGroup: 65534 - - service: - annotations: {} - labels: {} - clusterIP: "" - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 - nodePort: 31111 - sessionAffinity: None - type: NodePort - - ## Enable gRPC port on service to allow auto discovery with thanos-querier - gRPC: - enabled: false - servicePort: 10901 - # nodePort: 10901 - - ## If using a statefulSet (statefulSet.enabled=true), configure the - ## service to connect to a specific replica to have a consistent view - ## of the data. - statefulsetReplica: - enabled: false - replica: 0 - - ## Prometheus server pod termination grace period - ## - terminationGracePeriodSeconds: 300 - - ## Prometheus data retention period (default if not specified is 15 days) - ## - retention: "15d" - -pushgateway: - ## If false, pushgateway will not be installed - ## - enabled: false - - ## Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## pushgateway container name - ## - name: pushgateway - - ## pushgateway container image - ## - image: - repository: prom/pushgateway - tag: v1.4.2 - pullPolicy: IfNotPresent - - ## pushgateway priorityClassName - ## - priorityClassName: "" - - ## Additional pushgateway container arguments - ## - ## for example: persistence.file: /data/pushgateway.data - extraArgs: {} - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [] - - ingress: - ## If true, pushgateway Ingress will be created - ## - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - ## pushgateway Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## pushgateway Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - pushgateway.domain.com - # - domain.com/pushgateway - - path: / - - # pathType is only for k8s >= 1.18 - pathType: Prefix - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## pushgateway Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-alerts-tls - # hosts: - # - pushgateway.domain.com - - ## Node tolerations for pushgateway scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for pushgateway pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Annotations to be added to pushgateway pods - ## - podAnnotations: {} - - ## Labels to be added to pushgateway pods - ## - podLabels: {} - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - replicaCount: 1 - - ## Annotations to be added to deployment - ## - deploymentAnnotations: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## pushgateway resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - 
# cpu: 10m - # memory: 32Mi - # requests: - # cpu: 10m - # memory: 32Mi - - ## Vertical Pod Autoscaler config - ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler - verticalAutoscaler: - ## If true a VPA object will be created for the controller - enabled: false - # updateMode: "Auto" - # containerPolicies: - # - containerName: 'prometheus-pushgateway' - - # Custom DNS configuration to be added to push-gateway pods - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - - ## Security context to be added to push-gateway pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - - service: - annotations: - prometheus.io/probe: pushgateway - labels: {} - clusterIP: "" - - ## List of IP addresses at which the pushgateway service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 9091 - type: ClusterIP - - ## pushgateway Deployment Strategy type - # strategy: - # type: Recreate - - persistentVolume: - ## If true, pushgateway will create/use a Persistent Volume Claim - ## - enabled: false - - ## pushgateway data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## pushgateway data Persistent Volume Claim annotations - ## - annotations: {} - - ## pushgateway data Persistent Volume existing claim name - ## Requires pushgateway.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## pushgateway data Persistent Volume mount root path - ## - mountPath: /data - - ## pushgateway data Persistent Volume size - ## - size: 10Gi - - ## pushgateway data 
Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## pushgateway data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. - ## - # volumeBindingMode: "" - - ## Subdirectory of pushgateway data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - -## alertmanager ConfigMap entries -## -alertmanagerFiles: - alertmanager.yml: - global: {} - # slack_api_url: '' - - receivers: - - name: default-receiver - # slack_configs: - # - channel: '@you' - # send_resolved: true - - route: - group_wait: 10s - group_interval: 5m - receiver: default-receiver - repeat_interval: 3h - -## Prometheus server ConfigMap entries -## -serverFiles: - - ## Alerts configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ - alerting_rules.yml: {} - # groups: - # - name: Instances - # rules: - # - alert: InstanceDown - # expr: up == 0 - # for: 5m - # labels: - # severity: page - # annotations: - # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' 
- # summary: 'Instance {{ $labels.instance }} down' - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml - alerts: {} - - ## Records configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ - recording_rules.yml: - groups: - - name: costs.rules - interval: 3600s - rules: - - expr: | - sum(label_replace(irate(container_cpu_usage_seconds_total{container!="POD", container!="",image!=""}[1h]), "node", "$1", "instance", "(.*)")) by (container, pod, node, namespace) * on (node) group_left() avg(avg_over_time(node_cpu_hourly_cost[1h])) by (node) - record: namespace:container_cpu_usage_costs_hourly:sum_rate - - expr: | - sum(label_replace(avg_over_time(container_memory_working_set_bytes{container!="POD",container!="",image!=""}[1h]), "node", "$1", "instance", "(.*)")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 * on (node) group_left() avg(avg_over_time(node_ram_hourly_cost[1h])) by (node) - record: namespace:container_memory_usage_costs_hourly:sum_rate - - expr: | - avg(avg_over_time(node_cpu_hourly_cost[1h])) by (node) - record: node:node_cpu_hourly_cost:avg - - expr: | - avg(avg_over_time(node_ram_hourly_cost[1h])) by (node) - record: node:node_ram_hourly_cost:avg - - expr: | - avg(avg_over_time(node_total_hourly_cost[1h])) by (node) - record: node:node_total_hourly_cost:avg - - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml - rules: {} - - prometheus.yml: - rule_files: - - /etc/config/recording_rules.yml - - /etc/config/alerting_rules.yml - ## Below two files are DEPRECATED will be removed from this default values file - - /etc/config/rules - - /etc/config/alerts - - scrape_configs: - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - # A scrape configuration for running Prometheus on a Kubernetes cluster. - # This uses separate scrape configs for cluster components (i.e. 
API server, node) - # and services to allow each to use different authentication configs. - # - # Kubernetes labels will be added as Prometheus labels on metrics via the - # `labelmap` relabeling action. - - # Scrape config for API servers. - # - # Kubernetes exposes API servers as endpoints to the default/kubernetes - # service so this uses `endpoints` role and uses relabelling to only keep - # the endpoints associated with the default/kubernetes service using the - # default named port `https`. This works for single API server deployments as - # well as HA API server deployments. - - job_name: 'kubernetes-apiservers' - - kubernetes_sd_configs: - - role: endpoints - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - # Keep only the default/kubernetes service endpoints for the https port. This - # will add targets for each API server which Kubernetes adds an endpoint to - # the default/kubernetes service. 
- relabel_configs: - - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] - action: keep - regex: default;kubernetes;https - - - job_name: 'kubernetes-nodes' - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics - - - - job_name: 'kubernetes-nodes-cadvisor' - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. 
This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - # This configuration will work only on kubelet 1.7.3+ - # As the scrape endpoints for cAdvisor have changed - # if you are using older version you need to change the replacement to - # replacement: /api/v1/nodes/$1:4194/proxy/metrics - # more info here https://github.com/coreos/prometheus-operator/issues/633 - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - - # Scrape config for service endpoints. - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/scrape`: Only scrape services that have a value of - # `true`, except if `prometheus.io/scrape-slow` is set to `true` as well. - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. 
- # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. - # * `prometheus.io/param_`: If the metrics endpoint uses parameters - # then you can set any parameter - - job_name: 'kubernetes-service-endpoints' - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] - action: drop - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) - replacement: __param_$1 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: service - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: node - - # Scrape config for slow service endpoints; same as above, but with a larger - # timeout and a larger interval - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this 
to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. - # * `prometheus.io/param_`: If the metrics endpoint uses parameters - # then you can set any parameter - - job_name: 'kubernetes-service-endpoints-slow' - - scrape_interval: 5m - scrape_timeout: 30s - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) - replacement: __param_$1 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: service - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: node - - - job_name: 'prometheus-pushgateway' - honor_labels: true - - kubernetes_sd_configs: - - role: service - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: pushgateway - - # Example scrape config for probing services via the Blackbox Exporter. 
- # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/probe`: Only probe services that have a value of `true` - - job_name: 'kubernetes-services' - - metrics_path: /probe - params: - module: [http_2xx] - - kubernetes_sd_configs: - - role: service - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: true - - source_labels: [__address__] - target_label: __param_target - - target_label: __address__ - replacement: blackbox - - source_labels: [__param_target] - target_label: instance - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - target_label: namespace - - source_labels: [__meta_kubernetes_service_name] - target_label: service - - # Example scrape config for pods - # - # The relabeling allows the actual pod scrape endpoint to be configured via the - # following annotations: - # - # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`, - # except if `prometheus.io/scrape-slow` is set to `true` as well. - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. - - job_name: 'kubernetes-pods' - - kubernetes_sd_configs: - - role: pod - - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] - action: drop - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] - action: replace - regex: (https?) 
- target_label: __scheme__ - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) - replacement: __param_$1 - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - - source_labels: [__meta_kubernetes_pod_phase] - regex: Pending|Succeeded|Failed|Completed - action: drop - - # Example Scrape config for pods which should be scraped slower. An useful example - # would be stackriver-exporter which queries an API on every scrape of the pod - # - # The relabeling allows the actual pod scrape endpoint to be configured via the - # following annotations: - # - # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. - - job_name: 'kubernetes-pods-slow' - - scrape_interval: 5m - scrape_timeout: 30s - - kubernetes_sd_configs: - - role: pod - - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] - action: replace - regex: (https?) 
- target_label: __scheme__ - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) - replacement: __param_$1 - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - - source_labels: [__meta_kubernetes_pod_phase] - regex: Pending|Succeeded|Failed|Completed - action: drop - -# adds additional scrape configs to prometheus.yml -# must be a string so you have to add a | after extraScrapeConfigs: -# example adds prometheus-blackbox-exporter scrape config -extraScrapeConfigs: |- - - job_name: 'node-exporter' - kubernetes_sd_configs: - - role: node - relabel_configs: - - source_labels: [__address__] - regex: '(.*):10250' - replacement: '${1}:9100' - target_label: __address__ - action: replace - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - - job_name: 'kube-state-metrics' - static_configs: - - targets: ['kube-state-metrics.crane-system.svc.cluster.local:8080'] - - # this is used to scrape cost exporter - - job_name: "fadvisor-cost-exporter" - honor_timestamps: true - scheme: http - metrics_path: /metrics - static_configs: - - targets: ['cost-exporter.crane-system.svc.cluster.local:8081'] - -# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager -# useful in H/A prometheus with different external labels but the same alerts -alertRelabelConfigs: - # alert_relabel_configs: - # - source_labels: [dc] - # regex: (.+)\d+ - # target_label: dc - -networkPolicy: - ## Enable creation of NetworkPolicy 
resources. - ## - enabled: false - -# Force namespace of namespaced resources -forceNamespace: null diff --git a/deploy/helm/fadvisor/requirements.yaml b/deploy/helm/fadvisor/requirements.yaml deleted file mode 100644 index 5f7d80c..0000000 --- a/deploy/helm/fadvisor/requirements.yaml +++ /dev/null @@ -1,13 +0,0 @@ -dependencies: - - name: kube-state-metrics - condition: fadvisor.kube-state-metrics.enabled,kube-state-metrics.enabled - repository: file://./charts/kube-state-metrics - - name: node-exporter - condition: fadvisor.node-exporter.enabled,node-exporter.enabled - repository: file://./charts/node-exporter - - name: prometheus - condition: fadvisor.prometheus.enabled,prometheus.enabled - repository: file://./charts/prometheus - - name: grafana - condition: fadvisor.grafana.enabled,grafana.enabled - repository: file://./charts/grafana \ No newline at end of file diff --git a/deploy/helm/fadvisor/templates/_helpers.tpl b/deploy/helm/fadvisor/templates/_helpers.tpl deleted file mode 100644 index 54932ae..0000000 --- a/deploy/helm/fadvisor/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "fadvisor.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "fadvisor.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. 
-*/}} -{{- define "fadvisor.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "fadvisor.labels" -}} -helm.sh/chart: {{ include "fadvisor.chart" . }} -{{ include "fadvisor.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "fadvisor.selectorLabels" -}} -app.kubernetes.io/name: {{ include "fadvisor.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "fadvisor.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "fadvisor.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/deploy/helm/fadvisor/values.yaml b/deploy/helm/fadvisor/values.yaml deleted file mode 100644 index 2286d9f..0000000 --- a/deploy/helm/fadvisor/values.yaml +++ /dev/null @@ -1,622 +0,0 @@ -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -cost-exporter: - enabled: true - replicaCount: 1 - - # image for prometheus deployment - image: - repository: "docker.io/gocrane/fadvisor-cost-exporter" - pullPolicy: Always - tag: "v0.1.0" - - service: - type: ClusterIP - port: 8081 - - tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" - - extraArgs: - v: 4 - -node-exporter: - enabled: true - -kube-state-metrics: - enabled: true - -prometheus: - enabled: true - server: - ## Prometheus server container name - ## - enabled: true - - ## Use a ClusterRole (and ClusterRoleBinding) - ## - If set to false - we define a RoleBinding in the defined namespaces ONLY - ## - ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. - ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. - ## - ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. - ## - # useExistingClusterRoleName: nameofclusterrole - - ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. - # namespaces: - # - yournamespace - - name: server - - # sidecarContainers - add more containers to prometheus server - # Key/Value where Key is the sidecar `- name: ` - # Example: - # sidecarContainers: - # webserver: - # image: nginx - sidecarContainers: [ ] - - # sidecarTemplateValues - context to be used in template for sidecarContainers - # Example: - # sidecarTemplateValues: *your-custom-globals - # sidecarContainers: - # webserver: |- - # {{ include "webserver-container-template" . 
}} - # Template for `webserver-container-template` might looks like this: - # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}" - # ... - # - sidecarTemplateValues: { } - - ## Prometheus server container image - ## - image: - repository: quay.io/prometheus/prometheus - tag: v2.31.1 - pullPolicy: IfNotPresent - - ## prometheus server priorityClassName - ## - priorityClassName: "" - - ## EnableServiceLinks indicates whether information about services should be injected - ## into pod's environment variables, matching the syntax of Docker links. - ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. - ## - enableServiceLinks: true - - ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug - ## so that the various internal URLs are still able to access as they are in the default case. - ## (Optional) - prefixURL: "" - - ## External URL which can access prometheus - ## Maybe same with Ingress host name - baseURL: "" - - ## Additional server container environment variables - ## - ## You specify this manually like you would a raw deployment manifest. - ## This means you can bind in environment variables from secrets. - ## - ## e.g. static environment variable: - ## - name: DEMO_GREETING - ## value: "Hello from the environment" - ## - ## e.g. secret environment variable: - ## - name: USERNAME - ## valueFrom: - ## secretKeyRef: - ## name: mysecret - ## key: username - env: [ ] - - extraFlags: - - web.enable-lifecycle - ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as - ## deleting time series. This is disabled by default. 
- # - web.enable-admin-api - ## - ## storage.tsdb.no-lockfile flag controls BD locking - # - storage.tsdb.no-lockfile - ## - ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) - # - storage.tsdb.wal-compression - - ## Path to a configuration file on prometheus server container FS - configPath: /etc/config/prometheus.yml - - ### The data directory used by prometheus to set --storage.tsdb.path - ### When empty server.persistentVolume.mountPath is used instead - storagePath: "" - - global: - ## How frequently to scrape targets by default - ## - scrape_interval: 1m - ## How long until a scrape request times out - ## - scrape_timeout: 10s - ## How frequently to evaluate rules - ## - evaluation_interval: 1m - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write - ## - remoteWrite: [ ] - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read - ## - remoteRead: [ ] - - ## Custom HTTP headers for Liveness/Readiness/Startup Probe - ## - ## Useful for providing HTTP Basic Auth to healthchecks - probeHeaders: [ ] - - ## Additional Prometheus server container arguments - ## - extraArgs: { } - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [ ] - - ## Additional Prometheus server Volume mounts - ## - extraVolumeMounts: [ ] - - ## Additional Prometheus server Volumes - ## - extraVolumes: [ ] - - ## Additional Prometheus server hostPath mounts - ## - extraHostPathMounts: [ ] - # - name: certs-dir - # mountPath: /etc/kubernetes/certs - # subPath: "" - # hostPath: /etc/kubernetes/certs - # readOnly: true - - extraConfigmapMounts: [ ] - # - name: certs-configmap - # mountPath: /prometheus - # subPath: "" - # configMap: certs-configmap - # readOnly: true - - ## Additional Prometheus server Secret mounts - # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
- extraSecretMounts: [ ] - # - name: secret-files - # mountPath: /etc/secrets - # subPath: "" - # secretName: prom-secret-files - # readOnly: true - - ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} - ## Defining configMapOverrideName will cause templates/server-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configMapOverrideName: "" - - ingress: - ## If true, Prometheus server Ingress will be created - ## - enabled: false - - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - ## Prometheus server Ingress annotations - ## - annotations: { } - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## Prometheus server Ingress additional labels - ## - extraLabels: { } - - ## Prometheus server Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [ ] - # - prometheus.domain.com - # - domain.com/prometheus - - path: / - - # pathType is only for k8s >= 1.18 - pathType: Prefix - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [ ] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## Prometheus server Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [ ] - # - secretName: prometheus-server-tls - # hosts: - # - prometheus.domain.com - - ## Server Deployment Strategy type - # strategy: - # type: Recreate - - ## hostAliases allows adding entries to /etc/hosts inside the containers - hostAliases: [ ] - # - ip: "127.0.0.1" - # hostnames: - # - "example.com" - - ## Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [ ] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for Prometheus server pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: { } - - ## Pod affinity - ## - affinity: { } - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - persistentVolume: - ## If true, Prometheus server will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: true - - ## Prometheus server data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## Prometheus server data Persistent Volume annotations - ## - annotations: { } - - ## Prometheus server data Persistent Volume existing claim name - ## Requires server.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## Prometheus server data Persistent Volume mount root path - ## - mountPath: /data - - ## Prometheus server data Persistent Volume size - ## - size: 10Gi - - ## Prometheus server data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## Prometheus server data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. 
- ## - # volumeBindingMode: "" - - ## Subdirectory of Prometheus server data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - ## Persistent Volume Claim Selector - ## Useful if Persistent Volumes have been provisioned in advance - ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector - ## - # selector: - # matchLabels: - # release: "stable" - # matchExpressions: - # - { key: environment, operator: In, values: [ dev ] } - - emptyDir: - ## Prometheus server emptyDir volume size limit - ## - sizeLimit: "" - - ## Annotations to be added to Prometheus server pods - ## - podAnnotations: { } - # iam.amazonaws.com/role: prometheus - - ## Labels to be added to Prometheus server pods - ## - podLabels: { } - - ## Prometheus AlertManager configuration - ## - alertmanagers: [ ] - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: { } - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) - ## - replicaCount: 1 - - ## Annotations to be added to deployment - ## - deploymentAnnotations: { } - - statefulSet: - ## If true, use a statefulset instead of a deployment for pod management. 
- ## This allows to scale replicas to more than 1 pod - ## - enabled: false - - annotations: { } - labels: { } - podManagementPolicy: OrderedReady - - ## Alertmanager headless service to use for the statefulset - ## - headless: - annotations: { } - labels: { } - servicePort: 80 - ## Enable gRPC port on service to allow auto discovery with thanos-querier - gRPC: - enabled: false - servicePort: 10901 - # nodePort: 10901 - - ## Prometheus server readiness and liveness probe initial delay and timeout - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## - tcpSocketProbeEnabled: false - probeScheme: HTTP - readinessProbeInitialDelay: 30 - readinessProbePeriodSeconds: 5 - readinessProbeTimeout: 4 - readinessProbeFailureThreshold: 3 - readinessProbeSuccessThreshold: 1 - livenessProbeInitialDelay: 30 - livenessProbePeriodSeconds: 15 - livenessProbeTimeout: 10 - livenessProbeFailureThreshold: 3 - livenessProbeSuccessThreshold: 1 - startupProbe: - enabled: false - periodSeconds: 5 - failureThreshold: 30 - timeoutSeconds: 10 - - ## Prometheus server resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: { } - # limits: - # cpu: 500m - # memory: 512Mi - # requests: - # cpu: 500m - # memory: 512Mi - - # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), - # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working - ## - hostNetwork: false - - # When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet - dnsPolicy: ClusterFirst - - ## Vertical Pod Autoscaler config - ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler - verticalAutoscaler: - ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) - enabled: false - # updateMode: "Auto" 
- # containerPolicies: - # - containerName: 'prometheus-server' - - # Custom DNS configuration to be added to prometheus server pods - dnsConfig: { } - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - ## Security context to be added to server pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - runAsGroup: 65534 - fsGroup: 65534 - - service: - annotations: { } - labels: { } - clusterIP: "" - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [ ] - - loadBalancerIP: "" - loadBalancerSourceRanges: [ ] - servicePort: 80 - nodePort: 31111 - sessionAffinity: None - type: NodePort - - ## Enable gRPC port on service to allow auto discovery with thanos-querier - gRPC: - enabled: false - servicePort: 10901 - # nodePort: 10901 - - ## If using a statefulSet (statefulSet.enabled=true), configure the - ## service to connect to a specific replica to have a consistent view - ## of the data. - statefulsetReplica: - enabled: false - replica: 0 - - ## Prometheus server pod termination grace period - ## - terminationGracePeriodSeconds: 300 - - ## Prometheus data retention period (default if not specified is 15 days) - ## - retention: "15d" - - serverFiles: - ## Alerts configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ - alerting_rules.yml: {} - # groups: - # - name: Instances - # rules: - # - alert: InstanceDown - # expr: up == 0 - # for: 5m - # labels: - # severity: page - # annotations: - # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' 
- # summary: 'Instance {{ $labels.instance }} down' - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml - alerts: {} - - ## Records configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ - recording_rules.yml: - groups: - - name: costs.rules - interval: 3600s - rules: - - expr: | - sum(label_replace(irate(container_cpu_usage_seconds_total{container!="POD", container!="",image!=""}[1h]), "node", "$1", "instance", "(.*)")) by (container, pod, node, namespace) * on (node) group_left() avg(avg_over_time(node_cpu_hourly_cost[1h])) by (node) - record: namespace:container_cpu_usage_costs_hourly:sum_rate - - expr: | - sum(label_replace(avg_over_time(container_memory_working_set_bytes{container!="POD",container!="",image!=""}[1h]), "node", "$1", "instance", "(.*)")) by (container, pod, node, namespace) / 1024.0 / 1024.0 / 1024.0 * on (node) group_left() avg(avg_over_time(node_ram_hourly_cost[1h])) by (node) - record: namespace:container_memory_usage_costs_hourly:sum_rate - - expr: | - avg(avg_over_time(node_cpu_hourly_cost[1h])) by (node) - record: node:node_cpu_hourly_cost:avg - - expr: | - avg(avg_over_time(node_ram_hourly_cost[1h])) by (node) - record: node:node_ram_hourly_cost:avg - - expr: | - avg(avg_over_time(node_total_hourly_cost[1h])) by (node) - record: node:node_total_hourly_cost:avg - - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml - rules: {} - - # adds additional scrape configs to prometheus.yml - # must be a string so you have to add a | after extraScrapeConfigs: - # example adds prometheus-blackbox-exporter scrape config - extraScrapeConfigs: |- - - job_name: 'node-exporter' - kubernetes_sd_configs: - - role: node - relabel_configs: - - source_labels: [ __address__ ] - regex: '(.*):10250' - replacement: '${1}:9100' - target_label: __address__ - action: replace - - action: labelmap - regex: 
__meta_kubernetes_node_label_(.+) - - - job_name: 'kube-state-metrics' - static_configs: - - targets: [ 'kube-state-metrics.crane-system.svc.cluster.local:8080' ] - - # this is used to scrape cost exporter - - job_name: "fadvisor-cost-exporter" - honor_timestamps: true - scheme: http - metrics_path: /metrics - static_configs: - - targets: [ 'cost-exporter.crane-system.svc.cluster.local:8081' ] - - -grafana: - enabled: true - service: - enabled: true - type: NodePort - nodePort: 31112 - port: 80 - targetPort: 3000 - # targetPort: 4181 To be used with a proxy extraContainer - annotations: { } - labels: { } - portName: service - resources: { } - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - - ## Node labels for pod assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - # - nodeSelector: {} - - ## Tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] diff --git a/deploy/integration/monitoring/grafana/deployment.yaml b/deploy/integration/monitoring/grafana/deployment.yaml deleted file mode 100644 index dac2415..0000000 --- a/deploy/integration/monitoring/grafana/deployment.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: grafana - name: grafana - namespace: crane-monitoring -spec: - selector: - matchLabels: - app: grafana - template: - metadata: - labels: - app: grafana - spec: - securityContext: - fsGroup: 472 - supplementalGroups: - - 0 - containers: - - name: grafana - image: grafana/grafana:7.5.2 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 3000 - name: http-grafana - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /robots.txt - port: 3000 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 2 - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - 
successThreshold: 1 - tcpSocket: - port: 3000 - timeoutSeconds: 1 - resources: - requests: - cpu: 250m - memory: 750Mi - volumeMounts: - - mountPath: /var/lib/grafana - name: grafana-pv - volumes: - - name: grafana-pv - persistentVolumeClaim: - claimName: grafana-pvc \ No newline at end of file diff --git a/deploy/integration/monitoring/grafana/pvc.yaml b/deploy/integration/monitoring/grafana/pvc.yaml deleted file mode 100644 index 9909525..0000000 --- a/deploy/integration/monitoring/grafana/pvc.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: grafana-bak-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi \ No newline at end of file diff --git a/deploy/integration/monitoring/grafana/service.yaml b/deploy/integration/monitoring/grafana/service.yaml deleted file mode 100644 index c9eda31..0000000 --- a/deploy/integration/monitoring/grafana/service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - namespace: crane-monitoring - name: grafana-bak -spec: - ports: - - port: 3000 - protocol: TCP - targetPort: http-grafana-bak - selector: - app: grafana-bak - sessionAffinity: None - type: LoadBalancer \ No newline at end of file diff --git a/deploy/integration/monitoring/kube-state-metrics/deployment.yaml b/deploy/integration/monitoring/kube-state-metrics/deployment.yaml deleted file mode 100644 index c15f1a4..0000000 --- a/deploy/integration/monitoring/kube-state-metrics/deployment.yaml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/version: v1.8.0 - name: kube-state-metrics - namespace: crane-monitoring -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: kube-state-metrics - template: - metadata: - labels: - app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/version: v1.8.0 - spec: - serviceAccountName: 
kube-state-metrics - containers: - - image: quay.io/coreos/kube-state-metrics:v1.8.0 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - initialDelaySeconds: 5 - timeoutSeconds: 5 - name: kube-state-metrics - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 8081 - name: telemetry - readinessProbe: - httpGet: - path: / - port: 8081 - initialDelaySeconds: 5 - timeoutSeconds: 5 - nodeSelector: - kubernetes.io/os: linux \ No newline at end of file diff --git a/deploy/integration/monitoring/kube-state-metrics/rbac.yaml b/deploy/integration/monitoring/kube-state-metrics/rbac.yaml deleted file mode 100644 index b289b06..0000000 --- a/deploy/integration/monitoring/kube-state-metrics/rbac.yaml +++ /dev/null @@ -1,96 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-state-metrics - namespace: crane-monitoring ---- - -apiVersion: rbac.authorization.k8s.io/v1 -# kubernetes versions before 1.8.0 should use rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - namespace: crane-monitoring - name: kube-state-metrics-resizer -rules: - - apiGroups: [""] - resources: - - pods - verbs: ["get"] - - apiGroups: ["extensions"] - resources: - - deployments - resourceNames: ["kube-state-metrics"] - verbs: ["get", "update"] ---- - -apiVersion: rbac.authorization.k8s.io/v1 -# kubernetes versions before 1.8.0 should use rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: kube-state-metrics -rules: - - apiGroups: [""] - resources: - - configmaps - - secrets - - nodes - - pods - - services - - resourcequotas - - replicationcontrollers - - limitranges - - persistentvolumeclaims - - persistentvolumes - - namespaces - - endpoints - verbs: ["list", "watch"] - - apiGroups: ["extensions"] - resources: - - daemonsets - - deployments - - replicasets - verbs: ["list", "watch"] - - apiGroups: ["apps"] - resources: - - statefulsets - verbs: ["list", "watch"] - - apiGroups: ["batch"] - resources: - - cronjobs - - jobs - verbs: 
["list", "watch"] - - apiGroups: ["autoscaling"] - resources: - - horizontalpodautoscalers - verbs: ["list", "watch"] ---- - -apiVersion: rbac.authorization.k8s.io/v1 -# kubernetes versions before 1.8.0 should use rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: kube-state-metrics - namespace: crane-monitoring -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kube-state-metrics-resizer -subjects: - - kind: ServiceAccount - name: kube-state-metrics - namespace: crane-monitoring ---- - -apiVersion: rbac.authorization.k8s.io/v1 -# kubernetes versions before 1.8.0 should use rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: kube-state-metrics -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-state-metrics -subjects: - - kind: ServiceAccount - name: kube-state-metrics - namespace: crane-monitoring diff --git a/deploy/integration/monitoring/kube-state-metrics/service.yaml b/deploy/integration/monitoring/kube-state-metrics/service.yaml deleted file mode 100644 index 020ef15..0000000 --- a/deploy/integration/monitoring/kube-state-metrics/service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/name: kube-state-metrics - app.kubernetes.io/version: v1.8.0 - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '8080' - name: kube-state-metrics - namespace: crane-monitoring -spec: - clusterIP: None - ports: - - name: http-metrics - port: 8080 - targetPort: http-metrics - - name: telemetry - port: 8081 - targetPort: telemetry - selector: - app.kubernetes.io/name: kube-state-metrics \ No newline at end of file diff --git a/deploy/integration/monitoring/node-exporter/daemonset.yaml b/deploy/integration/monitoring/node-exporter/daemonset.yaml deleted file mode 100644 index d2abe5f..0000000 --- a/deploy/integration/monitoring/node-exporter/daemonset.yaml +++ /dev/null @@ -1,72 +0,0 @@ -apiVersion: apps/v1 
-kind: DaemonSet -metadata: - name: node-exporter - namespace: crane-monitoring - labels: - app: node-exporter -spec: - selector: - matchLabels: - app: node-exporter - template: - metadata: - labels: - app: node-exporter - spec: - hostPID: true - hostIPC: true - hostNetwork: true - nodeSelector: - kubernetes.io/os: linux - containers: - - name: node-exporter - image: prom/node-exporter:v1.1.1 - args: - - --web.listen-address=$(HOSTIP):9100 - - --path.procfs=/host/proc - - --path.sysfs=/host/sys - - --path.rootfs=/host/root - - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) - - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ - ports: - - containerPort: 9100 - env: - - name: HOSTIP - valueFrom: - fieldRef: - fieldPath: status.hostIP - resources: - requests: - cpu: 150m - memory: 180Mi - limits: - cpu: 150m - memory: 180Mi - securityContext: - runAsNonRoot: true - runAsUser: 65534 - volumeMounts: - - name: proc - mountPath: /host/proc - - name: sys - mountPath: /host/sys - - name: root - mountPath: /host/root - mountPropagation: HostToContainer - readOnly: true - tolerations: - - operator: "Exists" - volumes: - - name: proc - hostPath: - path: /proc - - name: dev - hostPath: - path: /dev - - name: sys - hostPath: - path: /sys - - name: root - hostPath: - path: / \ No newline at end of file diff --git a/deploy/integration/monitoring/node-exporter/service.yaml b/deploy/integration/monitoring/node-exporter/service.yaml deleted file mode 100644 index 38c8df3..0000000 --- a/deploy/integration/monitoring/node-exporter/service.yaml +++ /dev/null @@ -1,17 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: node-exporter - namespace: crane-monitoring - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '9100' -spec: - selector: - app.kubernetes.io/component: exporter - 
app.kubernetes.io/name: node-exporter - ports: - - name: node-exporter - protocol: TCP - port: 9100 - targetPort: 9100 \ No newline at end of file diff --git a/deploy/integration/monitoring/prometheus/configmap.yaml b/deploy/integration/monitoring/prometheus/configmap.yaml deleted file mode 100644 index d6b485a..0000000 --- a/deploy/integration/monitoring/prometheus/configmap.yaml +++ /dev/null @@ -1,162 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: prometheus-server-conf - labels: - name: prometheus-server-conf - namespace: crane-monitoring -data: - prometheus.rules: |- - groups: - - name: devopscube demo alert - rules: - - alert: High Pod Memory - expr: sum(container_memory_usage_bytes) > 1 - for: 1m - labels: - severity: slack - annotations: - summary: High Memory Usage - prometheus.yml: |- - global: - scrape_interval: 5s - evaluation_interval: 5s - rule_files: - - /etc/prometheus/prometheus.rules - alerting: - alertmanagers: - - scheme: http - static_configs: - - targets: - - "alertmanager.monitoring.svc:9093" - - scrape_configs: - - - job_name: 'kubelet' - kubernetes_sd_configs: - - role: node - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - - job_name: 'kubernetes-apiservers' - kubernetes_sd_configs: - - role: endpoints - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - relabel_configs: - - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] - action: keep - regex: default;kubernetes;https - - - job_name: 'kubernetes-pods' - kubernetes_sd_configs: - - role: pod - relabel_configs: - - source_labels: 
[__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: kubernetes_pod_name - - - job_name: 'kubernetes-endpoints' - kubernetes_sd_configs: - - role: endpoints - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) 
- - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: ([^:]+)(?::\d+)?;(\d+) # RE2 正则规则,+是一次多多次,?是0次或1次,其中?:表示非匹配组(意思就是不获取匹配结果) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: kubernetes_name - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: kubernetes_pod_name - - - - job_name: 'kubernetes-cadvisor' - kubernetes_sd_configs: - - role: node - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - replacement: $1 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - replacement: /metrics/cadvisor # /metrics -> /metrics/cadvisor - target_label: __metrics_path__ - # 下面的方式不推荐使用 - # - target_label: __address__ - # replacement: kubernetes.default.svc:443 - # - source_labels: [__meta_kubernetes_node_name] - # regex: (.+) - # target_label: __metrics_path__ - # replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor - - - job_name: 'node-exporter' - kubernetes_sd_configs: - - role: node - relabel_configs: - - source_labels: [__address__] - regex: '(.*):10250' - replacement: '${1}:9100' - target_label: __address__ - action: replace - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - - job_name: 'kube-state-metrics' - static_configs: - - targets: ['kube-state-metrics.crane-monitoring.svc.cluster.local:8080'] - - - - job_name: 
"fadvisor-cost-exporter" - scheme: http - metrics_path: /metrics - static_configs: - - targets: ['fadvisor-cost-exporter.crane-system.svc.cluster.local:8081'] \ No newline at end of file diff --git a/deploy/integration/monitoring/prometheus/deployment.yaml b/deploy/integration/monitoring/prometheus/deployment.yaml deleted file mode 100644 index 053da61..0000000 --- a/deploy/integration/monitoring/prometheus/deployment.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: prometheus-server - namespace: crane-monitoring - labels: - app: prometheus-server -spec: - replicas: 1 - selector: - matchLabels: - app: prometheus-server - template: - metadata: - labels: - app: prometheus-server - spec: - serviceAccountName: prometheus - securityContext: - runAsUser: 0 - containers: - - name: prometheus - image: prom/prometheus - args: - - "--config.file=/etc/prometheus/prometheus.yml" - - "--storage.tsdb.path=/prometheus/" - - "--web.enable-lifecycle" - ports: - - containerPort: 9090 - volumeMounts: - - name: prometheus-config-volume - mountPath: /etc/prometheus/ - - name: prometheus-storage-volume - mountPath: /prometheus/ - volumes: - - name: prometheus-config-volume - configMap: - defaultMode: 420 - name: prometheus-server-conf - - - name: prometheus-storage-volume - persistentVolumeClaim: - claimName: prometheus-data \ No newline at end of file diff --git a/deploy/integration/monitoring/prometheus/pvc.yaml b/deploy/integration/monitoring/prometheus/pvc.yaml deleted file mode 100644 index 36e8da9..0000000 --- a/deploy/integration/monitoring/prometheus/pvc.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: prometheus-data - namespace: crane-monitoring -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi \ No newline at end of file diff --git a/deploy/integration/monitoring/prometheus/rbac.yaml b/deploy/integration/monitoring/prometheus/rbac.yaml deleted file 
mode 100644 index 5100fcf..0000000 --- a/deploy/integration/monitoring/prometheus/rbac.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: prometheus - namespace: crane-monitoring ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: prometheus -rules: - - apiGroups: - - "" - resources: - - nodes - - services - - endpoints - - pods - - nodes/proxy - verbs: - - get - - list - - watch - - apiGroups: - - "extensions" - resources: - - ingresses - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - configmaps - - nodes/metrics - verbs: - - get - - nonResourceURLs: - - /metrics - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: prometheus -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: prometheus -subjects: - - kind: ServiceAccount - name: prometheus - namespace: crane-monitoring diff --git a/deploy/integration/monitoring/prometheus/service.yaml b/deploy/integration/monitoring/prometheus/service.yaml deleted file mode 100644 index 5db2186..0000000 --- a/deploy/integration/monitoring/prometheus/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: prometheus-service - namespace: crane-monitoring - annotations: - prometheus.io/scrape: 'true' - prometheus.io/port: '9090' -spec: - selector: - app: prometheus-server - type: ClusterIP - ports: - - port: 80 - targetPort: 9090 \ No newline at end of file diff --git a/pkg/consts/const.go b/pkg/consts/const.go index b9bc81c..e88d698 100644 --- a/pkg/consts/const.go +++ b/pkg/consts/const.go @@ -5,7 +5,7 @@ import "time" const ( CraneNamespace = "crane-system" - CostExporterName = "cost-exporter" + CostExporterName = "fadvisor" // DefaultLeaseDuration is the default LeaseDuration for leader election. DefaultLeaseDuration = 15 * time.Second