From 52e6d8c0e4f66385b9e3b4e4fde2441257087332 Mon Sep 17 00:00:00 2001 From: Branden Rolston Date: Tue, 10 Dec 2019 16:02:22 -0800 Subject: [PATCH] Upgrade prometheus-operator to 8.3.3 --- staging/prometheus-operator/CONTRIBUTING.md | 1 - staging/prometheus-operator/Chart.yaml | 10 +- staging/prometheus-operator/OWNERS | 8 +- staging/prometheus-operator/README.md | 534 ++-- .../charts/grafana-3.5.10.tgz | Bin 15945 -> 0 bytes .../charts/grafana-4.0.5.tgz | Bin 0 -> 18379 bytes .../charts/kube-state-metrics-2.4.1.tgz | Bin 5759 -> 5753 bytes .../charts/prometheus-node-exporter-1.5.1.tgz | Bin 5999 -> 0 bytes .../charts/prometheus-node-exporter-1.7.3.tgz | Bin 0 -> 6072 bytes .../ci/01-provision-crds-values.yaml | 42 + .../ci/02-test-without-crds-values.yaml | 6 + .../prometheus-operator/ci/test-values.yaml | 1604 ---------- .../crd-alertmanager.yaml | 18 +- .../crd-podmonitor.yaml | 12 +- .../crd-prometheus.yaml | 15 +- .../crd-prometheusrules.yaml | 12 +- .../crd-servicemonitor.yaml | 12 +- .../dependent-charts/grafana/.helmignore | 0 .../dependent-charts/grafana/Chart.yaml | 0 .../dependent-charts/grafana/README.md | 0 .../grafana/dashboards/custom-dashboard.json | 0 .../grafana/templates/NOTES.txt | 0 .../grafana/templates/_helpers.tpl | 0 .../grafana/templates/clusterrole.yaml | 0 .../grafana/templates/clusterrolebinding.yaml | 0 .../configmap-dashboard-provider.yaml | 0 .../grafana/templates/configmap.yaml | 0 .../templates/dashboards-json-configmap.yaml | 0 .../grafana/templates/deployment.yaml | 0 .../grafana/templates/ingress.yaml | 0 .../grafana/templates/podsecuritypolicy.yaml | 0 .../grafana/templates/pvc.yaml | 0 .../grafana/templates/role.yaml | 0 .../grafana/templates/rolebinding.yaml | 0 .../grafana/templates/secret.yaml | 0 .../grafana/templates/service.yaml | 0 .../grafana/templates/serviceaccount.yaml | 0 .../templates/tests/test-configmap.yaml | 0 .../tests/test-podsecuritypolicy.yaml | 0 .../grafana/templates/tests/test-role.yaml | 0 
.../templates/tests/test-rolebinding.yaml | 0 .../templates/tests/test-serviceaccount.yaml | 0 .../grafana/templates/tests/test.yaml | 0 .../dependent-charts/grafana/values.yaml | 0 .../kube-state-metrics/.helmignore | 0 .../kube-state-metrics/Chart.yaml | 0 .../kube-state-metrics/OWNERS | 0 .../kube-state-metrics/README.md | 0 .../kube-state-metrics/templates/NOTES.txt | 0 .../kube-state-metrics/templates/_helpers.tpl | 0 .../templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../templates/deployment.yaml | 0 .../templates/podsecuritypolicy.yaml | 0 .../templates/psp-clusterrole.yaml | 0 .../templates/psp-clusterrolebinding.yaml | 0 .../kube-state-metrics/templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../templates/servicemonitor.yaml | 0 .../kube-state-metrics/values.yaml | 0 .../prometheus-node-exporter/.helmignore | 0 .../prometheus-node-exporter/Chart.yaml | 0 .../prometheus-node-exporter/OWNERS | 0 .../prometheus-node-exporter/README.md | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 .../templates/daemonset.yaml | 0 .../templates/endpoints.yaml | 0 .../templates/monitor.yaml | 0 .../templates/psp-clusterrole.yaml | 0 .../templates/psp-clusterrolebinding.yaml | 0 .../templates/psp.yaml | 0 .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../prometheus-node-exporter/values.yaml | 0 staging/prometheus-operator/hack/README.md | 10 +- .../prometheus-operator/hack/requirements.txt | 2 + .../hack/sync_grafana_dashboards.py | 43 +- .../hack/sync_prometheus_rules.py | 44 +- staging/prometheus-operator/hack/update-ci.sh | 4 - staging/prometheus-operator/requirements.lock | 14 +- staging/prometheus-operator/requirements.yaml | 12 +- .../prometheus-operator/templates/NOTES.txt | 2 +- .../templates/_helpers.tpl | 13 +- .../templates/alertmanager/alertmanager.yaml | 11 +- .../templates/alertmanager/ingress.yaml | 3 +- .../alertmanager/podDisruptionBudget.yaml | 3 +- 
.../alertmanager/psp-clusterrolebinding.yaml | 2 +- .../templates/alertmanager/psp.yaml | 1 + .../templates/alertmanager/secret.yaml | 5 + .../templates/alertmanager/service.yaml | 8 +- .../alertmanager/serviceaccount.yaml | 1 + .../alertmanager/servicemonitor.yaml | 9 +- .../templates/exporters/core-dns/service.yaml | 8 +- .../exporters/core-dns/servicemonitor.yaml | 5 +- .../kube-api-server/servicemonitor.yaml | 3 +- .../kube-controller-manager/endpoints.yaml | 4 +- .../kube-controller-manager/service.yaml | 8 +- .../servicemonitor.yaml | 5 +- .../templates/exporters/kube-dns/service.yaml | 16 +- .../exporters/kube-dns/servicemonitor.yaml | 13 +- .../exporters/kube-etcd/endpoints.yaml | 2 +- .../exporters/kube-etcd/service.yaml | 8 +- .../exporters/kube-etcd/servicemonitor.yaml | 5 +- .../exporters/kube-proxy/endpoints.yaml | 20 + .../exporters/kube-proxy/service.yaml | 10 +- .../exporters/kube-proxy/servicemonitor.yaml | 5 +- .../exporters/kube-scheduler/endpoints.yaml | 2 +- .../exporters/kube-scheduler/service.yaml | 8 +- .../kube-scheduler/servicemonitor.yaml | 5 +- .../kube-state-metrics/serviceMonitor.yaml | 9 +- .../exporters/kubelet/servicemonitor.yaml | 3 +- .../node-exporter/servicemonitor.yaml | 8 +- .../grafana/configmap-dashboards.yaml | 1 + .../grafana/configmaps-datasources.yaml | 7 +- .../grafana/dashboards-1.14/apiserver.yaml | 1298 +++++++++ .../dashboards-1.14/cluster-total.yaml | 1391 +++++++++ .../dashboards-1.14/controller-manager.yaml | 1128 +++++++ .../grafana/dashboards-1.14/etcd.yaml | 1111 +++++++ .../k8s-coredns.yaml | 33 +- .../k8s-resources-cluster.yaml | 2587 +++++++++++++++++ .../k8s-resources-namespace.yaml | 1875 ++++++++++++ .../dashboards-1.14/k8s-resources-node.yaml | 958 ++++++ .../dashboards-1.14/k8s-resources-pod.yaml | 1624 +++++++++++ .../k8s-resources-workload.yaml | 2044 +++++++++++++ .../k8s-resources-workloads-namespace.yaml | 2130 ++++++++++++++ .../grafana/dashboards-1.14/kubelet.yaml | 2492 ++++++++++++++++ 
.../dashboards-1.14/namespace-by-pod.yaml | 1415 +++++++++ .../namespace-by-workload.yaml | 1455 +++++++++ .../node-cluster-rsrc-use.yaml | 959 ++++++ .../dashboards-1.14/node-rsrc-use.yaml | 986 +++++++ .../grafana/dashboards-1.14/nodes.yaml | 982 +++++++ .../persistentvolumesusage.yaml | 568 ++++ .../grafana/dashboards-1.14/pod-total.yaml | 1183 ++++++++ .../grafana/dashboards-1.14/pods.yaml | 675 +++++ .../prometheus-remote-write.yaml | 1395 +++++++++ .../grafana/dashboards-1.14/prometheus.yaml | 1217 ++++++++ .../grafana/dashboards-1.14/proxy.yaml | 1206 ++++++++ .../grafana/dashboards-1.14/scheduler.yaml | 1053 +++++++ .../grafana/dashboards-1.14/statefulset.yaml | 921 ++++++ .../dashboards-1.14/workload-total.yaml | 1161 ++++++++ .../templates/grafana/dashboards/etcd.yaml | 3 +- .../dashboards/k8s-cluster-rsrc-use.yaml | 25 +- .../grafana/dashboards/k8s-node-rsrc-use.yaml | 7 +- .../dashboards/k8s-resources-cluster.yaml | 19 +- .../dashboards/k8s-resources-namespace.yaml | 11 +- .../grafana/dashboards/k8s-resources-pod.yaml | 7 +- .../dashboards/k8s-resources-workload.yaml | 11 +- .../k8s-resources-workloads-namespace.yaml | 11 +- .../templates/grafana/dashboards/nodes.yaml | 5 +- .../dashboards/persistentvolumesusage.yaml | 5 +- .../templates/grafana/dashboards/pods.yaml | 5 +- .../grafana/dashboards/statefulset.yaml | 5 +- .../templates/grafana/servicemonitor.yaml | 7 +- .../job-patch/clusterrole.yaml | 28 + .../job-patch/clusterrolebinding.yaml | 21 + .../job-patch/job-createSecret.yaml | 52 + .../job-patch/job-patchWebhook.yaml | 53 + .../admission-webhooks/job-patch/psp.yaml | 51 + .../admission-webhooks/job-patch/role.yaml | 21 + .../job-patch/rolebinding.yaml | 21 + .../job-patch/serviceaccount.yaml | 13 + .../mutatingWebhookConfiguration.yaml | 32 + .../validatingWebhookConfiguration.yaml | 32 + .../prometheus-operator/cleanup-crds.yaml | 2 +- .../prometheus-operator/clusterrole.yaml | 3 +- .../clusterrolebinding.yaml | 2 +- 
.../templates/prometheus-operator/crds.yaml | 6 + .../prometheus-operator/deployment.yaml | 49 +- .../psp-clusterrolebinding.yaml | 2 +- .../templates/prometheus-operator/psp.yaml | 1 + .../prometheus-operator/service.yaml | 14 +- .../prometheus-operator/serviceaccount.yaml | 1 + .../prometheus-operator/servicemonitor.yaml | 7 +- .../additionalAlertRelabelConfigs.yaml | 3 +- .../additionalAlertmanagerConfigs.yaml | 3 +- .../prometheus/additionalPrometheusRules.yaml | 6 +- .../prometheus/additionalScrapeConfigs.yaml | 3 +- .../templates/prometheus/clusterrole.yaml | 3 +- .../prometheus/clusterrolebinding.yaml | 2 +- .../templates/prometheus/ingress.yaml | 3 +- .../prometheus/ingressperreplica.yaml | 45 + .../prometheus/podDisruptionBudget.yaml | 3 +- .../templates/prometheus/podmonitors.yaml | 37 + .../templates/prometheus/prometheus.yaml | 44 +- .../prometheus/psp-clusterrolebinding.yaml | 2 +- .../templates/prometheus/psp.yaml | 1 + .../rules-1.14/alertmanager.rules.yaml | 51 + .../templates/prometheus/rules-1.14/etcd.yaml | 137 + .../prometheus/rules-1.14/general.rules.yaml | 47 + .../prometheus/rules-1.14/k8s.rules.yaml | 96 + .../rules-1.14/kube-apiserver.rules.yaml | 36 + .../kube-prometheus-node-recording.rules.yaml | 38 + .../rules-1.14/kube-scheduler.rules.yaml | 60 + .../rules-1.14/kubernetes-absent.yaml | 126 + .../rules-1.14/kubernetes-apps.yaml | 189 ++ .../rules-1.14/kubernetes-resources.yaml | 100 + .../rules-1.14/kubernetes-storage.yaml | 59 + .../kubernetes-system-apiserver.yaml | 108 + .../kubernetes-system-controller-manager.yaml | 34 + .../rules-1.14/kubernetes-system-kubelet.yaml | 57 + .../kubernetes-system-scheduler.yaml | 34 + .../rules-1.14/kubernetes-system.yaml | 44 + .../rules-1.14/node-exporter.rules.yaml | 76 + .../prometheus/rules-1.14/node-exporter.yaml | 162 ++ .../prometheus/rules-1.14/node-network.yaml | 31 + .../prometheus/rules-1.14/node-time.yaml | 31 + .../prometheus/rules-1.14/node.rules.yaml | 46 + 
.../rules-1.14/prometheus-operator.yaml | 40 + .../prometheus/rules-1.14/prometheus.yaml | 199 ++ .../prometheus/rules/alertmanager.rules.yaml | 9 +- .../templates/prometheus/rules/etcd.yaml | 5 +- .../prometheus/rules/general.rules.yaml | 7 +- .../templates/prometheus/rules/k8s.rules.yaml | 41 +- .../rules/kube-apiserver.rules.yaml | 7 +- .../kube-prometheus-node-alerting.rules.yaml | 7 +- .../kube-prometheus-node-recording.rules.yaml | 7 +- .../rules/kube-scheduler.rules.yaml | 7 +- .../prometheus/rules/kubernetes-absent.yaml | 7 +- .../prometheus/rules/kubernetes-apps.yaml | 7 +- .../rules/kubernetes-resources.yaml | 7 +- .../prometheus/rules/kubernetes-storage.yaml | 7 +- .../prometheus/rules/kubernetes-system.yaml | 9 +- .../prometheus/rules/node-network.yaml | 7 +- .../templates/prometheus/rules/node-time.yaml | 7 +- .../prometheus/rules/node.rules.yaml | 7 +- .../prometheus/rules/prometheus-operator.yaml | 7 +- .../prometheus/rules/prometheus.rules.yaml | 7 +- .../templates/prometheus/service.yaml | 9 +- .../templates/prometheus/serviceaccount.yaml | 5 + .../templates/prometheus/servicemonitor.yaml | 13 +- .../templates/prometheus/servicemonitors.yaml | 3 +- .../prometheus/serviceperreplica.yaml | 46 + staging/prometheus-operator/values.yaml | 344 ++- 234 files changed, 37324 insertions(+), 2209 deletions(-) delete mode 100644 staging/prometheus-operator/charts/grafana-3.5.10.tgz create mode 100644 staging/prometheus-operator/charts/grafana-4.0.5.tgz delete mode 100644 staging/prometheus-operator/charts/prometheus-node-exporter-1.5.1.tgz create mode 100644 staging/prometheus-operator/charts/prometheus-node-exporter-1.7.3.tgz create mode 100644 staging/prometheus-operator/ci/01-provision-crds-values.yaml create mode 100644 staging/prometheus-operator/ci/02-test-without-crds-values.yaml delete mode 100644 staging/prometheus-operator/ci/test-values.yaml rename staging/prometheus-operator/{templates/prometheus-operator => crds}/crd-alertmanager.yaml (99%) rename 
staging/prometheus-operator/{templates/prometheus-operator => crds}/crd-podmonitor.yaml (95%) rename staging/prometheus-operator/{templates/prometheus-operator => crds}/crd-prometheus.yaml (99%) rename staging/prometheus-operator/{templates/prometheus-operator => crds}/crd-prometheusrules.yaml (97%) rename staging/prometheus-operator/{templates/prometheus-operator => crds}/crd-servicemonitor.yaml (96%) mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/.helmignore mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/Chart.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/README.md mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/dashboards/custom-dashboard.json mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/NOTES.txt mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/_helpers.tpl mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/clusterrole.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/clusterrolebinding.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/configmap-dashboard-provider.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/configmap.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/dashboards-json-configmap.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/deployment.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/ingress.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/podsecuritypolicy.yaml mode change 100755 => 100644 
staging/prometheus-operator/dependent-charts/grafana/templates/pvc.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/role.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/rolebinding.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/secret.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/service.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/serviceaccount.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/tests/test-configmap.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/tests/test-podsecuritypolicy.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/tests/test-role.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/tests/test-rolebinding.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/tests/test-serviceaccount.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/templates/tests/test.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/grafana/values.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/.helmignore mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/Chart.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/OWNERS mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/README.md mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/NOTES.txt mode change 100755 => 100644 
staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/_helpers.tpl mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/clusterrole.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/clusterrolebinding.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/deployment.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/podsecuritypolicy.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/psp-clusterrole.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/service.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/serviceaccount.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/templates/servicemonitor.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/kube-state-metrics/values.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/.helmignore mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/Chart.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/OWNERS mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/README.md mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/templates/NOTES.txt mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/templates/_helpers.tpl mode change 100755 => 
100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/templates/daemonset.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/templates/endpoints.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/templates/monitor.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/templates/psp-clusterrole.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/templates/psp.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/templates/service.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/templates/serviceaccount.yaml mode change 100755 => 100644 staging/prometheus-operator/dependent-charts/prometheus-node-exporter/values.yaml create mode 100644 staging/prometheus-operator/hack/requirements.txt delete mode 100755 staging/prometheus-operator/hack/update-ci.sh create mode 100644 staging/prometheus-operator/templates/exporters/kube-proxy/endpoints.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/apiserver.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/cluster-total.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/controller-manager.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/etcd.yaml rename staging/prometheus-operator/templates/grafana/{dashboards => dashboards-1.14}/k8s-coredns.yaml (97%) create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml create mode 100644 
staging/prometheus-operator/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/k8s-resources-node.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/kubelet.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/namespace-by-pod.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/namespace-by-workload.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/node-rsrc-use.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/nodes.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/pod-total.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/pods.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/prometheus.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/proxy.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/scheduler.yaml create mode 100644 staging/prometheus-operator/templates/grafana/dashboards-1.14/statefulset.yaml create mode 100644 
staging/prometheus-operator/templates/grafana/dashboards-1.14/workload-total.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml create mode 100644 staging/prometheus-operator/templates/prometheus-operator/crds.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/ingressperreplica.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/podmonitors.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/alertmanager.rules.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/etcd.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/general.rules.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/k8s.rules.yaml create mode 100644 
staging/prometheus-operator/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kubernetes-absent.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kubernetes-apps.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kubernetes-resources.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kubernetes-storage.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/kubernetes-system.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/node-exporter.rules.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/node-exporter.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/node-network.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/node-time.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/node.rules.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/prometheus-operator.yaml create mode 100644 staging/prometheus-operator/templates/prometheus/rules-1.14/prometheus.yaml create mode 100644 
staging/prometheus-operator/templates/prometheus/serviceperreplica.yaml diff --git a/staging/prometheus-operator/CONTRIBUTING.md b/staging/prometheus-operator/CONTRIBUTING.md index 2fba4f200f..44533af6e5 100644 --- a/staging/prometheus-operator/CONTRIBUTING.md +++ b/staging/prometheus-operator/CONTRIBUTING.md @@ -3,7 +3,6 @@ 1. Fork this repository, develop and test your Chart. 1. Bump the chart version for every change. 1. Ensure PR title has the prefix `[stable/prometheus-operator]` -1. When making changes to values.yaml, update the files in `ci/` by running `hack/update-ci.sh` 1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories 1. Check the `hack/minikube` folder has scripts to set up minikube and components of this chart that will allow all components to be scraped. You can use this configuration when validating your changes. 1. Check for changes of RBAC rules. diff --git a/staging/prometheus-operator/Chart.yaml b/staging/prometheus-operator/Chart.yaml index 694239a6b8..0f99cd8fb9 100644 --- a/staging/prometheus-operator/Chart.yaml +++ b/staging/prometheus-operator/Chart.yaml @@ -3,16 +3,18 @@ description: Provides easy monitoring definitions for Kubernetes services, and d and management of Prometheus instances. 
engine: gotpl maintainers: + - name: vsliouniaev + - name: bismarck - name: gianrubio email: gianrubio@gmail.com - - name: anothertobi - - name: vsliouniaev name: prometheus-operator sources: + - https://github.com/coreos/kube-prometheus - https://github.com/coreos/prometheus-operator - https://coreos.com/operators/prometheus -version: 5.19.7 -appVersion: 0.31.1 +version: 8.3.3 +appVersion: 0.34.0 +tillerVersion: ">=2.12.0" home: https://github.com/coreos/prometheus-operator icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png keywords: diff --git a/staging/prometheus-operator/OWNERS b/staging/prometheus-operator/OWNERS index 697dee9ccb..9231f2531d 100644 --- a/staging/prometheus-operator/OWNERS +++ b/staging/prometheus-operator/OWNERS @@ -1,8 +1,8 @@ approvers: -- gianrubio - vsliouniaev -- anothertobi -reviewers: +- bismarck - gianrubio +reviewers: - vsliouniaev -- anothertobi +- bismarck +- gianrubio diff --git a/staging/prometheus-operator/README.md b/staging/prometheus-operator/README.md index 6d9717930d..9065465617 100644 --- a/staging/prometheus-operator/README.md +++ b/staging/prometheus-operator/README.md @@ -33,7 +33,7 @@ This chart bootstraps a [prometheus-operator](https://github.com/coreos/promethe ## Prerequisites - Kubernetes 1.10+ with Beta APIs - - Helm 2.10+ (For a workaround using an earlier version see [below](#helm-210-workaround)) + - Helm 2.12+ (If using Helm < 2.14, [see below for CRD workaround](#Helm-fails-to-create-CRDs)) ## Installing the Chart @@ -69,8 +69,15 @@ kubectl delete crd alertmanagers.monitoring.coreos.com ## Work-Arounds for Known Issues +### Running on private GKE clusters +When Google configure the control plane for private clusters, they automatically configure VPC peering between your Kubernetes cluster’s network and a separate Google managed project. 
In order to restrict what Google are able to access within your cluster, the firewall rules configured restrict access to your Kubernetes pods. This means that in order to use the webhook component with a GKE private cluster, you must configure an additional firewall rule to allow the GKE control plane access to your webhook pod. + +You can read more information on how to add firewall rules for the GKE control plane nodes in the [GKE docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules) + +Alternatively, you can disable the hooks by setting `prometheusOperator.admissionWebhooks.enabled=false`. + ### Helm fails to create CRDs -Due to a bug in helm, it is possible for the 4 CRDs that are created by this chart to fail to get fully deployed before Helm attempts to create resources that require them. This affects all versions of Helm with a [potential fix pending](https://github.com/helm/helm/pull/5112). In order to work around this issue when installing the chart you will need to make sure all 4 CRDs exist in the cluster first and disable their previsioning by the chart: +You should upgrade to Helm 2.14 + in order to avoid this issue. However, if you are stuck with an earlier Helm release you should instead use the following approach: Due to a bug in helm, it is possible for the 5 CRDs that are created by this chart to fail to get fully deployed before Helm attempts to create resources that require them. This affects all versions of Helm with a [potential fix pending](https://github.com/helm/helm/pull/5112). In order to work around this issue when installing the chart you will need to make sure all 5 CRDs exist in the cluster first and disable their previsioning by the chart: 1. 
Create CRDs ```console @@ -88,10 +95,36 @@ kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/ma $ helm install --name my-release stable/prometheus-operator --set prometheusOperator.createCustomResource=false ``` -### Helm <2.10 workaround -The `crd-install` hook is required to deploy the prometheus operator CRDs before they are used. If you are forced to use an earlier version of Helm you can work around this requirement as follows: -1. Install prometheus-operator by itself, disabling everything but the prometheus-operator component, and also setting `prometheusOperator.serviceMonitor.selfMonitor=false` -2. Install all the other components, and configure `prometheus.additionalServiceMonitors` to scrape the prometheus-operator service. +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### Upgrading from 7.x.x to 8.x.x +Due to new template functions being used in the rules in version 8.x.x of the chart, an upgrade to Prometheus Operator and Prometheus is necessary in order to support them. +First, upgrade to the latest version of 7.x.x +```sh +helm upgrade stable/prometheus-operator --version 7.4.0 +``` +Then upgrade to 8.x.x +```sh +helm upgrade stable/prometheus-operator +``` +Minimal recommended Prometheus version for this chart release is `2.12.x` + +### Upgrading from 6.x.x to 7.x.x +Due to a change in grafana subchart, version 7.x.x now requires Helm >= 2.12.0. + +### Upgrading from 5.x.x to 6.x.x +Due to a change in deployment labels of kube-state-metrics, the upgrade requires `helm upgrade --force` in order to re-create the deployment. 
If this is not done an error will occur indicating that the deployment cannot be modified: + +``` +invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/name":"kube-state-metrics"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable +``` +If this error has already been encountered, a `helm history` command can be used to determine which release has worked, then `helm rollback` to the release, then `helm upgrade --force` to this new one + +## prometheus.io/scrape +The prometheus operator does not support annotation-based discovery of services, using the `serviceMonitor` CRD in its place as it provides far more configuration options. For information on how to use servicemonitors, please see the documentation on the coreos/prometheus-operator documentation here: [Running Exporters](https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) ## Configuration @@ -100,10 +133,13 @@ The following tables list the configurable parameters of the prometheus-operator ### General | Parameter | Description | Default | | ----- | ----------- | ------ | -| `nameOverride` | Provide a name in place of `prometheus-operator` |`""`| -| `fullNameOverride` | Provide a name to substitute for the full names of resources |`""`| +| `additionalPrometheusRulesMap` | Map of `prometheusRule` objects to create with the key used as the name of the rule spec. If defined, this will take precedence over `additionalPrometheusRules`. See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusrulespec. | `nil` | +| `additionalPrometheusRules` | *DEPRECATED* Will be removed in a future release. Please use **additionalPrometheusRulesMap** instead. List of `prometheusRule` objects to create. See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusrulespec. 
| `[]` | | `commonLabels` | Labels to apply to all resources | `[]` | +| `defaultRules.annotations` | Annotations for default rules for monitoring the cluster | `{}` | | `defaultRules.create` | Create default rules for monitoring the cluster | `true` | +| `defaultRules.labels` | Labels for default rules for monitoring the cluster | `{}` | +| `defaultRules.rules.PrometheusOperator` | Create Prometheus Operator default rules| `true` | | `defaultRules.rules.alertmanager` | Create default rules for Alert Manager | `true` | | `defaultRules.rules.etcd` | Create default rules for ETCD | `true` | | `defaultRules.rules.general` | Create General default rules| `true` | @@ -117,197 +153,245 @@ The following tables list the configurable parameters of the prometheus-operator | `defaultRules.rules.kubernetesResources` | Create Kubernetes Resources default rules| `true` | | `defaultRules.rules.kubernetesStorage` | Create Kubernetes Storage default rules| `true` | | `defaultRules.rules.kubernetesSystem` | Create Kubernetes System default rules| `true` | -| `defaultRules.rules.node` | Create Node default rules| `true` | -| `defaultRules.rules.PrometheusOperator` | Create Prometheus Operator default rules| `true` | +| `defaultRules.rules.network` | Create networking default rules | `true` | +| `defaultRules.rules.node` | Create Node default rules | `true` | | `defaultRules.rules.prometheus` | Create Prometheus default rules| `true` | -| `defaultRules.labels` | Labels for default rules for monitoring the cluster | `{}` | -| `defaultRules.annotations` | Annotations for default rules for monitoring the cluster | `{}` | -| `additionalPrometheusRules` | *DEPRECATED* Will be removed in a future release. Please use **additionalPrometheusRulesMap** instead. List of `prometheusRule` objects to create. See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusrulespec. 
| `[]` | -| `additionalPrometheusRulesMap` | Map of `prometheusRule` objects to create with the key used as the name of the rule spec. If defined, this will take precedence over `additionalPrometheusRules`. See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusrulespec. | `nil` | +| `defaultRules.rules.time` | Create time default rules | `true` | +| `fullnameOverride` | Provide a name to substitute for the full names of resources |`""`| +| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | `[]` | | `global.rbac.create` | Create RBAC resources | `true` | | `global.rbac.pspEnabled` | Create pod security policy resources | `true` | -| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | `[]` | +| `kubeTargetVersionOverride` | Provide a target gitVersion of K8S, in case .Capabilities.KubeVersion is not available (e.g. `helm template`) |`""`| +| `nameOverride` | Provide a name in place of `prometheus-operator` |`""`| ### Prometheus Operator | Parameter | Description | Default | | ----- | ----------- | ------ | +| `prometheusOperator.admissionWebhooks.enabled` | Create PrometheusRules admission webhooks. Mutating webhook will patch PrometheusRules objects indicating they were validated. Validating webhook will check the rules syntax. | `true` | +| `prometheusOperator.admissionWebhooks.failurePolicy` | Failure policy for admission webhooks | `Fail` | +| `prometheusOperator.admissionWebhooks.patch.enabled` | If true, will use a pre and post install hooks to generate a CA and certificate to use for the prometheus operator tls proxy, and patch the created webhooks with the CA. 
| `true` | +| `prometheusOperator.admissionWebhooks.patch.image.pullPolicy` | Image pull policy for the webhook integration jobs | `IfNotPresent` | +| `prometheusOperator.admissionWebhooks.patch.image.repository` | Repository to use for the webhook integration jobs | `jettech/kube-webhook-certgen` | +| `prometheusOperator.admissionWebhooks.patch.image.tag` | Tag to use for the webhook integration jobs | `v1.0.0` | +| `prometheusOperator.admissionWebhooks.patch.resources` | Resource limits for admission webhook | `{}` | +| `prometheusOperator.admissionWebhooks.patch.nodeSelector` | Node selector for running admission hook patch jobs | `nil` | +| `prometheusOperator.admissionWebhooks.patch.podAnnotations` | Annotations for the webhook job pods | `nil` | +| `prometheusOperator.admissionWebhooks.patch.priorityClassName` | Priority class for the webhook integration jobs | `nil` | +| `prometheusOperator.affinity` | Assign custom affinity rules to the prometheus operator https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | `{}` | +| `prometheusOperator.cleanupCustomResource` | Attempt to delete CRDs when the release is removed. This option may be useful while testing but is not recommended, as deleting the CRD definition will delete resources and prevent the operator from being able to clean up resources that it manages | `false` | +| `prometheusOperator.configReloaderCpu` | Set the prometheus config reloader side-car CPU limit. If unset, uses the prometheus-operator project default | `nil` | +| `prometheusOperator.configReloaderMemory` | Set the prometheus config reloader side-car memory limit. If unset, uses the prometheus-operator project default | `nil` | +| `prometheusOperator.configmapReloadImage.repository` | Repository for configmapReload image | `quay.io/coreos/configmap-reload` | +| `prometheusOperator.configmapReloadImage.tag` | Tag for configmapReload image | `v0.0.1` | +| `prometheusOperator.createCustomResource` | Create CRDs. 
Required if deploying anything besides the operator itself as part of the release. The operator will create / update these on startup. If your Helm version < 2.10 you will have to either create the CRDs first or deploy the operator first, then the rest of the resources. Regardless of value of this, Helm v3+ will install the CRDs if those are not present already. Use `--skip-crds` with `helm install` if you want to skip CRD creation | `true` | +| `prometheusOperator.namespaces` | Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). This is mutually exclusive with `denyNamespaces`. Setting this to an empty object will disable the configuration | `{}` | +| `prometheusOperator.namespaces.releaseNamespace` | Include the release namespace | `false` | +| `prometheusOperator.namespaces.additional` | Include additional namespaces besides the release namespace | `[]` | +| `prometheusOperator.denyNamespaces` | Namespaces not to scope the interaction of the Prometheus Operator (deny list). This is mutually exclusive with `namespaces` | `[]` | | `prometheusOperator.enabled` | Deploy Prometheus Operator. 
Only one of these should be deployed into the cluster | `true` | -| `prometheusOperator.serviceAccount.create` | Create a serviceaccount for the operator | `true` | -| `prometheusOperator.serviceAccount.name` | Operator serviceAccount name | `""` | +| `prometheusOperator.hyperkubeImage.pullPolicy` | Image pull policy for hyperkube image used to perform maintenance tasks | `IfNotPresent` | +| `prometheusOperator.hyperkubeImage.repository` | Repository for hyperkube image used to perform maintenance tasks | `k8s.gcr.io/hyperkube` | +| `prometheusOperator.hyperkubeImage.tag` | Tag for hyperkube image used to perform maintenance tasks | `v1.12.1` | +| `prometheusOperator.image.pullPolicy` | Pull policy for prometheus operator image | `IfNotPresent` | +| `prometheusOperator.image.repository` | Repository for prometheus operator image | `quay.io/coreos/prometheus-operator` | +| `prometheusOperator.image.tag` | Tag for prometheus operator image | `v0.33.0` | +| `prometheusOperator.kubeletService.enabled` | If true, the operator will create and maintain a service for scraping kubelets | `true` | +| `prometheusOperator.kubeletService.namespace` | Namespace to deploy kubelet service | `kube-system` | | `prometheusOperator.logFormat` | Operator log output formatting | `"logfmt"` | | `prometheusOperator.logLevel` | Operator log level. Possible values: "all", "debug", "info", "warn", "error", "none" | `"info"` | -| `prometheusOperator.createCustomResource` | Create CRDs. Required if deploying anything besides the operator itself as part of the release. The operator will create / update these on startup. 
If your Helm version < 2.10 you will have to either create the CRDs first or deploy the operator first, then the rest of the resources | `true` | -| `prometheusOperator.crdApiGroup` | Specify the API Group for the CustomResourceDefinitions | `monitoring.coreos.com` | -| `prometheusOperator.cleanupCustomResourceBeforeInstall` | Remove CRDs before running the crd-install hook on changes. | `false` | -| `prometheusOperator.cleanupCustomResource` | Attempt to delete CRDs when the release is removed. This option may be useful while testing but is not recommended, as deleting the CRD definition will delete resources and prevent the operator from being able to clean up resources that it manages | `false` | -| `prometheusOperator.podLabels` | Labels to add to the operator pod | `{}` | +| `prometheusOperator.nodeSelector` | Prometheus operator node selector https://kubernetes.io/docs/user-guide/node-selection/ | `{}` | | `prometheusOperator.podAnnotations` | Annotations to add to the operator pod | `{}` | +| `prometheusOperator.podLabels` | Labels to add to the operator pod | `{}` | | `prometheusOperator.priorityClassName` | Name of Priority Class to assign pods | `nil` | -| `prometheusOperator.kubeletService.enabled` | If true, the operator will create and maintain a service for scraping kubelets | `true` | -| `prometheusOperator.kubeletService.namespace` | Namespace to deploy kubelet service | `kube-system` | -| `prometheusOperator.serviceMonitor.selfMonitor` | Enable monitoring of prometheus operator | `true` | -| `prometheusOperator.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | -| `prometheusOperator.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the operator instance. | `` | -| `prometheusOperator.serviceMonitor.relabelings` | The `relabel_configs` for scraping the operator instance. 
| `` | -| `prometheusOperator.service.type` | Prometheus operator service type | `ClusterIP` | -| `prometheusOperator.service.clusterIP` | Prometheus operator service clusterIP IP | `""` | -| `prometheusOperator.service.nodePort` | Port to expose prometheus operator service on each node | `30080` | +| `prometheusOperator.prometheusConfigReloaderImage.repository` | Repository for config-reloader image | `quay.io/coreos/prometheus-config-reloader` | +| `prometheusOperator.prometheusConfigReloaderImage.tag` | Tag for config-reloader image | `v0.33.0` | +| `prometheusOperator.resources` | Resource limits for prometheus operator | `{}` | +| `prometheusOperator.securityContext` | SecurityContext for prometheus operator | `{"runAsNonRoot": true, "runAsUser": 65534}` | | `prometheusOperator.service.annotations` | Annotations to be added to the prometheus operator service | `{}` | -| `prometheusOperator.service.labels` | Prometheus Operator Service Labels | `{}` | +| `prometheusOperator.service.clusterIP` | Prometheus operator service clusterIP IP | `""` | | `prometheusOperator.service.externalIPs` | List of IP addresses at which the Prometheus Operator server service is available | `[]` | +| `prometheusOperator.service.labels` | Prometheus Operator Service Labels | `{}` | | `prometheusOperator.service.loadBalancerIP` | Prometheus Operator Loadbalancer IP | `""` | | `prometheusOperator.service.loadBalancerSourceRanges` | Prometheus Operator Load Balancer Source Ranges | `[]` | -| `prometheusOperator.resources` | Resource limits for prometheus operator | `{}` | -| `prometheusOperator.securityContext` | SecurityContext for prometheus operator | `{"runAsNonRoot": true, "runAsUser": 65534}` | -| `prometheusOperator.nodeSelector` | Prometheus operator node selector https://kubernetes.io/docs/user-guide/node-selection/ | `{}` | +| `prometheusOperator.service.nodePortTls` | TLS port to expose prometheus operator service on each node | `30443` | +| 
`prometheusOperator.service.nodePort` | Port to expose prometheus operator service on each node | `30080` | +| `prometheusOperator.service.type` | Prometheus operator service type | `ClusterIP` | +| `prometheusOperator.serviceAccount.create` | Create a serviceaccount for the operator | `true` | +| `prometheusOperator.serviceAccount.name` | Operator serviceAccount name | `""` | +| `prometheusOperator.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `prometheusOperator.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the operator instance. | `` | +| `prometheusOperator.serviceMonitor.relabelings` | The `relabel_configs` for scraping the operator instance. | `` | +| `prometheusOperator.serviceMonitor.selfMonitor` | Enable monitoring of prometheus operator | `true` | +| `prometheusOperator.tlsProxy.enabled` | Enable a TLS proxy container. Only the `squareup/ghostunnel` command line arguments are currently supported and the secret where the cert is loaded from is expected to be provided by the admission webhook | `true` | +| `prometheusOperator.tlsProxy.image.repository` | Repository for the TLS proxy container | `squareup/ghostunnel` | +| `prometheusOperator.tlsProxy.image.tag` | Tag for the TLS proxy container | `v1.4.1` | +| `prometheusOperator.tlsProxy.image.pullPolicy` | Image pull policy for the TLS proxy container | `IfNotPresent` | +| `prometheusOperator.tlsProxy.resources` | Resource requests and limits for the TLS proxy container | `{}` | | `prometheusOperator.tolerations` | Tolerations for use with node taints https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | `[]` | -| `prometheusOperator.affinity` | Assign custom affinity rules to the prometheus operator https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | `{}` | -| `prometheusOperator.image.repository` | Repository for prometheus operator image | 
`quay.io/coreos/prometheus-operator` | -| `prometheusOperator.image.tag` | Tag for prometheus operator image | `v0.31.1` | -| `prometheusOperator.image.pullPolicy` | Pull policy for prometheus operator image | `IfNotPresent` | -| `prometheusOperator.configmapReloadImage.repository` | Repository for configmapReload image | `quay.io/coreos/configmap-reload` | -| `prometheusOperator.configmapReloadImage.tag` | Tag for configmapReload image | `v0.0.1` | -| `prometheusOperator.prometheusConfigReloaderImage.repository` | Repository for config-reloader image | `quay.io/coreos/prometheus-config-reloader` | -| `prometheusOperator.prometheusConfigReloaderImage.tag` | Tag for config-reloader image | `v0.31.1` | -| `prometheusOperator.configReloaderCpu` | Set the prometheus config reloader side-car CPU limit. If unset, uses the prometheus-operator project default | `nil` | -| `prometheusOperator.configReloaderMemory` | Set the prometheus config reloader side-car memory limit. If unset, uses the prometheus-operator project default | `nil` | -| `prometheusOperator.hyperkubeImage.repository` | Repository for hyperkube image used to perform maintenance tasks | `k8s.gcr.io/hyperkube` | -| `prometheusOperator.hyperkubeImage.tag` | Tag for hyperkube image used to perform maintenance tasks | `v1.12.1` | -| `prometheusOperator.hyperkubeImage.repository` | Image pull policy for hyperkube image used to perform maintenance tasks | `IfNotPresent` | + ### Prometheus | Parameter | Description | Default | | ----- | ----------- | ------ | +| `prometheus.additionalServiceMonitors` | List of `ServiceMonitor` objects to create. See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitorspec | `[]` | | `prometheus.enabled` | Deploy prometheus | `true` | -| `prometheus.serviceMonitor.selfMonitor` | Create a `serviceMonitor` to automatically monitor the prometheus instance | `true` | -| `prometheus.serviceMonitor.interval` | Scrape interval. 
If not set, the Prometheus default scrape interval is used | `nil` | -| `prometheus.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the prometheus instance. | `` | -| `prometheus.serviceMonitor.relabelings` | The `relabel_configs` for scraping the prometheus instance. | `` | -| `prometheus.serviceAccount.create` | Create a default serviceaccount for prometheus to use | `true` | -| `prometheus.serviceAccount.name` | Name for prometheus serviceaccount | `""` | -| `prometheus.podDisruptionBudget.enabled` | If true, create a pod disruption budget for prometheus pods. The created resource cannot be modified once created - it must be deleted to perform a change | `true` | -| `prometheus.podDisruptionBudget.minAvailable` | Minimum number / percentage of pods that should remain scheduled | `1` | -| `prometheus.podDisruptionBudget.maxUnavailable` | Maximum number / percentage of pods that may be made unavailable | `""` | -| `prometheus.ingress.enabled` | If true, Prometheus Ingress will be created | `false` | +| `prometheus.annotations` | Prometheus annotations | `{}` | | `prometheus.ingress.annotations` | Prometheus Ingress annotations | `{}` | -| `prometheus.ingress.labels` | Prometheus Ingress additional labels | `{}` | +| `prometheus.ingress.enabled` | If true, Prometheus Ingress will be created | `false` | | `prometheus.ingress.hosts` | Prometheus Ingress hostnames | `[]` | +| `prometheus.ingress.labels` | Prometheus Ingress additional labels | `{}` | | `prometheus.ingress.paths` | Prometheus Ingress paths | `[]` | | `prometheus.ingress.tls` | Prometheus Ingress TLS configuration (YAML) | `[]` | -| `prometheus.service.type` | Prometheus Service type | `ClusterIP` | -| `prometheus.service.clusterIP` | Prometheus service clusterIP IP | `""` | -| `prometheus.service.targetPort` | Prometheus Service internal port | `9090` | -| `prometheus.service.nodePort` | Prometheus Service port for NodePort service type | `30090` | -| 
`prometheus.service.additionalPorts` | Additional Prometheus Service ports to add for NodePort service type | `[]` | -| `prometheus.service.annotations` | Prometheus Service Annotations | `{}` | -| `prometheus.service.labels` | Prometheus Service Labels | `{}` | -| `prometheus.service.externalIPs` | List of IP addresses at which the Prometheus server service is available | `[]` | -| `prometheus.service.loadBalancerIP` | Prometheus Loadbalancer IP | `""` | -| `prometheus.service.loadBalancerSourceRanges` | Prometheus Load Balancer Source Ranges | `[]` | -| `prometheus.service.sessionAffinity` | Prometheus Service Session Affinity | `""` | +| `prometheus.ingressPerReplica.annotations` | Prometheus pre replica Ingress annotations | `{}` | +| `prometheus.ingressPerReplica.enabled` | If true, create an Ingress for each Prometheus server replica in the StatefulSet | `false` | +| `prometheus.ingressPerReplica.hostPrefix` | | `""` | +| `prometheus.ingressPerReplica.hostDomain` | | `""` | +| `prometheus.ingressPerReplica.labels` | Prometheus per replica Ingress additional labels | `{}` | +| `prometheus.ingressPerReplica.paths` | Prometheus per replica Ingress paths | `[]` | +| `prometheus.ingressPerReplica.tlsSecretName` | Secret name containing the TLS certificate for Prometheus per replica ingress | `[]` | +| `prometheus.podDisruptionBudget.enabled` | If true, create a pod disruption budget for prometheus pods. The created resource cannot be modified once created - it must be deleted to perform a change | `false` | +| `prometheus.podDisruptionBudget.maxUnavailable` | Maximum number / percentage of pods that may be made unavailable | `""` | +| `prometheus.podDisruptionBudget.minAvailable` | Minimum number / percentage of pods that should remain scheduled | `1` | | `prometheus.podSecurityPolicy.allowedCapabilities` | Prometheus Pod Security Policy allowed capabilities | `""` | -| `prometheus.additionalServiceMonitors` | List of `serviceMonitor` objects to create. 
See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitorspec | `[]` | -| `prometheus.prometheusSpec.podMetadata` | Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata Metadata Labels and Annotations gets propagated to the prometheus pods. | `{}` | -| `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` | If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the prometheus resource to be created with selectors based on values in the helm deployment, which will also match the servicemonitors created | `true` | -| `prometheus.prometheusSpec.serviceMonitorSelector` | ServiceMonitors to be selected for target discovery. If {}, select all ServiceMonitors | `{}` | -| `prometheus.prometheusSpec.serviceMonitorNamespaceSelector` | Namespaces to be selected for ServiceMonitor discovery. See [metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#labelselector-v1-meta) for usage | `{}` | +| `prometheus.prometheusSpec.additionalAlertManagerConfigs` | AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#. AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator. As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade. 
| `{}` | +| `prometheus.prometheusSpec.additionalAlertRelabelConfigs` | AdditionalAlertRelabelConfigs allows specifying additional Prometheus alert relabel configurations. Alert relabel configurations specified are appended to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel configs are going to break Prometheus after the upgrade. | `[]` | +| `prometheus.prometheusSpec.additionalScrapeConfigsExternal` | Enable additional scrape configs that are managed externally to this chart. This option requires a secret in the same namespace as Prometheus with the name, `prometheus-operator-prometheus-scrape-confg` and a key of `additional-scrape-configs.yaml`. Note that the prometheus will fail to provision if the correct secret does not exist. | `false` | +| `prometheus.prometheusSpec.additionalScrapeConfigs` | AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#. As scrape configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible scrape configs are going to break Prometheus after the upgrade. 
| `{}` | +| `prometheus.prometheusSpec.affinity` | Assign custom affinity rules to the prometheus instance https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | `{}` | +| `prometheus.prometheusSpec.alertingEndpoints` | Alertmanagers to which alerts will be sent https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints Default configuration will connect to the alertmanager deployed as part of this release | `[]` | +| `prometheus.prometheusSpec.configMaps` | ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/ | `[]` | +| `prometheus.prometheusSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. |`[]`| +| `prometheus.prometheusSpec.enableAdminAPI` | EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series. | `false` | +| `prometheus.prometheusSpec.evaluationInterval` | Interval between consecutive evaluations. | `""` | +| `prometheus.prometheusSpec.externalLabels` | The labels to add to any time series or alerts when communicating with external systems (federation, remote storage, Alertmanager). | `{}` | +| `prometheus.prometheusSpec.externalUrl` | The external URL the Prometheus instances will be available under. This is necessary to generate correct URLs. This is necessary if Prometheus is not served from root of a DNS name. | `""` | | `prometheus.prometheusSpec.image.repository` | Base image to use for a Prometheus deployment. | `quay.io/prometheus/prometheus` | -| `prometheus.prometheusSpec.image.tag` | Tag of Prometheus container image to be deployed. | `v2.10.0` | +| `prometheus.prometheusSpec.image.tag` | Tag of Prometheus container image to be deployed. 
| `v2.12.0` | +| `prometheus.prometheusSpec.listenLocal` | ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. | `false` | +| `prometheus.prometheusSpec.logFormat` | Log format for Prometheus to be configured with. | `logfmt` | +| `prometheus.prometheusSpec.logLevel` | Log level for Prometheus to be configured with. | `info` | +| `prometheus.prometheusSpec.nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` | | `prometheus.prometheusSpec.paused` | When a Prometheus deployment is paused, no actions except for deletion will be performed on the underlying objects. | `false` | +| `prometheus.prometheusSpec.podAntiAffinityTopologyKey` | If anti-affinity is enabled sets the topologyKey to use for anti-affinity. This can be changed to, for example `failure-domain.beta.kubernetes.io/zone`| `kubernetes.io/hostname` | +| `prometheus.prometheusSpec.podAntiAffinity` | Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. | `""` | +| `prometheus.prometheusSpec.podMetadata` | Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata Metadata Labels and Annotations gets propagated to the prometheus pods. | `{}` | +| `prometheus.prometheusSpec.priorityClassName` | Priority class assigned to the Pods | `""` | +| `prometheus.prometheusSpec.prometheusExternalLabelNameClear` | If true, the Operator won't add the external label used to denote Prometheus instance name. 
| `false` | +| `prometheus.prometheusSpec.prometheusExternalLabelName` | Name of the external label used to denote Prometheus instance name. | `""` | +| `prometheus.prometheusSpec.query` | QuerySpec defines the query command line flags when starting Prometheus. Not all parameters are supported by the operator - [see coreos documentation](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#queryspec) | `{}` | +| `prometheus.prometheusSpec.remoteRead` | If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way. | `[]` | +| `prometheus.prometheusSpec.remoteWrite` | If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way. | `[]` | +| `prometheus.prometheusSpec.remoteWriteDashboards` | Enable/Disable Grafana dashboards provisioning for prometheus remote write feature | `false` | +| `prometheus.prometheusSpec.replicaExternalLabelNameClear` | If true, the Operator won't add the external label used to denote replica name. | `false` | +| `prometheus.prometheusSpec.replicaExternalLabelName` | Name of the external label used to denote replica name. | `""` | | `prometheus.prometheusSpec.replicas` | Number of instances to deploy for a Prometheus deployment. | `1` | +| `prometheus.prometheusSpec.resources` | Define resources requests and limits for single Pods. | `{}` | +| `prometheus.prometheusSpec.retentionSize` | Used Storage Prometheus shall retain data for. Example 50GiB (50 Gigabyte). Can be combined with prometheus.prometheusSpec.retention | `""` | +| `prometheus.prometheusSpec.walCompression` | Enable compression of the write-ahead log using Snappy. This flag is only available in versions of Prometheus >= 2.11.0. | `false` | | `prometheus.prometheusSpec.retention` | Time duration Prometheus shall retain data for. 
Must match the regular expression `[0-9]+(ms\|s\|m\|h\|d\|w\|y)` (milliseconds seconds minutes hours days weeks years). | `10d` | -| `prometheus.prometheusSpec.logLevel` | Log level for Prometheus to be configured with. | `info` | -| `prometheus.prometheusSpec.logFormat` | Log format for Prometheus to be configured with. | `logfmt` | -| `prometheus.prometheusSpec.scrapeInterval` | Interval between consecutive scrapes. | `""` | -| `prometheus.prometheusSpec.evaluationInterval` | Interval between consecutive evaluations. | `""` | -| `prometheus.prometheusSpec.externalLabels` | The labels to add to any time series or alerts when communicating with external systems (federation, remote storage, Alertmanager). | `[]` | -| `prometheus.prometheusSpec.replicaExternalLabelName` | Name of the external label used to denote replica name. | `""` | -| `prometheus.prometheusSpec.replicaExternalLabelNameClear` | If true, the Operator won't add the external label used to denote replica name. | `false` | -| `prometheus.prometheusSpec.prometheusExternalLabelName` | Name of the external label used to denote Prometheus instance name. | `""` | -| `prometheus.prometheusSpec.prometheusExternalLabelNameClear` | If true, the Operator won't add the external label used to denote Prometheus instance name. | `false` | -| `prometheus.prometheusSpec.externalUrl` | The external URL the Prometheus instances will be available under. This is necessary to generate correct URLs. This is necessary if Prometheus is not served from root of a DNS name. | `""` | | `prometheus.prometheusSpec.routePrefix` | The route prefix Prometheus registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests under a different route prefix. For example for use with `kubectl proxy`. | `/` | -| `prometheus.prometheusSpec.storageSpec` | Storage spec to specify how storage shall be used. 
| `{}` | +| `prometheus.prometheusSpec.ruleNamespaceSelector` | Namespaces to be selected for PrometheusRules discovery. If nil, select own namespace. See [namespaceSelector](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector) for usage | `{}` | | `prometheus.prometheusSpec.ruleSelectorNilUsesHelmValues` | If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the prometheus resource to be created with selectors based on values in the helm deployment, which will also match the PrometheusRule resources created. | `true` | | `prometheus.prometheusSpec.ruleSelector` | A selector to select which PrometheusRules to mount for loading alerting rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom resources selected by RuleSelector. Make sure it does not match any config maps that you do not want to be migrated. If {}, select all PrometheusRules | `{}` | -| `prometheus.prometheusSpec.ruleNamespaceSelector` | Namespaces to be selected for PrometheusRules discovery. If nil, select own namespace. See [namespaceSelector](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector) for usage | `{}` | -| `prometheus.prometheusSpec.alertingEndpoints` | Alertmanagers to which alerts will be sent https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints Default configuration will connect to the alertmanager deployed as part of this release | `[]` | -| `prometheus.prometheusSpec.resources` | Define resources requests and limits for single Pods. | `{}` | -| `prometheus.prometheusSpec.nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` | +| `prometheus.prometheusSpec.scrapeInterval` | Interval between consecutive scrapes. 
| `""` | | `prometheus.prometheusSpec.secrets` | Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated with the new list of secrets. | `[]` | -| `prometheus.prometheusSpec.configMaps` | ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/ | `[]` | -| `prometheus.prometheusSpec.query` | QuerySpec defines the query command line flags when starting Prometheus. Not all parameters are supported by the operator - [see coreos documentation](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#queryspec) | `{}` | -| `prometheus.prometheusSpec.podAntiAffinity` | Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. | `""` | -| `prometheus.prometheusSpec.podAntiAffinityTopologyKey` | If anti-affinity is enabled sets the topologyKey to use for anti-affinity. 
This can be changed to, for example `failure-domain.beta.kubernetes.io/zone`| `kubernetes.io/hostname` | -| `prometheus.prometheusSpec.affinity` | Assign custom affinity rules to the prometheus instance https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | `{}` | -| `prometheus.prometheusSpec.tolerations` | If specified, the pod's tolerations. | `[]` | -| `prometheus.prometheusSpec.remoteWrite` | If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way. | `[]` | -| `prometheus.prometheusSpec.remoteRead` | If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way. | `[]` | | `prometheus.prometheusSpec.securityContext` | SecurityContext holds pod-level security attributes and common container settings. This defaults to non root user with uid 1000 and gid 2000 in order to support migration from operator version <0.26. | `{"runAsNonRoot": true, "runAsUser": 1000, "fsGroup": 2000}` | -| `prometheus.prometheusSpec.listenLocal` | ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. | `false` | -| `prometheus.prometheusSpec.enableAdminAPI` | EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series. | `false` | -| `prometheus.prometheusSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. |`[]`| -| `prometheus.prometheusSpec.additionalScrapeConfigs` | AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#. 
As scrape configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible scrape configs are going to break Prometheus after the upgrade. | `{}` | -| `prometheus.prometheusSpec.additionalScrapeConfigsExternal` | Enable additional scrape configs that are managed externally to this chart. Note that the prometheus will fail to provision if the correct secret does not exist. | `false` | -| `prometheus.prometheusSpec.additionalAlertManagerConfigs` | AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#. AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator. As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade. | `{}` | -| `prometheus.prometheusSpec.additionalAlertRelabelConfigs` | AdditionalAlertRelabelConfigs allows specifying additional Prometheus alert relabel configurations. Alert relabel configurations specified are appended to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. 
It is advised to review Prometheus release notes to ensure that no incompatible alert relabel configs are going to break Prometheus after the upgrade. | `[]` | +| `prometheus.prometheusSpec.serviceMonitorNamespaceSelector` | Namespaces to be selected for ServiceMonitor discovery. See [metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#labelselector-v1-meta) for usage | `{}` | +| `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` | If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the prometheus resource to be created with selectors based on values in the helm deployment, which will also match the servicemonitors created | `true` | +| `prometheus.prometheusSpec.serviceMonitorSelector` | ServiceMonitors to be selected for target discovery. If {}, select all ServiceMonitors | `{}` | +| `prometheus.additionalPodMonitors` | List of `PodMonitor` objects to create. See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#podmonitorspec | `[]` | +| `prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues` | If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the prometheus resource to be created with selectors based on values in the helm deployment, which will also match the podmonitors created | `true` | +| `prometheus.prometheusSpec.podMonitorSelector` | PodMonitors to be selected for target discovery. If {}, select all PodMonitors | `{}` | +| `prometheus.prometheusSpec.podMonitorNamespaceSelector` | Namespaces to be selected for PodMonitor discovery. See [metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#labelselector-v1-meta) for usage | `{}` | +| `prometheus.prometheusSpec.storageSpec` | Storage spec to specify how storage shall be used. 
| `{}` | | `prometheus.prometheusSpec.thanos` | Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment. This section is experimental, it may change significantly without deprecation notice in any release.This is experimental and may change significantly without backward compatibility in any release. See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#thanosspec | `{}` | -| `prometheus.prometheusSpec.priorityClassName` | Priority class assigned to the Pods | `""` | +| `prometheus.prometheusSpec.tolerations` | If specified, the pod's tolerations. | `[]` | +| `prometheus.service.additionalPorts` | Additional Prometheus Service ports to add for NodePort service type | `[]` | +| `prometheus.service.annotations` | Prometheus Service Annotations | `{}` | +| `prometheus.service.clusterIP` | Prometheus service clusterIP IP | `""` | +| `prometheus.service.externalIPs` | List of IP addresses at which the Prometheus server service is available | `[]` | +| `prometheus.service.labels` | Prometheus Service Labels | `{}` | +| `prometheus.service.loadBalancerIP` | Prometheus Loadbalancer IP | `""` | +| `prometheus.service.loadBalancerSourceRanges` | Prometheus Load Balancer Source Ranges | `[]` | +| `prometheus.service.nodePort` | Prometheus Service port for NodePort service type | `30090` | +| `prometheus.service.port` | Port for Prometheus Service to listen on | `9090` | +| `prometheus.service.sessionAffinity` | Prometheus Service Session Affinity | `""` | +| `prometheus.service.targetPort` | Prometheus Service internal port | `9090` | +| `prometheus.service.type` | Prometheus Service type | `ClusterIP` | +| `prometheus.serviceAccount.create` | Create a default serviceaccount for prometheus to use | `true` | +| `prometheus.serviceAccount.name` | Name for prometheus serviceaccount | `""` | +| `prometheus.serviceAccount.annotations` | Annotations to add to the serviceaccount | `""` | +| 
`prometheus.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `prometheus.serviceMonitor.bearerTokenFile` | Bearer token used to scrape the Prometheus server | `nil` | +| `prometheus.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the prometheus instance. | `` | +| `prometheus.serviceMonitor.relabelings` | The `relabel_configs` for scraping the prometheus instance. | `` | +| `prometheus.serviceMonitor.selfMonitor` | Create a `serviceMonitor` to automatically monitor the prometheus instance | `true` | +| `prometheus.servicePerReplica.annotations` | Prometheus per replica Service Annotations | `{}` | +| `prometheus.servicePerReplica.enabled` | If true, create a Service for each Prometheus server replica in the StatefulSet | `false` | +| `prometheus.servicePerReplica.labels` | Prometheus per replica Service Labels | `{}` | +| `prometheus.servicePerReplica.loadBalancerSourceRanges` | Prometheus per replica Service Loadbalancer Source Ranges | `[]` | +| `prometheus.servicePerReplica.nodePort` | Prometheus per replica Service port for NodePort Service type | `30091` | +| `prometheus.servicePerReplica.port` | Port for Prometheus per replica Service to listen on | `9090` | +| `prometheus.servicePerReplica.targetPort` | Prometheus per replica Service internal port | `9090` | +| `prometheus.servicePerReplica.type` | Prometheus per replica Service type | `ClusterIP` | ### Alertmanager | Parameter | Description | Default | | ----- | ----------- | ------ | +| `alertmanager.alertmanagerSpec.additionalPeers` | AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. 
| `[]` | +| `alertmanager.alertmanagerSpec.affinity` | Assign custom affinity rules to the alertmanager instance https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | `{}` | +| `alertmanager.alertmanagerSpec.configMaps` | ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/ | `[]` | +| `alertmanager.alertmanagerSpec.configSecret` | ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config. | `""` | +| `alertmanager.alertmanagerSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. | `[]` | +| `alertmanager.alertmanagerSpec.externalUrl` | The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. | `""` | +| `alertmanager.alertmanagerSpec.image.repository` | Base image that is used to deploy pods, without tag. | `quay.io/prometheus/alertmanager` | +| `alertmanager.alertmanagerSpec.image.tag` | Tag of Alertmanager container image to be deployed. | `v0.19.0` | +| `alertmanager.alertmanagerSpec.listenLocal` | ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. Note this is only for the Alertmanager UI, not the gossip communication. | `false` | +| `alertmanager.alertmanagerSpec.logFormat` | Log format for Alertmanager to be configured with. | `logfmt` | +| `alertmanager.alertmanagerSpec.logLevel` | Log level for Alertmanager to be configured with. | `info` | +| `alertmanager.alertmanagerSpec.nodeSelector` | Define which Nodes the Pods are scheduled on. 
| `{}` | +| `alertmanager.alertmanagerSpec.paused` | If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. | `false` | +| `alertmanager.alertmanagerSpec.podAntiAffinityTopologyKey` | If anti-affinity is enabled sets the topologyKey to use for anti-affinity. This can be changed to, for example `failure-domain.beta.kubernetes.io/zone`| `kubernetes.io/hostname` | +| `alertmanager.alertmanagerSpec.podAntiAffinity` | Pod anti-affinity can prevent the scheduler from placing Alertmanager replicas on the same node. The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. | `""` | +| `alertmanager.alertmanagerSpec.podMetadata` | Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata Metadata Labels and Annotations gets propagated to the alertmanager pods. | `{}` | +| `alertmanager.alertmanagerSpec.priorityClassName` | Priority class assigned to the Pods | `""` | +| `alertmanager.alertmanagerSpec.replicas` | Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the running cluster equal to the expected size. | `1` | +| `alertmanager.alertmanagerSpec.resources` | Define resources requests and limits for single Pods. | `{}` | +| `alertmanager.alertmanagerSpec.retention` | Time duration Alertmanager shall retain data for. Value must match the regular expression `[0-9]+(ms\|s\|m\|h)` (milliseconds seconds minutes hours). | `120h` | +| `alertmanager.alertmanagerSpec.routePrefix` | The route prefix Alertmanager registers HTTP handlers for. 
This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests under a different route prefix. For example for use with `kubectl proxy`. | `/` | +| `alertmanager.alertmanagerSpec.secrets` | Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. | `[]` | +| `alertmanager.alertmanagerSpec.securityContext` | SecurityContext holds pod-level security attributes and common container settings. This defaults to non root user with uid 1000 and gid 2000 in order to support migration from operator version < 0.26 | `{"runAsNonRoot": true, "runAsUser": 1000, "fsGroup": 2000}` | +| `alertmanager.alertmanagerSpec.storage` | Storage is the definition of how storage will be used by the Alertmanager instances. | `{}` | +| `alertmanager.alertmanagerSpec.tolerations` | If specified, the pod's tolerations. | `[]` | +| `alertmanager.alertmanagerSpec.useExistingSecret` | Use an existing secret for configuration (all defined config from values.yaml will be ignored) | `false` | +| `alertmanager.config` | Provide YAML to configure Alertmanager. See https://prometheus.io/docs/alerting/configuration/#configuration-file. The default provided works to suppress the Watchdog alert from `defaultRules.create` | `{"global":{"resolve_timeout":"5m"},"route":{"group_by":["job"],"group_wait":"30s","group_interval":"5m","repeat_interval":"12h","receiver":"null","routes":[{"match":{"alertname":"Watchdog"},"receiver":"null"}]},"receivers":[{"name":"null"}]}` | | `alertmanager.enabled` | Deploy alertmanager | `true` | -| `alertmanager.serviceMonitor.selfMonitor` | Create a `serviceMonitor` to automatically monitor the alartmanager instance | `true` | -| `alertmanager.serviceMonitor.interval` | Scrape interval. 
If not set, the Prometheus default scrape interval is used | `nil` | -| `alertmanager.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the alertmanager instance. | `` | -| `alertmanager.serviceMonitor.relabelings` | The `relabel_configs` for scraping the alertmanager instance. | `` | -| `alertmanager.serviceAccount.create` | Create a `serviceAccount` for alertmanager | `true` | -| `alertmanager.serviceAccount.name` | Name for Alertmanager service account | `""` | -| `alertmanager.podDisruptionBudget.enabled` | If true, create a pod disruption budget for Alertmanager pods. The created resource cannot be modified once created - it must be deleted to perform a change | `true` | -| `alertmanager.podDisruptionBudget.minAvailable` | Minimum number / percentage of pods that should remain scheduled | `1` | -| `alertmanager.podDisruptionBudget.maxUnavailable` | Maximum number / percentage of pods that may be made unavailable | `""` | -| `alertmanager.ingress.enabled` | If true, Alertmanager Ingress will be created | `false` | | `alertmanager.ingress.annotations` | Alertmanager Ingress annotations | `{}` | -| `alertmanager.ingress.labels` | Alertmanager Ingress additional labels | `{}` | +| `alertmanager.ingress.enabled` | If true, Alertmanager Ingress will be created | `false` | | `alertmanager.ingress.hosts` | Alertmanager Ingress hostnames | `[]` | +| `alertmanager.ingress.labels` | Alertmanager Ingress additional labels | `{}` | | `alertmanager.ingress.paths` | Alertmanager Ingress paths | `[]` | | `alertmanager.ingress.tls` | Alertmanager Ingress TLS configuration (YAML) | `[]` | -| `alertmanager.service.type` | Alertmanager Service type | `ClusterIP` | -| `alertmanager.service.clusterIP` | Alertmanager service clusterIP IP | `""` | -| `alertmanager.service.nodePort` | Alertmanager Service port for NodePort service type | `30903` | +| `alertmanager.podDisruptionBudget.enabled` | If true, create a pod disruption budget for Alertmanager pods. 
The created resource cannot be modified once created - it must be deleted to perform a change | `false` | +| `alertmanager.podDisruptionBudget.maxUnavailable` | Maximum number / percentage of pods that may be made unavailable | `""` | +| `alertmanager.podDisruptionBudget.minAvailable` | Minimum number / percentage of pods that should remain scheduled | `1` | +| `alertmanager.secret.annotations` | Alertmanager Secret annotations | `{}` | | `alertmanager.service.annotations` | Alertmanager Service annotations | `{}` | -| `alertmanager.service.labels` | Alertmanager Service Labels | `{}` | +| `alertmanager.service.clusterIP` | Alertmanager service clusterIP IP | `""` | | `alertmanager.service.externalIPs` | List of IP addresses at which the Alertmanager server service is available | `[]` | +| `alertmanager.service.labels` | Alertmanager Service Labels | `{}` | | `alertmanager.service.loadBalancerIP` | Alertmanager Loadbalancer IP | `""` | | `alertmanager.service.loadBalancerSourceRanges` | Alertmanager Load Balancer Source Ranges | `[]` | -| `alertmanager.config` | Provide YAML to configure Alertmanager. See https://prometheus.io/docs/alerting/configuration/#configuration-file. 
The default provided works to suppress the Watchdog alert from `defaultRules.create` | `{"global":{"resolve_timeout":"5m"},"route":{"group_by":["job"],"group_wait":"30s","group_interval":"5m","repeat_interval":"12h","receiver":"null","routes":[{"match":{"alertname":"Watchdog"},"receiver":"null"}]},"receivers":[{"name":"null"}]}` | +| `alertmanager.service.nodePort` | Alertmanager Service port for NodePort service type | `30903` | +| `alertmanager.service.port` | Port for Alertmanager Service to listen on | `9093` | +| `alertmanager.service.type` | Alertmanager Service type | `ClusterIP` | +| `alertmanager.serviceAccount.create` | Create a `serviceAccount` for alertmanager | `true` | +| `alertmanager.serviceAccount.name` | Name for Alertmanager service account | `""` | +| `alertmanager.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `alertmanager.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the alertmanager instance. | `` | +| `alertmanager.serviceMonitor.relabelings` | The `relabel_configs` for scraping the alertmanager instance. | `` | +| `alertmanager.serviceMonitor.selfMonitor` | Create a `serviceMonitor` to automatically monitor the alertmanager instance | `true` | | `alertmanager.tplConfig` | Pass the Alertmanager configuration directives through Helm's templating engine. If the Alertmanager configuration contains Alertmanager templates, they'll need to be properly escaped so that they are not interpreted by Helm | `false` | -| `alertmanager.alertmanagerSpec.podMetadata` | Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata Metadata Labels and Annotations gets propagated to the prometheus pods. | `{}` | -| `alertmanager.alertmanagerSpec.image.tag` | Tag of Alertmanager container image to be deployed. 
| `v0.17.0` | -| `alertmanager.alertmanagerSpec.image.repository` | Base image that is used to deploy pods, without tag. | `quay.io/prometheus/alertmanager` | -| `alertmanager.alertmanagerSpec.useExistingSecret` | Use an existing secret for configuration (all defined config from values.yaml will be ignored) | `false` | -| `alertmanager.alertmanagerSpec.secrets` | Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. | `[]` | -| `alertmanager.alertmanagerSpec.configMaps` | ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/ | `[]` | -| `alertmanager.alertmanagerSpec.logFormat` | Log format for Alertmanager to be configured with. | `logfmt` | -| `alertmanager.alertmanagerSpec.logLevel` | Log level for Alertmanager to be configured with. | `info` | -| `alertmanager.alertmanagerSpec.replicas` | Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the running cluster equal to the expected size. | `1` | -| `alertmanager.alertmanagerSpec.retention` | Time duration Alertmanager shall retain data for. Value must match the regular expression `[0-9]+(ms\|s\|m\|h)` (milliseconds seconds minutes hours). | `120h` | -| `alertmanager.alertmanagerSpec.storage` | Storage is the definition of how storage will be used by the Alertmanager instances. | `{}` | -| `alertmanager.alertmanagerSpec.externalUrl` | The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. | `""` | -| `alertmanager.alertmanagerSpec.routePrefix` | The route prefix Alertmanager registers HTTP handlers for. 
This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests under a different route prefix. For example for use with `kubectl proxy`. | `/` | -| `alertmanager.alertmanagerSpec.paused` | If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. | `false` | -| `alertmanager.alertmanagerSpec.nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` | -| `alertmanager.alertmanagerSpec.resources` | Define resources requests and limits for single Pods. | `{}` | -| `alertmanager.alertmanagerSpec.podAntiAffinity` | Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. | `""` | -| `alertmanager.alertmanagerSpec.podAntiAffinityTopologyKey` | If anti-affinity is enabled sets the topologyKey to use for anti-affinity. This can be changed to, for example `failure-domain.beta.kubernetes.io/zone`| `kubernetes.io/hostname` | -| `alertmanager.alertmanagerSpec.affinity` | Assign custom affinity rules to the alertmanager instance https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | `{}` | -| `alertmanager.alertmanagerSpec.tolerations` | If specified, the pod's tolerations. | `[]` | -| `alertmanager.alertmanagerSpec.securityContext` | SecurityContext holds pod-level security attributes and common container settings. 
This defaults to non root user with uid 1000 and gid 2000 in order to support migration from operator version < 0.26 | `{"runAsNonRoot": true, "runAsUser": 1000, "fsGroup": 2000}` | -| `alertmanager.alertmanagerSpec.listenLocal` | ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. Note this is only for the Alertmanager UI, not the gossip communication. | `false` | -| `alertmanager.alertmanagerSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. | `[]` | -| `alertmanager.alertmanagerSpec.priorityClassName` | Priority class assigned to the Pods | `""` | -| `alertmanager.alertmanagerSpec.additionalPeers` | AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. | `[]` | ### Grafana This is not a full list of the possible values. @@ -316,118 +400,127 @@ For a full list of configurable values please refer to the [Grafana chart](https | Parameter | Description | Default | | ----- | ----------- | ------ | -| `grafana.enabled` | If true, deploy the grafana sub-chart | `true` | -| `grafana.image.tag` | Image tag. (`Must be >= 5.0.0`) | `6.2.5` | -| `grafana.serviceMonitor.selfMonitor` | Create a `serviceMonitor` to automatically monitor the grafana instance | `true` | -| `grafana.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the grafana instance. | `` | -| `grafana.serviceMonitor.relabelings` | The `relabel_configs` for scraping the grafana instance. | `` | | `grafana.additionalDataSources` | Configure additional grafana datasources | `[]` | | `grafana.adminPassword` | Admin password to log into the grafana UI | "prom-operator" | | `grafana.defaultDashboardsEnabled` | Deploy default dashboards. 
These are loaded using the sidecar | `true` | +| `grafana.enabled` | If true, deploy the grafana sub-chart | `true` | +| `grafana.extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` | | `grafana.grafana.ini` | Grafana's primary configuration | `{}` -| `grafana.ingress.enabled` | Enables Ingress for Grafana | `false` | +| `grafana.image.tag` | Image tag. (`Must be >= 5.0.0`) | `6.2.5` | | `grafana.ingress.annotations` | Ingress annotations for Grafana | `{}` | -| `grafana.ingress.labels` | Custom labels for Grafana Ingress | `{}` | +| `grafana.ingress.enabled` | Enables Ingress for Grafana | `false` | | `grafana.ingress.hosts` | Ingress accepted hostnames for Grafana| `[]` | +| `grafana.ingress.labels` | Custom labels for Grafana Ingress | `{}` | | `grafana.ingress.tls` | Ingress TLS configuration for Grafana | `[]` | +| `grafana.rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires rbac.pspEnabled) | `true` | +| `grafana.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the grafana instance. | `` | +| `grafana.serviceMonitor.relabelings` | The `relabel_configs` for scraping the grafana instance. 
| `` | +| `grafana.serviceMonitor.selfMonitor` | Create a `serviceMonitor` to automatically monitor the grafana instance | `true` | | `grafana.sidecar.dashboards.enabled` | Enable the Grafana sidecar to automatically load dashboards with a label `{{ grafana.sidecar.dashboards.label }}=1` | `true` | | `grafana.sidecar.dashboards.label` | If the sidecar is enabled, configmaps with this label will be loaded into Grafana as dashboards | `grafana_dashboard` | -| `grafana.sidecar.datasources.enabled` | Enable the Grafana sidecar to automatically load dashboards with a label `{{ grafana.sidecar.datasources.label }}=1` | `true` | -| `grafana.sidecar.datasources.defaultDatasourceEnabled` | Enable Grafana `Prometheus` default datasource | `true` | +| `grafana.sidecar.datasources.annotations` | Create annotations on datasource configmaps | `{}` | | `grafana.sidecar.datasources.createPrometheusReplicasDatasources` | Create datasource for each Pod of Prometheus StatefulSet i.e. `Prometheus-0`, `Prometheus-1` | `false` | +| `grafana.sidecar.datasources.defaultDatasourceEnabled` | Enable Grafana `Prometheus` default datasource | `true` | +| `grafana.sidecar.datasources.enabled` | Enable the Grafana sidecar to automatically load datasources with a label `{{ grafana.sidecar.datasources.label }}=1` | `true` | | `grafana.sidecar.datasources.label` | If the sidecar is enabled, configmaps with this label will be loaded into Grafana as datasources configurations | `grafana_datasource` | -| `grafana.rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires rbac.pspEnabled) | `true` | -| `grafana.extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` | ### Exporters | Parameter | Description | Default | | ----- | ----------- | ------ | -| `kubeApiServer.enabled` | Deploy `serviceMonitor` to scrape the Kubernetes API server | `true` | -| `kubeApiServer.relabelings` | Relablings for the API Server ServiceMonitor | `[]` | -| 
`kubeApiServer.tlsConfig.serverName` | Name of the server to use when validating TLS certificate | `kubernetes` | -| `kubeApiServer.tlsConfig.insecureSkipVerify` | Skip TLS certificate validation when scraping | `false` | -| `kubeApiServer.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus | `component` | -| `kubeApiServer.serviceMonitor.selector` | The service selector | `{"matchLabels":{"component":"apiserver","provider":"kubernetes"}}` | -| `kubeApiServer.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | -| `kubeApiServer.serviceMonitor.relabelings` | The `relabel_configs` for scraping the Kubernetes API server. | `` | -| `kubeApiServer.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the Kubernetes API server. | `` | -| `kubelet.enabled` | Deploy servicemonitor to scrape the kubelet service. See also `prometheusOperator.kubeletService` | `true` | -| `kubelet.namespace` | Namespace where the kubelet is deployed. See also `prometheusOperator.kubeletService.namespace` | `kube-system` | -| `kubelet.serviceMonitor.https` | Enable scraping of the kubelet over HTTPS. For more information, see https://github.com/coreos/prometheus-operator/issues/926 | `true` | -| `kubelet.serviceMonitor.cAdvisorMetricRelabelings` | The `metric_relabel_configs` for scraping cAdvisor. | `` | -| `kubelet.serviceMonitor.cAdvisorRelabelings` | The `relabel_configs` for scraping cAdvisor. | `` | -| `kubelet.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping kubelet. | `` | -| `kubelet.serviceMonitor.relabelings` | The `relabel_configs` for scraping kubelet. | `` | -| `kubelet.serviceMonitor.interval` | Scrape interval. 
If not set, the Prometheus default scrape interval is used | `nil` | -| `kubeControllerManager.enabled` | Deploy a `service` and `serviceMonitor` to scrape the Kubernetes controller-manager | `true` | -| `kubeControllerManager.endpoints` | Endpoints where Controller-manager runs. Provide this if running Controller-manager outside the cluster | `[]` | -| `kubeControllermanager.service.port` | Controller-manager port for the service runs on | `10252` | -| `kubeControllermanager.service.targetPort` | Controller-manager targetPort for the service runs on | `10252` | -| `kubeControllermanager.service.selector` | Controller-manager service selector | `{"component" : "kube-controller-manager" }` | -| `kubeControllermanager.serviceMonitor.https` | Controller-manager service scrape over https | `false` | -| `kubeControllermanager.serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `null` | -| `kubeControllermanager.serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `null` | -| `kubeControllermanager.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | -| `kubeControllermanager.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the scheduler. | `` | -| `kubeControllermanager.serviceMonitor.relabelings` | The `relabel_configs` for scraping the scheduler. | `` | | `coreDns.enabled` | Deploy coreDns scraping components. Use either this or kubeDns | true | | `coreDns.service.port` | CoreDns port | `9153` | -| `coreDns.service.targetPort` | CoreDns targetPort | `9153` | | `coreDns.service.selector` | CoreDns service selector | `{"k8s-app" : "kube-dns" }` | +| `coreDns.service.targetPort` | CoreDns targetPort | `9153` | | `coreDns.serviceMonitor.interval` | Scrape interval. 
If not set, the Prometheus default scrape interval is used | `nil` | | `coreDns.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping CoreDns. | `` | | `coreDns.serviceMonitor.relabelings` | The `relabel_configs` for scraping CoreDNS. | `` | +| `kube-state-metrics.podSecurityPolicy.enabled` | Create pod security policy resource for kube-state-metrics. | `true` | +| `kube-state-metrics.rbac.create` | Create RBAC components in kube-state-metrics. See `global.rbac.create` | `true` | +| `kubeApiServer.enabled` | Deploy `serviceMonitor` to scrape the Kubernetes API server | `true` | +| `kubeApiServer.relabelings` | Relablings for the API Server ServiceMonitor | `[]` | +| `kubeApiServer.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `kubeApiServer.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus | `component` | +| `kubeApiServer.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the Kubernetes API server. | `` | +| `kubeApiServer.serviceMonitor.relabelings` | The `relabel_configs` for scraping the Kubernetes API server. | `` | +| `kubeApiServer.serviceMonitor.selector` | The service selector | `{"matchLabels":{"component":"apiserver","provider":"kubernetes"}}` | +| `kubeApiServer.tlsConfig.insecureSkipVerify` | Skip TLS certificate validation when scraping | `false` | +| `kubeApiServer.tlsConfig.serverName` | Name of the server to use when validating TLS certificate | `kubernetes` | +| `kubeControllerManager.enabled` | Deploy a `service` and `serviceMonitor` to scrape the Kubernetes controller-manager | `true` | +| `kubeControllerManager.endpoints` | Endpoints where Controller-manager runs. 
Provide this if running Controller-manager outside the cluster | `[]` | +| `kubeControllerManager.service.port` | Controller-manager port for the service runs on | `10252` | +| `kubeControllerManager.service.selector` | Controller-manager service selector | `{"component" : "kube-controller-manager" }` | +| `kubeControllerManager.service.targetPort` | Controller-manager targetPort for the service runs on | `10252` | +| `kubeControllerManager.serviceMonitor.https` | Controller-manager service scrape over https | `false` | +| `kubeControllerManager.serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `null` | +| `kubeControllerManager.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `kubeControllerManager.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the scheduler. | `` | +| `kubeControllerManager.serviceMonitor.relabelings` | The `relabel_configs` for scraping the scheduler. | `` | +| `kubeControllerManager.serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `null` | | `kubeDns.enabled` | Deploy kubeDns scraping components. Use either this or coreDns| `false` | +| `kubeDns.service.dnsmasq.port` | Dnsmasq service port | `10054` | +| `kubeDns.service.dnsmasq.targetPort` | Dnsmasq service targetPort | `10054` | +| `kubeDns.service.skydns.port` | Skydns service port | `10055` | +| `kubeDns.service.skydns.targetPort` | Skydns service targetPort | `10055` | | `kubeDns.service.selector` | kubeDns service selector | `{"k8s-app" : "kube-dns" }` | +| `kubeDns.serviceMonitor.dnsmasqMetricRelabelings` | The `metric_relabel_configs` for scraping dnsmasq kubeDns. | `` | +| `kubeDns.serviceMonitor.dnsmasqRelabelings` | The `relabel_configs` for scraping dnsmasq kubeDns. | `` | | `kubeDns.serviceMonitor.interval` | Scrape interval. 
If not set, the Prometheus default scrape interval is used | `nil` | | `kubeDns.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping kubeDns. | `` | | `kubeDns.serviceMonitor.relabelings` | The `relabel_configs` for scraping kubeDns. | `` | | `kubeEtcd.enabled` | Deploy components to scrape etcd | `true` | | `kubeEtcd.endpoints` | Endpoints where etcd runs. Provide this if running etcd outside the cluster | `[]` | | `kubeEtcd.service.port` | Etcd port | `4001` | -| `kubeEtcd.service.targetPort` | Etcd targetPort | `4001` | | `kubeEtcd.service.selector` | Selector for etcd if running inside the cluster | `{"component":"etcd"}` | -| `kubeEtcd.serviceMonitor.scheme` | Etcd servicemonitor scheme | `http` | -| `kubeEtcd.serviceMonitor.insecureSkipVerify` | Skip validating etcd TLS certificate when scraping | `false` | -| `kubeEtcd.serviceMonitor.serverName` | Etcd server name to validate certificate against when scraping | `""` | +| `kubeEtcd.service.targetPort` | Etcd targetPort | `4001` | | `kubeEtcd.serviceMonitor.caFile` | Certificate authority file to use when connecting to etcd. See `prometheus.prometheusSpec.secrets` | `""` | -| `kubeEtcd.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping Etcd. | `` | -| `kubeEtcd.serviceMonitor.relabelings` | The `relabel_configs` for scraping Etcd. | `` | | `kubeEtcd.serviceMonitor.certFile` | Client certificate file to use when connecting to etcd. See `prometheus.prometheusSpec.secrets` | `""` | -| `kubeEtcd.serviceMonitor.keyFile` | Client key file to use when connecting to etcd. See `prometheus.prometheusSpec.secrets` | `""` | +| `kubeEtcd.serviceMonitor.insecureSkipVerify` | Skip validating etcd TLS certificate when scraping | `false` | | `kubeEtcd.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `kubeEtcd.serviceMonitor.keyFile` | Client key file to use when connecting to etcd. 
See `prometheus.prometheusSpec.secrets` | `""` | +| `kubeEtcd.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping Etcd. | `` | +| `kubeEtcd.serviceMonitor.relabelings` | The `relabel_configs` for scraping Etcd. | `` | +| `kubeEtcd.serviceMonitor.scheme` | Etcd servicemonitor scheme | `http` | +| `kubeEtcd.serviceMonitor.serverName` | Etcd server name to validate certificate against when scraping | `""` | +| `kubeProxy.enabled` | Deploy a `service` and `serviceMonitor` to scrape the Kubernetes proxy | `true` | +| `kubeProxy.endpoints` | Endpoints where proxy runs. Provide this if running proxy outside the cluster | `[]` | +| `kubeProxy.service.port` | Kubernetes proxy port for the service runs on | `10249` | +| `kubeProxy.service.selector` | Kubernetes proxy service selector | `{"k8s-app" : "kube-proxy" }` | +| `kubeProxy.service.targetPort` | Kubernetes proxy targetPort for the service runs on | `10249` | +| `kubeProxy.serviceMonitor.https` | Kubernetes proxy service scrape over https | `false` | +| `kubeProxy.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `kubeProxy.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the Kubernetes proxy. | `` | +| `kubeProxy.serviceMonitor.relabelings` | The `relabel_configs` for scraping the Kubernetes proxy. | `` | | `kubeScheduler.enabled` | Deploy a `service` and `serviceMonitor` to scrape the Kubernetes scheduler | `true` | | `kubeScheduler.endpoints` | Endpoints where scheduler runs. 
Provide this if running scheduler outside the cluster | `[]` | | `kubeScheduler.service.port` | Scheduler port for the service runs on | `10251` | -| `kubeScheduler.service.targetPort` | Scheduler targetPort for the service runs on | `10251` | | `kubeScheduler.service.selector` | Scheduler service selector | `{"component" : "kube-scheduler" }` | +| `kubeScheduler.service.targetPort` | Scheduler targetPort for the service runs on | `10251` | | `kubeScheduler.serviceMonitor.https` | Scheduler service scrape over https | `false` | -| `kubeScheduler.serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `null` | | `kubeScheduler.serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `null` | | `kubeScheduler.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | | `kubeScheduler.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the Kubernetes scheduler. | `` | | `kubeScheduler.serviceMonitor.relabelings` | The `relabel_configs` for scraping the Kubernetes scheduler. | `` | -| `kubeProxy.enabled` | Deploy a `service` and `serviceMonitor` to scrape the Kubernetes proxy | `true` | -| `kubeProxy.service.port` | Kubernetes proxy port for the service runs on | `10249` | -| `kubeProxy.service.targetPort` | Kubernetes proxy targetPort for the service runs on | `10249` | -| `kubeProxy.service.selector` | Kubernetes proxy service selector | `{"k8s-app" : "kube-proxy" }` | -| `kubeProxy.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | -| `kubeProxy.serviceMonitor.https` | Kubernetes proxy service scrape over https | `false` | -| `kubeProxy.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping the Kubernetes proxy. | `` | -| `kubeProxy.serviceMonitor.relabelings` | The `relabel_configs` for scraping the Kubernetes proxy. 
| `` | +| `kubeScheduler.serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `null` | | `kubeStateMetrics.enabled` | Deploy the `kube-state-metrics` chart and configure a servicemonitor to scrape | `true` | | `kubeStateMetrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | | `kubeStateMetrics.serviceMonitor.metricRelabelings` | Metric relablings for the `kube-state-metrics` ServiceMonitor | `[]` | | `kubeStateMetrics.serviceMonitor.relabelings` | The `relabel_configs` for scraping `kube-state-metrics`. | `` | -| `kube-state-metrics.rbac.create` | Create RBAC components in kube-state-metrics. See `global.rbac.create` | `true` | -| `kube-state-metrics.podSecurityPolicy.enabled` | Create pod security policy resource for kube-state-metrics. | `true` | +| `kubelet.enabled` | Deploy servicemonitor to scrape the kubelet service. See also `prometheusOperator.kubeletService` | `true` | +| `kubelet.namespace` | Namespace where the kubelet is deployed. See also `prometheusOperator.kubeletService.namespace` | `kube-system` | +| `kubelet.serviceMonitor.cAdvisorMetricRelabelings` | The `metric_relabel_configs` for scraping cAdvisor. | `` | +| `kubelet.serviceMonitor.cAdvisorRelabelings` | The `relabel_configs` for scraping cAdvisor. | `` | +| `kubelet.serviceMonitor.https` | Enable scraping of the kubelet over HTTPS. For more information, see https://github.com/coreos/prometheus-operator/issues/926 | `true` | +| `kubelet.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `kubelet.serviceMonitor.metricRelabelings` | The `metric_relabel_configs` for scraping kubelet. | `` | +| `kubelet.serviceMonitor.relabelings` | The `relabel_configs` for scraping kubelet. 
| `` | | `nodeExporter.enabled` | Deploy the `prometheus-node-exporter` and scrape it | `true` | | `nodeExporter.jobLabel` | The name of the label on the target service to use as the job name in prometheus. See `prometheus-node-exporter.podLabels.jobLabel=node-exporter` default | `jobLabel` | -| `nodeExporter.serviceMonitor.metricRelabelings` | Metric relablings for the `prometheus-node-exporter` ServiceMonitor | `[]` | | `nodeExporter.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `nodeExporter.serviceMonitor.scrapeTimeout` | How long until a scrape request times out. If not set, the Prometheus default scape timeout is used | `nil` | +| `nodeExporter.serviceMonitor.metricRelabelings` | Metric relablings for the `prometheus-node-exporter` ServiceMonitor | `[]` | | `nodeExporter.serviceMonitor.relabelings` | The `relabel_configs` for scraping the `prometheus-node-exporter`. | `` | -| `prometheus-node-exporter.podLabels` | Additional labels for pods in the DaemonSet | `{"jobLabel":"node-exporter"}` | | `prometheus-node-exporter.extraArgs` | Additional arguments for the node exporter container | `["--collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)", "--collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$"]` | +| `prometheus-node-exporter.podLabels` | Additional labels for pods in the DaemonSet | `{"jobLabel":"node-exporter"}` | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, @@ -445,6 +538,23 @@ $ helm install --name my-release stable/prometheus-operator -f values1.yaml,valu > **Tip**: You can use the default [values.yaml](values.yaml) +## PrometheusRules Admission Webhooks + +With Prometheus Operator version 0.30+, the core Prometheus Operator pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent malformed rules from being added to the cluster. + +### How the Chart Configures the Hooks +A validating and mutating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks. +1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end-user certificates. If the certificate already exists, the hook exits. +2. The prometheus operator pod is configured to use a TLS proxy container, which will load that certificate. +3. Validating and Mutating webhook configurations are created in the cluster, with their failure mode set to Ignore. This allows rules to be created by the same chart at the same time, even though the webhook has not yet been fully set up - it does not have the correct CA field set. +4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process will allow a custom CA provisioned by some other process to also be patched into the webhook configurations. 
The chosen failure policy is also patched into the webhook configurations + +### Alternatives +It should be possible to use [jetstack/cert-manager](https://github.com/jetstack/cert-manager) if a more complete solution is required, but it has not been tested. + +### Limitations +Because the operator can only run as a single pod, there is potential for this component failure to cause rule deployment failure. Because this risk is outweighed by the benefit of having validation, the feature is enabled by default. + ## Developing Prometheus Rules and Grafana Dashboards This chart Grafana Dashboards and Prometheus Rules are just a copy from coreos/prometheus-operator and other sources, synced (with alterations) by scripts in [hack](hack) folder. In order to introduce any changes you need to first [add them to the original repo](https://github.com/coreos/kube-prometheus/blob/master/docs/developing-prometheus-rules-and-grafana-dashboards.md) and then sync there by scripts. @@ -477,7 +587,7 @@ These components are loaded as dependencies into the chart. The source for both The Grafana chart is more feature-rich than this chart - it contains a sidecar that is able to load data sources and dashboards from configmaps deployed into the same cluster. For more information check out the [documentation for the chart](https://github.com/helm/charts/tree/master/stable/grafana) ### Coreos CRDs -The CRDs are provisioned using crd-install hooks, rather than relying on a separate chart installation. If you already have these CRDs provisioned and don't want to remove them, you can disable the CRD creation by these hooks by passing `prometheusOperator.createCustomResource=false` +The CRDs are provisioned using crd-install hooks, rather than relying on a separate chart installation. If you already have these CRDs provisioned and don't want to remove them, you can disable the CRD creation by these hooks by passing `prometheusOperator.createCustomResource=false` (not required if using Helm v3). 
### Kubelet Service Because the kubelet service has a new name in the chart, make sure to clean up the old kubelet service in the `kube-system` namespace to prevent counting container metrics twice. @@ -535,7 +645,9 @@ The PVC will take ownership of the PV and when you create a release using a pers ### KubeProxy -The metrics bind address of kube-proxy is default to `127.0.0.1:10249` that prometheus instances **cannot** access to. You should expose metrics by changing `metricsBindAddress` field value to `0.0.0.0:10249` in ConfigMap `kube-system/kube-proxy` if you want to collect them. For example: +The metrics bind address of kube-proxy is default to `127.0.0.1:10249` that prometheus instances **cannot** access to. You should expose metrics by changing `metricsBindAddress` field value to `0.0.0.0:10249` if you want to collect them. + +Depending on the cluster, the relevant part `config.conf` will be in ConfigMap `kube-system/kube-proxy` or `kube-system/kube-proxy-config`. For example: ``` kubectl -n kube-system edit cm kube-proxy diff --git a/staging/prometheus-operator/charts/grafana-3.5.10.tgz b/staging/prometheus-operator/charts/grafana-3.5.10.tgz deleted file mode 100644 index 711eb1bf9c501adc7688c32bb2406882d3e9bd84..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 15945 zcmV-PKDNOhiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMYcciT3$I68mpQ|ys5H*v2?$0#;j7fq8Ksl=I``Pcpiv(Yi{D|AmF=tI`k-)~r#>ReO0~j+jLJ8{aO%N0Q48_r3 z9{uU}`~BD3+v>mle!u$P{_EEpe_7w$e!a1|zO}Qx`Ir9s#?Jc2U!ec^sMMc~3&j4? ze{fsX&izRql#-t@y-D1%=! 
z%x3ZpN_)1ILVDuD-=sFcNUTgOf2y@BqeCq)}%=V?2O~5Gfz@dWIqW z1d9#{WTW;ALs$Noh)FigIA#G&1Wtq>(Ad_={~IGNFzcn+Fd{*Z#N%E>$MjqtZn{1GEKj8XD;hQkRGr7E((h;{T2)6EX2 z84IvNFsGiPPZLo(Q`^Z+f7@U0cl7w%Qd1OVn134qFn|7Uyzal=uFU_<{`U6s{C|pv z4N)-Y00aypfd?R1hC2W$PY)6_jBx0_`o!^Wn(nfgvi!AVNuHuWmK>po;||AcN&>ta z1T;%T18~K~=D%l{i$SNua2k;S@d2!NIx8!1iZNWssTqg}@Ygg#B!Lj85uL?y*7Fnx zJ{JrLJf2-BF%(mVWmSeph0Ks3Sb_!SKB2vk2D}&0B*3ZQy(`KtBZ@-xN-!ElnDJiG zNN>e9&?#P?npYBkF-!3PPG}U7Wc(=&k&wM(?(YF4Srp0sNuh~3KW22OIxjJNhZT7M zQY0n==%JMKCKyFxqTf;`2C&)h_dA`4Oz{Ki1CWFWf}(vKq1h=8XcF=P?8q-tOw&x{ zFE{1O5h78>@Y#f6KA}-Kfc3r_nNJ)8N&rOyvjhnYJec4xi!fUQ?2mouazWW;*I#BV z1mc&xhr*a7M2c)Lh%zO*1DcG;IAhq0vq+FM!d~7E?{!vIIx9uPM@lMoLp(xRB#MIF zPKU&3tjNW1N;wgf&2ssfOGW^J##Y|}q*)Xl(})DK0UVBws5oYr<3w~i04po-QRzYy zMKh3GNJcY755~|8=m=Dc$5|Ae;(%eneO=4^f;i&>h8SX$WD16!8zGPc!~_@9$<$kp zf(5x!8hVnQmG0`vu6PO~FJ)wk1ojvn6E4`Sr{K$fREG_I1N8#HgLrnLOa6ps`lBRs zrz0>IZyAcQoIbL+CNXrwFp~_2qTUb*-nCTi_Wdot-|cib4l+i>Y)>lguZnS5mhAFR z9J2vzz21;q$KNrUrTS;5gTD%f_Eg_ulzx!ngb(2J7lou}RAYdd;9g)$nGVFV@Ufm2 zSOmE?9#v(g@5t1W5cUr8hz%dPBH%X{Ka&mBq3JskX1Gv6b1^07F3aC^b zH55Mhno^F{yfBmkqg-UyhZs;0V9s#}BSvGOncyVU<&7YLSMMnbe?$>V0?cgfRVAD# zGxdQ84d9B1iJ6Ca{XPB1;c-zJPO(sha%5&HRu-;&DJOwMPkzKeL|G6v&thrNdgF|Q zxMyH$A!mLR$|lFAW5xUTN`xae#^TtO_mbh<*IVn~ufv(5S|%wb`wB?1>(?3BiKfLy z2Wq1M5TPNC%+Qcz%rIBN0$VFV>;9&i5|eVDO?^L*Y&rnx4Sa1ZC?f8mKuLEw1-oTD za=j~?nN!b-hXV$}CQJhBDk7$4tJ4eNA1rK;G)CT1Wo`~#xEx@W@ZLm8GTU%V> zwE_^4n8=E%T99T^efDGhGsdx+aqAo3e<1P|!@p*jFIQYL(-94^p@Xt~QW^qMQbNwF zW%RDYZwU>t$8m%MIcB}iN=LpqRWGEOXQtvAjWE;0de1g(#THv)d9Q5Iv^ix7$V-LyjvS9nteMe7NtxkjP90@5f) zV_X;pV9aGHvXvJ6Ktr5M&=ZWpUlFw-_<_cd}pNGtW`s~Mo~tI z)`7^GfjJjv20V$7pHY;H;+hyA)s-r z3{AzV_$v-F={y#^4#Qcl3vw=0Mgzv7bTdbWL>E)#EaPN)A)&=6L1S!s#o<*+l@3wz z8zMnOvrvT=hY=n3Nb+wSSh3DMk_akQDEAcL-m1Y`{*XGY%d*7hlP(}GMGr}|&uegf z$zh)cmza%6ggYy?p97`1x_r{rfAkEHKXfF*xGsVrz{!*_s_YY(B1WX#kkSVUEwF}I z4Ls09P$#^lyt)ofriJ-sLIXfYfGqDBSB1;>MVE>Y0$fcc)|r%-Z(pbCP6 zX2(j(042s(7-c%nFO3fn^b#k04X_9#f^Qj(9qv{(S{M=?At^a+%KK7iD<@dGe43}Z 
zvTsZc{Y}eX!&&})VkgQ3()C*WbPe5;{MP238wAW(*IzIl^BRCug9dXcqILLOij<0& zj%fp;rDLjuu?`wK7AK-t`dC3&zbxxwgM=5zU)tZ7T&uKbl#NNkVMa5!l2$88xIj@P zg%Sq@5#h=M?wEoOx*-`e5=^{dHXKG+`&B7QaAdw)A_ox-F3lI&_MV-QIW?s%V02Q| zoRX83TTU+$nm6nA)e2xDCYV{19w-~1%VJIm%G{5-%@0y!eq>|6 zAme#hqN91wv5+$*_Zj3cXm3Pv4NA@xKIi7$-)-a|pvJ!safAgvclzGOj<$i-}LidZiq!#qk^ROtP;#X(W)2R&KL*JF!bF=xz|9PtHHCofmw zgO_FiUOK~d=nmzGMxnIoUcM|diLolw6A~`f@V+FwYE(N(<|__~ke|zAOjGUa6hmwx zPjg6Q{i{WzDJYOwdY#Hj`*?ORfHNxfZc#>yS2Zpu;?y`Gg=qqH+ShCz%NEjptBt(V zQi@ql|22h%{iO{M#NiUp{L+XQo`9VpR%g-}dwH=Uj`nXZ34c?>)!@a+?l0&6bNcbf z2In&_Xxvm+j^)6?xK}OeOX#M!D=7tXLwVx_3&}eh`5T7y z+mi6kgzptzUr%5zTudv#ydmOvXY1}Tz2VN53m0Ixvt^Pc90hQKN0_PbgrL%7kS26w z`1AuxOKjjPQ@e=`lz*uz$vXCfGBlOdy<60BSj-9->jr26Ej^byA{?lSl{xtdYs%3h z2@I*2_{H>+D5$7_q0S&@Zh_?A%B;~5-ZHPK0GXy$&V?oRF0jc(n$-lx=zW*FF)1T4 zVlya(7>lWGjBt`bjMCf*2xv0JOlXUoBt+WVF%k>qyD_%3bT0=rWe;pol8}LkmYeX5 zqDhnZ5=ecI=$PB@bnL8@0_28Vt+x5n$wjzPHFFlb)PezuW`YC;vtWXQ%kvRs=b2hT z=eb=P(XlQP(-03N%{YzZ2Lt##vCHA5x#A`it*pTN{oP~OEyq!fuVdmm+WU~k?TAJZ zy^`Lt88(_btdge1sG8x|<2gbkQVT%RBbN@h1f>8dH!2EI>I)i2(kmv(6h$P|LA6Xw zeEHQ*dR)oCP$5`}(K#pM3bcg%~7<8#g<3)WZ;a`K#uh{GEviVeVdbj~kH zdOpRBjAoUuLz0B&VImvPNciS&e2r#rGL9Kv3xhZDjIy!(BcO3rY@dEOYnTfa_Fh|x zw|Oichpi=`j{N5l8B&6q&ycOWmj^Dp@~>_+XxX*DG-*#shy%n-v@4)dgactYR;O9m zIFDKzBRJG+n#!h79IF|eBtevgI8=t8dKCjnOyr7TSzC}PHnu<4#?7x8k@b;)2qP}? 
z$+j)2bVb>vY9&C)OOdaL3p&Qi5K&mAh@lt@%y@%!z2%~Rfyx-UB%hd&DU1PgeVY_dF^KycnMf_(y z#ZCQobBC~{Ef|RQ($8k{ly0%^=t7>LY+|$H0 zjy;JOh8VF+GXb>$BWve@y7RoW0j`DIVF5FmF?e}>4gSwM(3Yo2!0qizW%}pVJ`878+6<%<2zH;w7z^ulcwQl~B-9fu z?Qrbp5(I8<<%c}B(6vs{D2pOlw+p^3<53kXPLY_r>0bL%lKSQJh}$mQfR2`e=Y=?+ zA%_0~9874Zv*9a4>s@)-)x>-9?BOXv+AVbmU{Y^=_5Su2Xi^CY?H`?L-R`CiD>|Cg z0Zp7GI_bAj$mi3*H_%L~>Gd^`QI4cdq_vT&Ta|D7wj~*QEl*m;3_hK_f5lg&;v00X zuRUNW8RHsYmH%qMf^}jgy7HZ@hLceL2{_`|lzeCMqb=D#ohS`6C7cL{dt@1&=d|!r zJ_DcNC>ogV8XuNJ_lSo0m@?6=jgzuRJc}v*s>bc;yoMQQqmgw(h)6X9R9XdG_5Th9gbSW*p5x#<$B$>cGPn;^{=N}&1^pq(g zk>qK=V^`e%Ni#KZuaL0TkLsl~LLHuZS&~N6U`aw=Ei-yAOQUysaPsrvUc&$`-RfNh z;LB?}vXMhIJ6Yu*ll1oGYH6~*D9{|Vi8s%g!KLBOw;Ob!d9W+UA^a` zBo6aGJDWbdr(=>7%L=(7RhokfoWgp(eMG8S?FF`I>9339L8x)Dn(mCPQV+;TK*T1SMtNf9Oa;$ZuTA)+engA7u`QxgNl!hwOjaf_* zByi}%oX3Uun7ww!^Y=E`9{H);|LzjcnVBG`>u2FHMK&@jcKXGJ-=hhd1h6=Dz&}hs|tDg`oIw- zv#k)5B|lCvW72ND0l~5)fSpbCCy7t9(TIG7u2%q)Q_|BCvG-kP&jw8Muxb`XGx#+_ zkzQUvsm@XGJHKEJU6mKI4cQA0LmZ%tW0ki8Kb2EzVMK6bqRlD*Z}VpfhgW8))c6Ey z`6b1{s^2*rfzeeONUmEI+4MUiN>Lk~I4O}g*tU|kFcmCC9N)lx%qeQiv=&&32b0Ch|__SQiH_IN@le@mond_BV32Zc#SKHtv+A1vbMg_7xI-us!Gy-X6fMxoB zhPikk4HOG3B1P8|vZc8+k+?hxT}pr&X!Cn(xaViyQ@8(2u!zwJKhkkP3+%u3jqU3B zKRfIFXZ!C-9wUgPrm}OK4N+i4&@0!;R3D1-epcnIza&YRA4_mTBiwQGxm696i3w%o zH?^4RUw*GPT}{_J)*T-dyZV&?LTNf^4viE60Fx?+m}n_#G-qDc7-55&Yi|ooFe5@` zYq%v&jj}T`O}X&7Q(kO>)j-@`u^ovO{U?ee@ZknX65>R_RwX!K7krlp1bNQ9wCaPh zLNM!G?S6q3=yqiXh%}dN(iy|Bdwd9E zS+Ja=^0`|qw06IAbo!WjKgOcF23-}vlYc~ni|&^~1La7-=P#B@1-+hC$+vjs%KxDp z^}Qj!mUlWSvOe14;vG2-CwL?=s9Oa0)pC%lqB$(|>}x#eX^{U`dpnzv z@|{5U(gcg-|LgUwvi#pz-`LrDmj6%jR09$1X5{T{r*c@Zndx?O)U!cMZ3#+L*!+&vsMXge7Npfu@nuLD zc&QQ&M&bKX&sV|Z^Sa@MJ8eph7Mp=yICN}6pS!S{yZoYFI49;S%HY)m@t^R_*68Zv zFS@I>4;hQPtE;)N%~%8;zl48j+ff>=2@Zs34vmMMe*P&&U)Ag4U5-IQg(Y%v8ij4c z`v-J)g8-*OokAU%Erq>*bDDH5^nXBiPgnEKlnq!d`Clr71B*AG&fa?8cS{&lMuUtj zGOBmdnsQ$l>&mQ&HXfjYQsN%Hn z8yL7smV4riq@+`k zgde){!wkd^bI;0=t>>HXw3Tbbl|9PD9ZIi0v)UBZ?`>ZF;h(zwUkr;UY41JUA6OLs 
z*?L`#|Ma(CuRq)WPw})XbYsWqUH4Key;Xmi`E@@X=6T*-f=Ekw953MZ)(N6`6dcjQHrKcXTTwYaP<@AkUzTb8mH=2!|!4cF}i$rr{@xW3kz zXTJt zz3iXA_uPGqr$PP~w=6wU0BFAa-`srtdb1k;dA+&wEdQV4aiuXzQ{Kx(a&c43a}?;C ztlFkHFqiO{tIwR+v3;q5b9)^!{*Q>`V)siWg^R>ZxHRlrQybg-@^MqmXq%I$ zmPJsPpNop4&`UgDzPYMwQ8@GY`xbMR}eQ=the*Z&BbimjM?B1 zEt8jN%QW0Fv|4n5l&>FZ#Lvy}nulQNMv?sMsyiv|3bHKf1+uJ{9~^2BGIs(xge=2V z1^nXZTDZL(EGR6{*qH=*EAGK*3m_DCskpsq;5%?0=Z?a@^TA$F0iU~y0^K#}dMEPV z(kAN?U_Xad>F&I85@a=W*8SpOQl#EC%$G75@}kAOS>GtrV(MNkP$EQrae}@{sPwIL z#Xue1+H7?A&)fo3#zKvXy&lM&jm;I?J+;=*MzvQqxaw8T^J?YimTMK;dAhB_-T1l< zN*ixGe5%%6_@u-<>P9d43|%jfjFvBWEOD^8Vodj#+2*yME_}HYduhvJ9(OI#W*aLt z7ylEoc~Sqmc6Lkc?vrYxUg5;$JKI{jDGTL4>m`fZLu5hwtB&Hr>A7HcLIdveyaG>| zxy5B31u_j&**2{{-C-`4>K(UCK%7jQ8U4e-**`w+SIDViR0DW_czV`Y>HY4H2k)Ef zH1)_Orald?+`9ts+mG+}4^CPjx8GIkG*E`S#OPMelxySCqumb&r^mZ{2lwlCy_Gtm zwMo^AfB9t87K~4SIy^o4$o^1%{7$|ARxDbSWV;M_9FYdJ{s zGj$u*Kxa>1a0#odevJw+XFzAf^)F1q&yPm> zA{f|wx8s}vXe_@}U}&DB15agWEo`uI8N$6O>jGVpxTO`?cF%grP+P5s-_rRn!2G!a znhWuE?`r;*c$W+ZYbD$iM|pL>eOC|SRLu4X`?j5J#7LdX-QsrXMej;md1U37nmnS$ zlaXsUri=<2&}aZ>d&iCKm2XaIX-^#(yd30OOr*gVz{%8q_xAksVDHn(;n_dWclSRW z9-V(WJveC_h33*tp7vWt<3Z(vy$$^*JTs?0RG;P}jH~X-&9aC4)y6?Smw49Dgt?~7 z<$Zl&fV#D8Nwa@x8XxbTp8oRjWPf=&+Zz7~w016ltJ6Eb5AJR>Z`@?rNZ7-p)3e?8 z@6V6le|mR#bhDM35=YcS1UG5 z$i}Hy?bH)Yl!tY!h5MJ``^9?~aWZWqkmH9|O+hC%ns4;&wl5sJ%Pq-N619m4=2KO@ zYHBxNA9$P_1-^J7MbxG6GQD$pL4l|~Jj3#eRT9;7*O;@0?Z)0);&>sgI! 
zS@or?+S)@xLwst|g^LcjZ(Gunzi$j*G_-(5Ba#r|*opSdLm>+qVdf@P%g1?nNBA~^ z>)3^@zHhD(%DjA6EpYc>=X|Sp5n6s;pth(uC&=EM3AIG5sR{A9(WQrwwP9b^(sBHy zI^PyKYdF22P9ZmtYEMC5wuw6hHW!jGjSL3P`3Lu+&|MZ6S{4>emi9dBMqm0pSLJFb zwN%_CYF;}>iOzv|C!4OtXj8Y!R5hQc4Tbz7@0wyvTNY3n8fjj6n6swP7{bX52px#n zix8=3@<6_-CqBE74bfHQdpHu!!-7q?KwWEGCWF_Osd2ZghxviPhW3{a={;0oG@ky| z$O#Y2lUo$HTY3=g5R#j()0$*Nb;cLJtJBV~-Uk|*qRxvsI`EFcx%oY~`swpZ{hu4_&*wir$y3dQt6V6V-}EA-*O9Sv z=1KjchKZvdQzl&FsNv!{4_=sdRkYVBY|&9??s%0GDDhW;6Me+WbbW{gvO8A~b*<)Y zt3^-LRQZqEqS`Q!8uMmHvr?vY+h)yCvlSn07iJ%d=4FcZyERev_EzIpVH9oM@v<@-h(ZHMf!hdqjvuP=GOE1 zKTq;hB(FMVJ>}^^^P$h0Au8(jn1-i@VRU}yAN#zBax$z}ys>wuEvs>t(A%{G2RIG_ z8mIY=Paj3;1o@ZwRf9gEJrqUs3Wvvxj!2{=GataqkOr5S+1+<9mj`ERIN%1*46cP4 zK5@L8rn@XILdqyjk^Eh>EIDGSey}VNB*x{Imx9;akw$nm3q$Smo{|WUaX5exinzL8 zKw@}oKOFD?MMxhGZA&oxYepCj_Zdz1P>O~mB7)eM@(S#mP>8xclhGLV6UDs$;Xdp=EaAfjy_s=yPx(C z&(422IXpWkT6n8lD238Jc$vpPUfMU-+00+0jQ$%30*CnmczP5kQ z(lo*`P6Ud~y-00EM_k`l(e~Y@*ql%*)M;unE^us-{aq}k2K_$`9?|#z%M$1RZLe4F zf7{yJd_MpCNuKNa{Xb1H+uH7LKg<88c$!Rp z=lbDgZ*g<|L3QWXA6m3OoeNmco_To4^tyKWXVq(eRlCkFCu^uSW-YOty2N^;-m+z4 zoQneqrJF_7^Z8{F!+-gue2u6nP>n_rtV^0pMf2zL%Z%d^4C8OA>oFEF-j@dYJ`oLT zlzF;4SDip@+x4r@k=+0Ar&<3$(#5alS?K@u`_=ou`hf zu3TN_npAr+>s@bTb#9k!y<2t;usJu&E;GhW?CI#?rkiK)+`+qDGyCUv`ASb+{yQh< zK2jTtF-5;-IKQE_*mqdpJEx9! 
zy@BD*7EXe4JgOq}RPkf8smKVd(gwRa@Vs<;14TY=12Be?a5qXP=qQU}t3MBn8)K|6 zQftC?o*!M$@78|I;s&3clE-1UQ%VH%<@TBea=}cP9<(2Bn z%2o<&5nV6;*?81KrSSE2acqfFQ)C3K9e7Vs_#=u?669No>z9PPHJVgxK4=awi%t1m zBiyEHP0Ddzy;Bv-J3-0VE{3&zZN3*_0mOwjyyoV>Lh8*qs>hVXUj=3fiVlxWSDY8~ zuy^atxkSb}?4$XRk94^+J(^D35QvSjXgw69_-tmsoM0#k=ghDp8mbIgIl-iL)H2Hw zQ*6~e2+h8fxU^N!c;v(>-xaj9T%9W`e{UuA{Cu;g)&4^|V)Gbg-~#)vzr9_H|88$T z-~aX`PqPF!;W`@$Zg>7mfk4+>{L_SHzjgHT5ALMa=fA*Q=zkyK>Rug7=aT{4 zc&gUlWsjhHCjb2hMv<6w;o8_}8H?U@)fTN@Z;1}7yW=MsM#q2%Xo%mucx5iz2aiH8 z#M54qMG<(1;9WuYZ`ZPkb6N7X3okMjt(wZ8q5DF9uteiC`IS*?Dr$?FjwgNE|Fodi^Rx6bSN=cJQozFae}AVc z|94(L$N!(?d5-_zTU`D}2*P>u|FKfQ7R>*R?Tz(n{@2dV#`FAtil=$EJ`MlZMgAKO z>*-E#6#IPAn^1b$9e~z}ncB3TNB`;~;(n$VDP3S(&Tdf+j~I&a6=jzu?%!~Cr!4o@ z7Sq>mmT&JY7bxH2S-EA?gYFO&SLu7u^@8HZO@1^S#{xY3HUGT=0ZnHxM1q5$ASjJE zgrqn-H%P%dfnLU0FQNg8dJ!4ouQ=#IPZjRTsy$P@=?+fMy(mc6$$=YsnRnG=1)FbZ zZo1;R;k>@K>z(UYJ6}7uT9p!)f|l`ZU`RWYzU4^6=dMd~`A>HZx}MrGYV?3!2g|%AxXxN zqTmvZG50$wa5f=aEhJGUIPeLMA{a+>2r&}Dge2oNsb`TOQ>;!Ub>5>S?5se7$4bS* ztCZmp`HDlG1^w4mAJpx0G*LArN|0g(5lOJ$@%K;9PX%STvjTfGj%flv@107$!aM$$ zh@Sdaqu=p|zpP!szRtpgM)0p^5Z^K1A^URBj!8`;!Pn9oT8H|BteGh#An4%J1h3%U`T71lmR~xF|U9R4^p)c-8_;j*Be{ z+?1V<*EMv-EXCacbWf<77N61(2`v2wR(df2mCSa#d`A@WrCZ+S)xClDrYPup+&C73 zB;z*XYUHKVdWS`K0N2o!=8Eizo1nJ_U1{lb^@Xi)3tj1^6BO;^2+dA$K$DPn2e8v$ zgRUSkrkN<-ZpybKM52u0vkAj|LZh%dfc3s4gy9bIPRaWaZbhwq%`C%UI~frGtU%WIy%5`N;wgfwaqcv&Y>&{MbQP&;li3|=D*A0=-?&L zcu_5jmq245UR`|1xPT#sAKt*W-}n0$t986~{0)Em?i6$IO0y`^-t$6w$)YCxa6vyV z4o62+95c*uGKX;&;bo#c?fm87#Z5$LK3>;%kbYIX+%08uq1?EJfvKgo^tI!a@8-N4 zdL_K15Lq5CwGHoTf*He7ZrtIOZf{aDer+< z2`?ws+zjf*F0Mi85ZHF;kHcR&a6eZM?PXlh*o62OLw^`vmHUY1=^>r6!tL!pi}6x9 zwfB`@^&4>7#?j*QB;#1V(&vvij?xBMASZHq1p!W_BctL-bMWdtz&bA7!ZY8%tl=f= zsDzmoygq+fs6RW!1w0bS`-pWM1+J8mO9 znBXvrFq`M4K*>{)@GigSkXMCQR7@}fF+qvF`EvFCu8Q`c>Zkj+QTP+b&Rv&gxlMMQ z3(7ErNT5>r2Wiym5$$t4f>ZLFqE{U^b?s)Y{Kb0z9eEUbxtASO0cv`o1Z*id{&`PQ z%Rp$L*F!wcdFRq!l<_)~=Ko#j^;md0=lj>;-4vHXNGn$rg}6N+CMDhpM&U1v2>dY# 
z@Y3S1j@LY^?WXy%idOsWffBFzDuxDlZFsr&H&pQY4Bhe#O6fH4Qbq+1WIwvlOu{;U zWy9lE!|euCIshQx*$_g)tSGCqo5kxvkrt^OCiE)VM*_{0U^j3$();GtAf(AlADRl` ztQD`uc+g_J1R9shU#Nt^yTn));MEupT8vjYHuTU4PzkR&_HG?7X#>|pN zeeg0$;L-8gv!|5IL2Vvhd8Swu!+RlD;8ni0y-j(x<7KnWYJ|H#UTKt#$s$)&<8`cy z8t#oKlI)8jnvC_zvP~PXBg~YGA|@ygIPiji4;fj7mra@>$pR$G**4-M23E`kL?F3DA^2{Zk>4NH@=nKE3C*7l> zVZL%NNcMhz_jpl+)HHHmk}X^gDB5wB&6ijic&L*}!`-D&yMbdGF1b|k2zc3xk(3=j ze;jQjPMiD-p~J-`xgujm!rtZg+%pXg`hMSE-Wi zAzaPNHK{MJCM3`)eh$I-gl17#cpS^7k1fP&M5CgcH}Ia0Nq{2IujNQ+bS%--EoD_T zc@NpT5U-GM$x-`?FD|4-9SAa&-mjp5rj%%2Q+y+3J{=iD1BVst?i9XWh*v39;|46B z%uB{_pu>gKjQ376FnIAxlAcBUXUxc`ajoYDPO(s4Da&+d|B|F2A`VkohXh(r%1L>d zNuEjx1S&7M95!g9hFZz0HGBi_<@?;Cl4?Ab#+LFd>ddT-0$O5f!)NDt>BUP~EjX4K z+ePwi&e|%RNkfgWMT$m7iqR-i$6C3VzsiybbKcmXS;^7hbvTD`;8?8sdV_^p@xCHa zqyi7p?ZcrmfO;ZMdlmb-ty>MP)OvWPCWg+&H4|A1z%?L3w{JFptJtn7kR^(Xr5!hi zm{M-1nQ%#>>Bz83yv8V8HuBWgL9HEuO)4VKf%g_pg&Ql6`m3%nY|#3CtC;IovNu*Da8 z@oh*p^4s0mtw8xdis3ZLV)1R<#nTUG$5If5I1vOz++W&~DDcWSW3155 zpW~%4Xhu!~mR-?E$zMw#R^VlCU7Cm6z3{StHD31D0^GC@+i?9?OPJ(tUa2)7O%{1xMgMjVgVT9qu3xRT%q0PKbB794i&N+pqgp?M7+xtWp+w9z*>WO) zQMKZTSxmUPdmIplD;(*if-1>Uo_^wZH%)h0+?HG@O{fuN0R~$Rw5U^T(SRJozh;DC zUhnHFRFE(m0NWbg$0L#y+XS`wgTLY+lT8n2Ikk&bb0_5?SMWMpv3{yVTeBDCGf6IL zsSAF!LKtS!>qe0}XyQ(8cI_~Wb`jfxm)h3ST%qr8`FGFfzft=pSK#0)iqlAGz}h0I z+UYq%an$kZAKgr#e|ktaKh5Ej)}ng@FR}sm8!^TESXf1 zPP-HMYZ{@%#GH|;H=$Qn2xAmXOgn>uszo!Bm_`uk<33~qUjE%40rGE7lYw(~&9!s1 zNY@E^cLyEki|_l+FLi7{swPt6H4qL+#}fpV<0Y}p{f$!KFeFT+PtV-%dhc-l z$-tIS22R~x6$xLGMiovOmE(pp?s0*NLt|Fp1dj$V5hCTz#_@p0PJE-cVM>n^EDpl~ z*xw9Ki9WN+zA1dG0UYOTs*mcFp*n#w78UI?bmfnUm}Em~^GQ*OrP8KxPyX%|Vyzb= zspecHE4{We=f~aCgPpC?%yE`G0~@6^xtl7VBwzYhE^kONSNEwhEHakB|D1k2GD8u~ z)TD7#F|ozuc>jnv-q`}21XQ}gYS3R{f9%6M%oq`?on1)q6{Ptl?+M~C#5gIZsbnlt z(=Sk);;FQ2W;Dy4M+C#2Eki4v>;m}L3`IN-D58-tu_6fyCxXoi^g)tL z+Z!^J1QX1A*ZMDygtt8fzIQGEqkdbe>rHfJ2?FxY zToGgjA3mL)NuFdljnw_>!x{W-S!Lr&eWK@ zxTVecH92#uI|bx7z4^XK7IStBm{4y~{0a%#6BU7Rqg|$|jmsIb3d}grOz4%N1e>TB 
z(tAgw1`U;AsG4q%%=^kG)y(Tu5ECjn-X>OYrL?aY5dtShk?3HsiUg|E?UJ$S!`?*S zJ89-x1yoEEDNUuzl<2Ty(~^uVsUwS_i8O{j>;l8m*u_bx*Cq(JRsv0&hScFfIj}WJ z5tIPKVHVhBpg4;Jk;YpoNUd~V8I&BSj4_-@!7HUo%4mvNG=rEhrutC?J^hZg39I4d zfzl0kzHx~wXt~W+Oq%jJi%?q;k;Z}rb@@H7sAqGz(P!WHzvN1*JS*~W<@{;U5MR#c zMWV7~?>{Rd!o|0q6V*8MH<}bc6YF>R@QIrM562*lS>4>4G0Fr2>%=?J^Z+GNI@_tD z(RA>!sjAlRG`j7=J?-W~3wtE;mWqZ-<0KoC#IH*zd5sny4tOBQ(hWGd{+Vm1|T)fu_@ zJz*=B63VtPM!Pu$wZL?}V+t$!(oty`+$F|gF8?%1?T`%-*Y1Y?x?fZ;&dAjLeaBEb zA);_jC>tM!i%?L9EMX>$)Rhw!>jV9DrK<^&lX8kkBcNzgYY$;}G6_UN*c)SE8XHSwZMgG^BG&HpZ4zEtNsZJUE$mkU> z7K%R|uGSjWpavRB(I!d64{}I=guV_c+HjhndZ=JsnqJDzwTn!h%1Bmv@Pjm@sy@!3 zrw?15P%Z}F_rLGgYuVAuBhG8G<&5VI+V{2Lbm~gmtXeP2Z<1=RV^*9?b?ir{-jLGh z2hbX(XOcc(h&TxVWny9qsu#PNm^8Fl6wagl)mopLadm21DpYG8*QwKpu~JFSA_z<{ z3Nf>%oXXF8#n&3N7_m#E$rPWsQ|1Hus*g`pY&o{4$MStQGNNF`DE$FO1V^C}I5wai z&}5309VV!bZz%sjSQ|&aQZmtrsO%e|XdXMWa&gyP_t(3{cM)+lKIL;??dxA;|9AH= z6Qu&qSSYNhx&*@tu5ur660n)nLpE%qjwQnXQ$tlm+&0zqeZO1!5D}b+xgUSRv+}oT zA>b>crml!nOsAwP4v8p9++4bUInYJU`FR^Yi@t9?$Dc zVQyr3R8em|NM&qo0PMYcR~tFfFus5LQ`9!uCCrQ4h7dB3pC@yc2_(xi3E>B1=IqJl z1-VG=+>>i*#@2-!C%9XeQmIrol}clVMkqnOHxtB!KSOc! zrELbJySTw>u%`0VAnUug)eIB8)WIm2uw-=HLfDd8E4 z$S@AUiEewvIs zm)Q{KL;k;8{D6_;$x{~ilPkjUq=AU=l?JIuU^*p z|EuloC;tB!4;!Lj&;bY-Mgk8&unczqQl1_pXc*zpef5dsy)@lpF=hE{$&z@A0$Fl| zA}-(IYr)V7jqphpVLpJ*UpfG5&;^u|cZ_Cf@eY7t%g2g$Q_P0;w`%N7lu4Gpnl?FR zQxf34AfQ#YPlCdu9ukw{Lb1XsR7*PjyPzHA*s zh|`G9Vkv4R=sp(=2|S+NZ43cT0-OroyQ1teq9|0a1fx9(U`YmN*05AIe zey0);eoN$45%(@IpL7StN>rUZ+E1G*;+hIHjBj%4WHGk}Dnn0*$RY36N${bW9@> z%m#2cI-=s3VU82gk?V8~J}L@EQ8WWdZ)7x6_+SjpfQ~@*c$`JiDGnGG+}E|tFNiZP zV2B|`Nv7J+D?0>|fSBNdTbX*xVbED?H0(;kRjOf64yCIg>ZXiLk-%Ms$Ak+u>#6n? z>q!e}2Zk4QA)cM+l0V>?{wUGh=?KilTZUpR7m6&7D{NS!9K0~hBmttRH$;Ma7Rz4W z-|_qMC&xj?h?u>Rit@GQ2A%3lIdxgG$3Jn*2C(x=PSA)eeyV?VI-36~48$n?AX%o% zezi&(U?zAsu%*l}V_EoEFCHv{TqWf^>X{-u8|nhG1u~M{4Rb$W0@mO;HMFo{3hVYi zptP++$p=0r(ad1W0DlmtVzy6M3+ySQQz8{$l8n0vl~7>D7YxCz)GI2fHN3|;87F|? 
zm=UV59U;!;{DPoTC~zPn;8=9lAgvY|bUGouN+gfmM|?7*h=u&HX`P`oWi(|33ET@Z z7FiOABnrjcl~(UTliE~M$=Vtm(-8Iwi6(40-k0}#JC96eN z+Rs_LD^@#eB_V5jIJizJ$7=bSHo!=5iOwMg6a<)a9Kwjv7-%Lq33YkH@8J1+io)+v zgpvR=TYE$CP|8ewAVN2AMa0Cgi@d(kKMs$J%5aK>DwG2=OR=(Z)S0jQ&`I+#^hK5 z$t7?-1G~~Sck4iHHUOexc6HZ!ZViN;4oSugb6#RxRx7NxCZQ|i!PNHyxf}+Nj7f6c zSWrZ~ivlH>p6lpEWWfH$ zaGxOxE^!iCu>rOLgLNKt84d}R<;YoW2@P?V;|K>zweGBS$$&gpSc=bqNJW&aH5Nn zuq*N|)XFb@>STstO2ba2 z-2vQmN)rEirYOs3Q$J^DCNVTd@sLLU^mVXH5nU3#uDsFfEq|+PbtxOa{-2L0@6M0*J{){!KPuN?H>5El$?I?OhA6!x zxZ04M?=A{M$va;g0n_)@{|1uf+j3dT1;Z%rW-PMhwzglXl3N2MGuDh{V^?AsY27P4 zH1DC9$U?TbQ=VeRrP+}LQrO7ZJGO5G{79oL#_%RWB<6RMjd{DGJ5{Y{Dd}`qNXm2? zG>M`$kX95L<3jBMW8X+YrSyRhG{m{wIKe3Vi4lQ6CV^~#lV7CS+yo1gyK{<$|*nbMvvR*bXy38&WOzwNbO>zfd;KLhG^ItrbbR zbW!S=mP@+)$hmxF{jLe7U9Ag;bh+b`Ul!ri@fa*`Fh|7PE5$9Kwg#+M% z(PW&O-?DGxsHiq8g_5KuRbNqwubWpYp((5lUPI`z%nx{Gy6zgnvRye}oZh7cF(L_u zfW|S>dfE^JzQ#c&t^I=5VK~d>LQaKBfxtM#p)|xaiY}(gyMdGGg={TG2^wQFDh|&} zVs(g;Ul0i*nuThsWf;+Mk0k%Xfo1W$OA!H zc2XEv*E#Id;1aVDiEwAlj&q=dn8zoc{-b9a`CYq?j1edGYn)68qe@qSDPlzO4at3w z&6Ln?eea5W*p#F7E%Ee~r$hLRct zRV_GRb}Xk1P-3hmsq0HJmx>4odWjRh30MRYz_*OX4s~mhf55e;UvUN5-^7Z zU)h1tp(L$4f^wqTvcMR;?S&!HK9!O!t&C#TmGXB;OI$B7t`sj*Lx0o4z>v9rAJ3c8 z&2+sMmyB+`D~Z3&5n}+bZY0N$F$3#1G*a~s&BcgTF6LsSRKyHSYc?$dQzeYr(lD?% z5xvsK3c&hhSurp`bOZTIyJX}NQX)Od#w6h|qZwRDb($nxpeWJ=h9DwbIVL)$paX75 z#*74$?l2n;BdiU+6eTz^-z}1Zhz6JDi|qT2We+(vrD9?PWL3|ToW{2s{*;Y#eq)T1 zl9O$%+)&5)X)R9YmDSU46RggF#pCX8!Sn@!zdf|5iz?WD15da-!SU$|H-N|h0&?G*d9 zj5PHv#}^6Bn=Sil4KNWC%&e^el#S12F{cD&?nm9{2g$!bvaw&F@jNWy(Y)ta$d!^C zGjbZVnIx$OCFcsBbMx+R)};|p<9G;hgatl##@O+#y|5=>3m{L1F7Vy;#iGV+&rfX3Uoy@C8dJFIS<1bu$6$&U78RQ#qnh zC{@h$^)itd6-PZG;ZiK_OSG#_wZ&$>;*bcvVd>7X-X(CS*m%+ApvW3~i)K?$Ah9%u zmCE?>>|g+AR7&2WjApNDUQooTv4#pg3F^47DPWc?q%v8Xd8elovmF1M3Jm*8YeR^` zC7$`E886HdyF#qYq%rR1#R>=CzqmyFP2uo^XD535{QXtpII8LyTw6mSRVOTE?_g%rnOKe+zR$j>_5OcOI5$`PcUSWdv1m?np zw9;62h&bNexqF-4;qH#xF2Hbi$3(I@0^kIXFjIaLL8Zzd_4&xq=?9dS$iP>6e-jxf 
zV^>v@b?gVF^(qr!uc+mam}M~5CeaL9no)H?I8f!pbApF9mDNcS7*a9ui{&LjP%a9C zok33A0?EIXTBBXhWm-|5JB_QH3QOc&K$EjHD+!DS{w}wvQbuCLW@Sfyv9ygvKoW>i znk(f2O{SO$Z61(>NR2y2Vj+Jw%2qjCqwww8q9h>$vteO;MG7ZP>`Nf^J)&c7ztgd^ z&km3)a<$s#ODAXHM%2vN*Hn8pD4Gcp7|4PN4ld6}l$~cP@`C4jX++1mNK8XKkT~Ns zk{?XN=ZOubD6JJY+H-9U-tX@n!(Q3JYpfmPWYX4$RBlH!is+Rz-OaSo+sh>mcHoeKiBNBgz&t_3aKho)adjK$7!U@s^Lt5F3bx_wC58XrBZ;4G%Cp}4Xe9@ zDlwEV^;}fh{Xjcb-wg60q`oFw>&n*d){3mrTPq{L;=jS4dr|f+UC8-Evb%zjHh8Oy zD?kM+Pq3Q(3eIKQk}xb0kpz%pt{1X!S5Dl8-{#s{gA3i70!M8qYhDj@q#IB(e^GV< za6wsNH(N7WL)@{Xl=onN-gKQfH_GvOM8{(}u-AoK763kf)0Cq{UORIFz<+r?RKllADf4grB z9k=a5RCfcE^62h^S9i7xuYSL;cfzY%;kw4lT7{Q=vvnXB*jdDX)Jsf7rBJmvm7KdI zDaeJXR-l!sl7KF=A(r+X1Sm3_{iT+2`4yIegc`W7tg4AYQBM=qI0`0W7-GaO%>vXK zjjWw}?#}blrg2RymLsM#)E)5&&7u${Gzv|pifpkvzwkUuBFs5l7_IQFYU>EaSS?A8 z#YV@qPJ1FwT^me8a5lkKt+dccg?bZ+;oF)`0|K56O@Sta@aaJ@ ztbB7A=^@u(9YB&rkpbp#RII{H%_lUKYEU>vESRV{E(}>cAzd{iBryR}gqH&%W#b|!vb73YFVO9fO96@YOEk6VI9hD0DmJY$ ztWM1dV@tbU7%izJ)7JE@yY12>n!ylH(3H^3HdU%u@Q0GK_5K{hVd2ZL5L^5ni^f*{ zW38IT^B>j>v33OSNiFKs7uRBCXG|8H|9rW<`>JyOXJ_lx)A`RwdA59bhXu@N#$f&H zSMYz-zMMQH0B&#Bl`@&@kJ9us^gEDd1=u4aV=Sy5+wB%>K(=~Koo!!< z?sw&#J>ycy*d;sb_wz2Pi#HusB!7ph5cU|AW(Y zTcVe7*5gBx^oosLHH&}!t1ZGOd0`t6Q9!+q1tJmGrkBJ+qtl%Re6Dq;BJ3nBC@%ex z8N`kdTq%K!qAN7x5HQ3iz8d!1mSFVFJZKy<_;m9AIp2^1ZP5ApwF?X-V_chF6@hCe z7p!qAiAKJY)o>E(KLJM^o09KLe7Yt6PbW%pObI8#S;Mjn&m(DgDT;zmup|LZbjCL2 z;2qIW1sHp^)uQx_u7wm|tJQP#asS}__~Xgh>u1khUa=IDs4xI^22I5$e(rzq69dSt zmxop_v*0%B;2j=UfRj!0dx1y$Z@n~orw1oL9=>Uqz@>Y=s~B4UYG*cbux1yloMe*T zu5XkU>$3vPL7R~2IxDy|-T84351J>tvfhqA?yFGZ3PRMuqvhry0uQC>>qg8JSlMK{ z^6P3taC_^uu8J57q{O&vC|?H_(52v07RZRQD{0w2v%}SAUP|mR|FiSLhxc?$l4AcL zS7(&`<^rd%l^@@c3Xit#T6Ch0YMqSKc9$*Sgdn>jd2Q8?X%PT^+B-QqJbE`MLaU|G z!L210{`t>;R!dgd84=}JYba_bPQ{11Vv-<%Lm%cmZi}lNwKJc; zwc7lLdFu9`yNk4%J{H)2FSd5JEB4>Z{?4l>`|mNHuU~uLz?8%T;8-Xxkd!E|V~Lhv zf(P(TPjS-TH=Tp))Urp{OQvE2{f;TuHGuZ_rB@8u`bx$QJY2441Uf~q&F;3s>2}s&QDmk zuF4D9ha3fmAr4T+u?mfWAIc%bFd{fI{%hsZxADS+!*jD6YivTbp_AfZ!|xo9z(`5m 
zNG{bC+VndjN`4?6n<;@e(6*wsP*N;L9NW--%rR=qv=&$jL?ZG!dS1{h8wsV;f{#{=)&yZ?Hsu6KLf8R_&X?ZB7QtQ@t`X}Oqh zHd3q&`Soi~T+hwkVxyA2(bis~_0ckIR9d#VEo!z@Vwr8uY)EhO5*4{y*wWZ!_xnN|OdV(0&CDB-=-otaCY_6BWesgx3^U>&COW!?QQ?1sJ0ANFQ zPs%v~8X=QP(lVfxj`rqXH?;H19zu|Xx`8a!&BJAF#v})A<+fA-zX;Wa+uOl{!UByI z>E!LZ5kU*;7Dt$^BT zT2O$UA2|WCjT~W$C}kVwn{K%x9u`rq-ZX3`+$Dxur_$zvzx(x~?TG4jX6@W>eOhEM z*4Szoj{KbebjuXyWMvWi%LQfON}ea@N=tjeu1J>$UH7PJ%c{5CsZUES+ft3=+FDf$ zq>RlE2WNl(xL+mj_lKuvm4fg0zCU>1v<&7UM+a;b>&G_s_T&5ggOgfo?dkKJMyqX| z9Grgqbn@n)*4mJYe7!6&k(G$3I^iAdeKJqTIH1Zy{b<1;vr~1)Q;+FbweZh1;NfnqY{^keUiE ztD6_9n@xBi)!Rxk(#9B$rix{T=k#2LA*iU%jYRIqx%)CJH`@9)VpVtGK>2|0$bjZI zX5wydZzZDDCh?rYwfJnfy|n>3#r&<7nHBPX#k_1z!%wU%XJz*2Z#yGfioR~(=Zp$m zUh-vj;CS^7a7`BM0-N=2R%)aAUzuZ@YN=uxwCH-3(sG2B)zzlYMZ{|KYvc}k` z?n?7DKOECc2{nAXhI*xbuP~L>iTnnF_G(4iO|HtH4SwdPv;AeXUg8;FSbQFS{CncX zZ*my75mRwsr3THijI2!^?p0GAHTe!IMdrYo=eJoAq z)*Q&(M0CGPRbjbdly7fX%|I$Wn|#Ohb{oR9k1AbNk|4LAAj)f`j0zgiXaHw#j@vtR zE>D=#U!Lc`xn;-FZirXL$<%-M_WbnV&8L&YvwxiL?SD8tI{$Qfa8gHebGfn!_mR8MZN-V}?PU0p%WfQs%k513_-oHOTe*fv+;nC@QH02k= z%*E8AQyz5;xJi?i@PepRBIOBaq=XM=$E#gEb9-){QB|d3NmLd?73`Ih?R`OSp~d-B zxOo3?#hZmySGAiV*ztzC__MciR8Ut#Tw_}cyLYn!-xGjE%czT!X%jG&JJ#xcx>A7# zA6@m9w|WcZ5{*sF@e{WB2b=aAmfCA*+-Im8jU{ihd-I4aP`Zi4?P&hiMX61DTZjCZ z-`sJ}wwxYM#aHL9-1{z0vo__iy?a0ZRNL!b5E9~Qwk?i} zGEgn9iu!7YwQHVv+ohQ7RM{t&iCCo!R38i<@22t2GBDX=M7-z}%M&H%qHqUP7NPx8FysmpY z7De+t$3HiISLay0bQ*Y-h(6b8T-^MkZCi6QiiS?*Kd%Q!x-htT)9<7j=g5EX2)RH4 zkFtnkp+~wV?TQIW#+~vEH;o?~Q&^=Io8v%Tc5}GR%~b2>p>3DEOZR8-=DtTarf#*N zJ?$=}xu{>tEz~A*%50>{8j9b=iZyimL1kK%g`5`+h*-)s)CS#MmRxy#Ed@xEbEgfI zxl{0TF8F>={rrbXQ^IJ3S9Bfxg7d#yyW3mU^S`fN?mV6Uc#Nk(Y_lN>>=Cr*uG33> z$kQ`~N_<_CB+M@)J)sfqxK{?K8YmMJ%E&MJ*38Rq)g?pIt&U9uF(~ftRO=k2>B6`r zEl4KFxLoT+&B;l>F#OcX&p|Dzge@?^j0hEG;g&cxhaDTOs^Zc&-AL(%_(of3r)E3c zLG~5v(#kjIP{$mx{JgPP`(X|>X^pq;s~KnN{{6dwu%sX^U*_+5UtkS9Pa;p>3vChu zG7NjihcK1}%NbWcdqvtnZxcLoNstO!xA*^hV=Ux1mBB{-5fLuDFO^NG(oDBwq+sswM+fiBd`Ct{C_AhpNtHn!M|oDx|G(EX 
zb3ZX~FK%e4^gmaM@J!tgXIyG}Movp7Cp!zwtk!duww}o&+Ln>2Bj>^Ri|ZP}eWe`a z;$RL5J)Jdw(9^*Gtpshb-|r_07V-aATRUa`zrD5f;z|F1l;=*~9haw1w=D0s?wluu z{*DTL&6%|vueB&HElP6%NcF}xC|}s-ZfdU2j~>fgH!c{POWfS*wq#|U13mX#<(bKs ztezjru)XVKUn7D7S~eqP*Ce&yQNukbHQ67V*?rjjKo;a>g*H*?}r zq@207%@qSB3CSZ|J{Qp`U#gq0IKuI!{^v&&sS^k`RwduOl^@~u7GCGSWYPMDsq^Q8 zSoel4;l+_RwRrhrSQV6}z1 zFN4Ikn!hff9CXFzd9jhZ5s<^}!V9~-s$`clla zS}IR#A(?{__3EU8VYT6aQN|+Yd{L0G z2)g`o1pm~^qtsdx9Eh$UF{YV--G2TlM%U_f@vam*Yp`70cxpT8{vEu%Ai${@NHGwZ zTdI2MB#~|X@8G@B)w*X&39OdZ_Ax;Ig2k(8sfauiTO<%&b>9aw>Q9X2znCEFfc=@NU|2rX968GJQ+XIXJKb8E?+x?v<|Igz*b7L3fGSv4tN~!dg_MWgf&b$)qO%w$VpR^H z1^oZT)^08S=jE&IC;tB!kJcd7Dqe7t$;L9VyBk$5kJ6O)a#p{e)7Fz`sTwe@Acn@n zB<5S|uEVdgshr|qprUJ(nZ--4ZTnI;&hz6b%4kL+n7nuJe}@@ZfY;hM3tn|97!r7F z&K7g?t2$P}RN!~(=8Xl=jKm>PfHG0taaea9SW3h9O$1Q@|7{I;1(G|@w(DSmgG-*p zJ$qNYT=XX7-6WKM=AW5C_}?Rr)mdq;vK8QMsQ&o`ZNJ>*S?rE3@1ogySiZ~o*2hvH zG)A7^8IXnB;7zc6t)CQbJeJ!Cwz<81SpaqUxuCSMhm?kUWe%}4iCMOg`%iwf>ioce67udTc2XI6aT+`;3M#+L<>cEZsCd(%u?eFBN z^M8Bk@T#Ie7V`h?t(}_v-`{<@|K(AhiWOSDP&~isO|sb|b9{T=aoM7VQ6G*e6RtzJ z;mX@CJTv_&)z&F&!BKmzd0}-DLl)&tWu33xij2Y60w;P?Zn`zZ0$G`JsOvV%oMP;M zXa|JF-#kY|rOfgU30rYxlB)%H5t(*H)N(;dQErYOz?^Yv%1PFECwmv<&jFM(ORBq{ zOYwo+-lgu+a0%1yhaAAIextFkd(PtyON*#f2w;eUOKh|C7LQqsM=hk@P&ChSR$AXr zJFx$*PhI|}Gz0xIzZPzop!Kh~vIdmNm?QACqKniby22G7YHvrkXc$VfFS@8I85k z^``_?x-Y$33_z`kk~DkN9fT*rS(!A17p?so0`@AlS{{r4zOW%;XP zmMKpUnh$-o@%~doF#7t4-}l~b`F@RV@y6bry{sHqLYmaJ<2Vii8mIZ?i$03d3Gy%V ztLA+|dnk(N6%LOX9g#>W=X?O`Aq_4uv$yE2FW;Q0;WRgZ=H^<6;S!v{uffxqGBPC*9^-JJ!=2>(NC=PZhXWp<2u)p`_{Ns-YCntyd z2S6FT`TNJ8j*QXx_Tx`SCJSsA-ku(u9qyYSKODY)Z~i>`N!$`o4=oY{CH+weE8w$qit{R)BfSv`A;W@ zX9qn&)<*eZJx}7bZr|ASO!F+1|*;qjaDi{rz6Xa4d#RP}nzq_VPpBH!*Mvvs9=yeCO^-TpmG(+I~n z5hyabN!p5zxIV4k_T45xKcQ5pXv!HEIJUt4Cb~(3{GSFZ^!@&63H$HmZohv2_tXC0 zqdbqg|EEI4+V=g9n^yksTa|bWYg5aza`=XWSl6L?Cy#2w@rl+>MkGNI`2{oENAY6m zF02?vL4Y~`Ktn4`>MqvO_=@6%vsJlSEBBg|{kKJ$XFUr)XP6gPJeZuCt%*W8`30BT zH#J%=VRx%m3PR-s-EY0#t52Q(uQ2hSc^27!+dEbJZ|B9!-6#J47*CVRZ*o6XchBZ! 
zadPcJmGkF!&9@)V1pGBZH@?;~?^Uh#=e2ZRMb@sCbbu?&8d#(Y+-OJ`ILFgaO8=*x z&+EmO?fRE;u0KaHSKmo(}A^5^q<#&M|)W00!z^NT3+OTz@L0Le=rc%z1y-zYI+ zBRMzBdTTi8T~Z=G`D!2OX_o&hO8jV^h4x>+YX7}_)qnE;KFaee$$y&#@<|Q&)zknd z<>E4F5^Z9}yJlo`vP-t+mb_nP$z|rai98)K+mw0o&I7z#n#td}J$Kiq&i}P)(8bC0 zk!0G=#$q0HC;Dw&X8XuRn2hH+Ow`*S6tR@ndtZrBPT)H?Gzj&r7#=P~0@t&=^BWxEG}p zbd<%g)1TK&<^D96kXr9E%P$uzL9U8xH^t%(pPU1$d7AkDijIBe&jSAcqW@~AcK+w( z%P0Q-7|-gDVE&P~dW93)r7%3By$yJ-LUT&b?-95ZdWA-_`i>(`2hn2exx0PiUZzef>Dg8bgy_h($3*1r2C4fU`Hp=AA%njKy(V z1I1^<6y&-?NjPWW9nnyoos#QLDorhmFEPb7Hk>>9?Mn$uTLleAON9MuPIdp*vg_%2 z_|vNYA?>kUg&Mfn|5wZZv9tZ^N&kC{ryNU;9#+ z6>N|^#13v)|3#9P$JzgVvD0t3|Mlto=c7EooBTiJM3JYa;GglMAUhC8i+;5A1Z-~}^VC=kIfTei; zrkH+MHm?8n7M^ww@~5u<35iwp-xXZ_pq@qhzy00X`R|=qPx1ec@>J|UIqh#5it!a? zm-oE-bNN6J+1rX0wVD=~D{J0zW~Eor$-HKz%q#15UV~=f<@>-aOY|*R`ijoK$#H{s zCja|)j3P1d;HzO{8H-+fDsfb=x5NO|O+*vjM#nS}&=9|V_S{_E1YHWf5Knta7DdoK zgzgo1fB7oAIF}`FJ$RO}Xv0+g4Bj*O+Jo1x!Q1ZlJ_yF_EjPM%M*kBLp}@n{|b(RF5>^& z{g>7IKl@Mm|KmKr75{(G0ntCI7sbg{Nl~&s_e$qEN5}{J+1u zwOi%?FJJbb`2S-(cjo{9c@biNXht}1{Xf?IzdNs9)$ae@c{=~|D9?lT|Bh++Fa_@4 za8Iux)W@6PDE9fJH=*>>8-SLbnTmy2k_T8HaGjmpkMGQ#R_il(Llc3og$=zbz0}>7 zk7>+rr3gG}$}dS0B4kNK5|uR)oWmOmUnASF*@z@@F;$KIs{*&TgVq89jq^zb2xuIm z;s%^9ctgY|Uh%UV6hChApc0M+bn$imdkq4b&R~cH2SGtl8gU4fW7_nSLiYrE8E3tS z1}N%9WQebE(1V^T+>=#%rnvTie~?qi3&>*m`&eD;qwD028+@5})jN*8cB*W*FD=`4{1@*2s1aP z(s^-*c7qlgZ{Q`C3I3YY4xc@j?BsvEP4K!Z(VG$VIy%AoGKVH>R4FSufmg*kcHi1` z*ZBt4Vgj#{cxj8M+DpD=R1RRfgZ$}Mx`TY9X#ihMB3Xf$(7TocIKorR{!o^y)&G1c z!^k+H46nS8MfTsz7uECM+q=6@`#+EJtidr70<%PkGfe<+HNgoCGZKX)8AFPKOEkvZ z@2tVugmAS1Oqt-oCpd~=9MPc;3n$5VQ|eDhkSSId(>d=^5_Z-g!DGc!;d#pNh+N}P zv*AB)_#k-&&_vagAVG>5L?pp}$KO9aKNXbW&KkU-aZD5V@y)3eyu9O&iRh{SYVbS$ z@E6uo|FthB%~8=cPD8l1iV@BNei?sP6LE&`fx8sW|}kg^7d?g6-vMy9PQ3#y6+0NI-a z!2otf^Li@ft$eE)i2P|xHzm~JBA#{4i;KZL0J>dOz9`-X(x-&90hQTLy%0xOVD;H) zF~n&^XE9DhBPga)Q9<{4rn34fO%522=@fIYL;oSa8b=mMX@V2M0dYMQ4T(R(bGXb+K3o&}cdubv7VB#S=t$;F>-(L~4ICq> 
zSqscy(Q^a)m+Tko*5&n`L3e#_Iyc?rpSzU3PkE7_x#}oeG8o9?x1DleBD$0ykynwGJ0w!4tC-djs%JsA8p`(hv#k!R~&?xi<>UQS~FpEfX z;Uz|R11GlVQX{F6~WYIHjBj%G#EX?B`Gxg`((! z*5Sfx?B?HPVRYaUXuPPFg-f6@JiqvmaREath2~2sW-m4x;M( zwN!?ucDVB7UUvhp1eatY%fqG8FI_1`(1J)f;Omrf9OiI++kZH|Y)+^-^kw;k)R7fn zG*N$B$g1US+jj-7Jdb()s8&IHnCRSCE{YUTC=6YL2fUz^TBYcEiqrtM>py!NN76yj?c7 zz$NRbt??GPK7UyO-Cqlaj**zO0aw7KiYjC&r7&eU#Yrgt#8b>>3S6_}WZVQkeByYN zMcQ3%Kape%p94)C#z~=FF?&Cq!ruGu^A4p}%xDy0rrl?J(CZcV9d|hybAL?4Bpa%` zl&^RfMZ>NcSvSaQ_r@6sanF3{73-w8X8PaoSA|b8cTtq^dG`LajINgQ4}vQ{60umH zeRKS&_W{RJ!?v~Z91^{V#H6IKJ#f9c+uRG*>k$pHq#0r?5@kcx%0CD$XHR?4 zJf7u6mx1nq^Jq8lmvA2K*`5E~;9UUML-3`T!eJ?NrCm-D;q3k^!!lI-Lu!x4SW|F1 za21^_>OfaI)%V0L*^k=^4<>phRd`Ot1Tzp5lz_QzedGQrownfX z$;R3+{E1^It_L@v0dnA6P=+By0+q@?NJFV6FPozQpGk9yj=-q`&o9zxZ~>ITg_HD5 z+RZD3i<4jG!F5W0DZ`cHEQk<^YvnJt`tQg}=F!awS8Y(wqr1&;{No!Xadis~^Y{>p zVP1Zri!xlbd0d3ARl((CO{%wdQ(TNKtr=Am;e^_-E`Y1iPr4Ycvd{FPp3xFqbM){!Tv9iwso?NeFiQBn8O2@ooV`eS z)C6`k92E)BQvN~f5hv5->fXSCvP5w*C5&oyc8VC$-kB6}j+M4&w<{j1;U?3!jK&{b#ZV5 z7oK;K1MOa)HdkZD30Lt1OW_kGq1Gm)HDSlL6ov~;Uc;p0XWO?6_QH@z;ew)EXXltr zF|)m@7+Ic|e~@{l;QG;t>%9+LMmVh;t~WNx;2hBA!Ieh=S0TI?a0OfkCK7la#^%9g zBZ+H>yFXlMl#R(E(_O=Ltc#je6;ULY6^dvw*4`)^^<~RHXdazw%NMy7Y@khHsydb7 zxm}MFMziq*L>g^8+!rIrh|D#t+kwj~KLCmrxblFyW0Tyr-ACDkG*cODYTVM^^+WJg zBm!R0#trP}MOE-{+`b>RQn>Q8lWokRC;$wPFjJ!6ExK^nU{E`3#$7EFZ_~T;od+9^Od1u zDiziic)Cp|_jt5>bkR0h(T@{;v&4?sLrHAWHd!NFOKh1v)LBqUvpshk@0f;5ZY{0` zuJ=o!OvAP0-r`E&I$jhRVft~M!-ccC2$Qf5?i{W=hAEZc;@aJ<&EEOz(R#+Usk^Xs zxVR)&WXwp|yZn}SO-F;izvXYwjhd*#bv!3(sS4Mku%!nYV5Bhk5vfxZWMq{@xhO9)NmD6-Kph(@ zyUW_Jp(I?b;~P*?GS}FpIu=W1URkbnV%EwXEfKY$v-7<4;;rQt7)ylh0{J#4Z7cW` zz!oSPY03&{6sc^rF67U%B*L6Gc4&51b@NAm1U4PT$XR1rN_B#M;7jb@3!p;Baf zB2If1`>kzQ4ZYMxc%~MHKCEIEvSfgpK!on!WdBin8m2&&C@Plv_#9+PzMW>mMTy2E z!|GfXM&Ys^*R}y_fstp~$T`#DNL_0p&H8Xt`zDPnR6}Z#GlmGeiU>&}%WK;rcWCqBp!-&4k+RjQp&aCQm#4$?+o_dzgIssh$K;m z!Sb%Ei*8wrk`i_$IszX5HtfP8@9j zOqxoG2&t}KG`5$t`Z$~;1EwKGyMoHO-_$|A`i^ePLAlnA?M|nyMne zFv^uQOD@{sJ!^~}m@Dm_tJEa#ce^zQnedV7GSXDK(k;) 
z77f7VXXtBV)sc-=Ak;>xMCM5rwRxjTd#EZCekG<=&CM#F8~FUC3Y;_lO)xG5&ZVCf z0cSEkv|(!>k4RGFh0qEuzQ#c&Cv7;(>9mOdae}vUl}*QkH>G=QX0|^~unzvUd_$-0 zQ`vrSSo0(1#BgUk)Zv=r{xxNb?12}x;4r^hAq=yWGJztM4((13om$GZc46KEmq25` zr9$7|@$Y_OuK}((r?ng4T5wAHVc}RIy-FgA!hP5C+Y|{USy5AfjwbISOh-8uY^z<@ z*HMuB13=d4L=LV|oJLA-t%c60^er$^XL$UqOt;gqHU8 z!_sEI+5hz6dmPa#Rgu$JG9raK?F7KrX@nA^Dk4>HLa(fff>AIr{R|4M7Tri>8ZoUC z{K*cADYr@g{>5oBa58p&b&?5sj;`trI?fm0_nluV7g(xt@W|%W_Ay9|RBE_Q3nC(w z#^O|tJ11~&(=C*dP!>VMAl%x1u~YtNGShei*vem<^tS5!SbeM}0UP0fW{~Jx;JV3E4gQ+PxMaGdw4KC0ZRbp&NBD*9*W%ABB+mYU=b zSgN!%?#bW1LPYdpB!##OWM$NL<$S+)da%1wS~*S-Oknp`%t-?k{g?-YDMwXEF;~}A zGb}Qe!2h0pJTg-e&D5fC6jfr2$@%^salE?&I0r@Ek76C;Ey7*N3#JnhE zJVVh;fwtaTHy#7RY%HycV^s*5!G}+$XObovP9teSNfq%g+i%U(yPZgM<||_pa!BQS zy+TyO28iFh{FXtPZdm(tK|&=ke5kdPx$e$)Im zZ4S?=jq~Ljwi)YLHz*dEaiE#dd%FqNe(es@Q6W;AhRP67V6^Y8`O2%L3E8P2CR9>? z0g)8oSBwaOla3ZDdOJt?^3|cEl5yd~8hQlnd8@X{72_hA_(l(RCRj1sN zoU@EEoJb}tIZVoEidi&+m@uZsQEXfIosLxyt2;ykB`EHES2kCzlzDyEz#N+;pBqChsC1Nw@$n{ZB z)TSiYY&=|9&ccBu77ozF+TuQZ;>L~3F-YZC z_qEy4N}NDoZE9!FEI^6m=5{%07%edN6oz%-7(sa9=DWNAPCGXDVrkQzeQ;rm+a^*6DL_rk{b92~8P>4~Y%y2n}rG;!^vRjELL@G_ih|z{bry&iv z?`Atyo_C^B;1QbiRG5e403;dr){IfqEqdvi4L&*H8*{jZ9^^7C!?xVcGYVZ=G7T|H z@c=%i=;sV~xL_n1@0%WqqUr{i{NDOCiyNh6*)*V_?(QgH-|CpcigxIzG~aH7#4u<6 z8p)O%hlp#tOMl%jsux!zXe7L2D4h^dSZS1vrA09x1eHP-GhsxIWK~#SMyD%XO^_t3 zJhP%PlWz zt(WEZTQ}D+0W+mK_M=m8NNMyPXxZ8`F>5eHoCGQ&-xO3Y_A)VP=&>lA^UP|kPtCA8 zH7ypZHIM7qX@pn_T4xUzCK!d7*$W=z=QqXIT5B<4mqtD-I&qiG2Xw8I!z;2J+lw>! 
zzUv}VZN(`44n_n=pt(-^|9|Jz_S5|zkMgX+d#wF(gloM{hYHAbobZg9 zb8~MFfn^ETmx6FWVu05cj6^VG(MOV0a<82N0Y+%taQ+ox(iE&?MckcY0BxxqN zH-OJyHp^dZsEPdMd?~k}%Gz}DhLlTtY^s3XgKPb)t_2%__pYr#u}|lzB-dW4z^(JE zJc!cfV+C$cnx?g})kg>5NuDbX)iz-jGytzqv?P4pZho@A+pUcRz|;D=r`6Af?-LTv z`APzsyrB_gvM)9sqUZfh*xuUOgzc9vH(_g|sY{Dr=SU{}74{;`1aB&D=QAfIuTti= z^!tnR>$Ze{t7JBe$J3jS-T;y;ippQ4hJQwBBuMHFpkMk(8mmMkxojZf6eT!nc(1pQ zek^u>|Ev62%p!vTc3Ivr7HHhip1JLgB;$56YsZoxYI-X5q|Pp3Na$5}cT$-qPf+vC zo7-$kf=iXvztT9vBZm3poFoFXDbhIdUUF|fl)unK;iZBTyAQvg#qn&xM*LwI{Nw+l z^ry>Zcj8+&p?_<}y`raG{;$pqSfu~&zO2gs-R=I<{m+l`JV5^2_>C<&-dYUD*8JsD z&wipk&*p8O)kJ%s8=TCb<4}sag&%d@8OD0EKT&Ve3}Ah|0GC>#W+1oh9kJ1w~Qs&WRC>3E0)};m%hXsZxh(aKt5BNFa z62W7!R44`vZ;?To*;(=cC$Lm1l9S$CghWG| z(>U~&G+go`vm%22;?kKQOB(zsh#5*gR~mL+%w!qA{Lhh-{JmZWNAg&BXh3XV|J&@_ zoVqXEv9pKoU&G~=uo&eY6~Ko6e|CN`uJr%2(eoFF{(t`%=@{PNoWx8)^4Vd|MUB|H zPSLS$JV7EY>>@^o24sN~2S8xNrS6BT3H%b1l}q{1;{th|Bs&0#EVRZkItCz$8N23; zdaDUs&ab$-7AP@Pjw3K))FW~NW5%}yd1sM#fw@fo~fM6Yg3mQ@jl%V z;e*G6h<_+Lql~JR2C&kVc%5a|eVF@rZOCexj1&I{!bs@p*AqB8asWseaz(T`;!O}J&oxdMsGDE+ijRHl_A2^}i)vB8H}(r{tu&SS>R zdxb&7h{8;d8q)7@jsimuHA5aEZ~*}q2ogPk%8WQO)KGlg1fh{3P1=a0G&BUxi(#hF zt$$iJeeTJjBJz_QdOV!dMJ#X-aev_Pbf6e&f&)Ws5D9v#N+ni92NFKlF=ICg zpb32V=ve(kS0f#p^iKc`$&8_&Nriqye4lD*BFqvY{X(~cjEIM+-~lXo$VHYbm8;`S z4puXn$x@?>4lS$3qVzC5MrrJk+fvQ*MLwnR9&X zMBL9nqrk!Rm^^AuB4|}N;3HmQa)~$ z*(EGm61U8PFvjoDzZQH>8D42&n!w4klWl1DiAuF-OyI=l-X|2wv_U_nr8yM)GT9xD8-O2U)sC=rLR&EzeinM_{JM zm|kE+N=mv213k2K_(-utF<8XZ$Dtk^1`-*1S{{dv{`dgAAS8>>->oOT0gw1j-$Hq3{I`Z1m#8m7?JYI3=muwlv>G2RL(I()htF$YWBB; zGA%6Nah80Y#yJiAu!2a0J=``T+5}0LeV+(h(e8>hJOn^z@pTH zK{mRYW(-JVJrX?pna_#`KEZ&860KrBMvSlYM=PFT=trE|PnKNJf9dgxMcgN`;?g5b zFLAOA29cClJSj0|G@we5aA9T0M;_}D_pQGoy$F#CLv_np9H7St4VqtDZ@BMMDPpsj zoW=gauCV^TMgLAGeRhWQnSU;;hiIWcgs@*Hv!`ETu1N7%W@B^yx0%aW541?kV^-oN z6&GZoYgTHzTm8Ac^~TOdW4OxNE*1S$Y>WV=lA~uR`Alz31dGNuVwaZDf_Ge*66|UI zU$g$pW@B=A_2c`uH?I1u`tjJX{u@6ZomSR==cnh-59`0jNHsGn@;{ 
zL;*A1T4t*Jzw*HYUR__Bta=p^`uSvEW(FT1Gaf5b%J9lo7JT+IdP=&EUUDQBBR?|fu%zs_|q47051+kd2Sbnn)sT<0=`6^{jIE%qIJ`I0Yk;HYLO%9cV$;KIX$`{fI` z?~oyp=oUKV;lY)+o|~?3UB*4aCOYd+iNz-USgr;D4-Z4TW-C)A^NoB!nk*P4L`;wC z_*v=IRV29Lvv{rt9}In6m$Y)PJUu6XB@!QnThQ~(u$PSPdVej!Lm3AJgV_qM^W_jw zC6UcFquEMJm@y-WM6{^P*XIo3j;dwyUSl9O5sy&}RsI=VXJ3l&H=0`Y|38<=^lD&XTygFN9S_h zevY(qR~74!XMfy9PMU9^r!zFH*K&!58x$GMyvI!(PG+K?sWP4O$6FMF`Uv-W{uO#~ z{@nbe!A(4$)6Z};D2QR`o8Jl4*NGj9*9%rt3D@}tCe%%SrFrNI zNtmHWVu>JmP#Wq?)#ntMpNtY_3Z1QQsDy83E7L4*u77THUai1ngxF_vVFL+5$JW-s%`&GIy6=+yd57LiIB}S7S|=52q{JtJEO6y= zXhu*|+8%2j9tOpev>I8-bpQbg$pZbsY?TnoMupV-)m)HxgZQG6Z(S156SjHc7|dX z>I)&;9Mz3?n!7*K#HreuYm&V;Tg!D@H+nb7pk1s#1lv(u10v1PI6NnSY$AOj)qgvw z+h;ee@ahS6hIugI{5z+v_MaIw4#AzwK%4ykr!SsY>_6idrw9MfqoiNg{_{g3H3u`$ z&SoIa@Ey)|Yh}8-n6AzImC(Og!OpU+EKYmGl@7U>oeXW1gP3<3TRNqH6 z`n#!=Jz5;q4;M#O$Et)U|{hl*sj3*BH0^c?(e zR%`F6y^C{L){e96VUi^ymIASaMkiLnv^Oc(?Q8Dolf0~ak_y@KM)v}-eSh?&hF7In zBR5Sd6N2P~kahCR`byUG0>k<)g3V1|xAVaI5NjI{O6NG-Y%{b4VeOztlJc`0rOl0H zYH1!@46IWjEd}#jW#CquR=?ufafG~J%`pUvb7R2@KY;wqS>8Veg`zDhwL<8{B9Wf$ zJ%l!y0SpF3sqb5SfSQ#?oqu+a&91&SHidt$BiYQ2*G4pR+f_(zbk4QVwES<~fZA@h zHoBH4tsCLOVb(&n@RD^S+UN{x;nC>!>c+D0Znf|zd{*6vZW4_;vFPA&YN2SmnYt0S zyh$x|Ef-Qpvc^-?M$x#3x)E#e0<}0H`&ai>MZHXuH+MGWm9s5bR(bnJ6h;wPL8ewQ(s0a#nge(k)-XXXd`Po zCc2U?`5#(|mz)gU$T#>BTG%u=3z|pd&^G(nMDrKlTT`mAY$PhrqO2qNSu6>wsCnlW zfhN|J$QOq?MO-Lri8ClBcGq^eiX<_5iy=yRE%*$JLSRnk z{soo&2p}SAIf3C42~*2|m0zvX2B0BTl(09*$m#|?9{O?uXI2J91LU!)c_)zgbZcb4 z3Z$xeI`d7Hru~J(YJDwLl*pG+<5BynEjv_gt=F<+)-??B+<#pch4#Vmf<@^#c@3LV zsjjT7$=-^KY^)&KYW^br5; zQPR4_svNq-w%XXX;xs}@+4FZ@E&X*^j{hC~g&V&MsTvkH8x>n9Y&0@Qg5FYw3p;{O z7T+P=CvuWZw0}>WiX}43t{+}rzb-$xzI;;lmpYqtYIl8a8jMU zQA{$W+xB$&a1u)_l}ti~nSGL50z51azQ(kT{@-;E;Ku!b)%ee+r>BSc|Kp@b)c=Rg zfcqE*_J6k_a0l+ira(PFG;9mpd8N1A*1*F!vyarO|FeKnJwt(S(Eq2W<7)i3i}BgT zLH|ESs>J^!^l;`6~P4{inu>@8D$7OKz_A>XKJ8%DF!eRe*UBB0<*B@bbWh!M z+ey(=Yr@}V>N5VPF{*Z+25cDrM;9;F;{TkzIN1LlC3OxO);3Yd=KhD3-H!95wW}m* 
zCVvN&Pg5;Y%ok2!ivv_$`hR+f#jn->$7j#a*ZTj(VgCOpsd47$W?#L*0bJa=$Iri1^)i|IMMjr`<+Vq{Wc+c?9qp!ti>ne+=X5g7*9(FA z!zwkcVqLgXKq=kAlU@r@X`ipkKvkpvU4Pvz=py6-_rqh;{O@eMcK_qW#UcLxqoiZF zCQ6}5!uls)J-JS67E@-nB1FXdL>4Gr=NR5EsWh7_^coxF5*dR9+~4onr`bVOIR`O(bwmKck;Sd;gDbUHCC%D|6FVlt6+A1uz<->$q=j{(pI+ zxIpI^Uh^Q}A^i0E27D@{<1VNgntv1cjywC87@B{x2g}7!|C9YGZ^L0hXhytGab#j& zOXry@?;_`!J0qW*XRZn&=h^={$M6#ol*bY--@KKMRu(^_ryQ61NQOy1!GHb=y8BwH z>Hk0eGScL|L4Vce2D+>DCxNFZpgnP3&1RU9Oqef>9aI@ z5rCkqVK$8Wa1oabBDT>F-i&o(`#Y~ORB>1EUTB@@X^!R(B*?@^crtA9cIU^;P} z>2x|Ha_RiuEbnqEQt4%E-CeZP>C^yuZC`ljIQMW(1PM@~fcA6`Z;DH@H{{(W-RHO3 zcFFAAPX--R_nh0Mt?1r6_b{!VvYIxv#QP;Cr?jk#hRwBrF=H?nJlL^?H(pg-YT*p9 zB8vv3HGmaa^hcW0ohElCw|}!=16&+?Yi!|51J?9Bsz?T2DuJrw; zyF9ZyOtuI7HjH)@2t?|W;0uI5`Vi3kgljTVC!5% zTe*AqM|vkW7v_HKH{GBixz*T>KWACp3PK>^0!$5jb+qPIU zz)Hm3@TR@L2te50!d)&>e+u&cyuHEcz#5ptK<`=nl z0=!M_Y5-RqU(=j$et#8uF_LBB0nCM6+iwuD#qII; z0-Q%q+|YgUBHCsk`EVr8SL z$_8v=U+zUFdI7Evf;n9W+z?-Lmm2I9u-)3NfP1udKfpCXIj6ON>!N+`hPf}`Zfeae zH~$UPniWa3U9H)XNuN@!)rTdm>2wVdPanP0l>?ks4JjqSrI@LG8o3u>S+=k?1J2B> zdl2KEfPa&~tQ%Jxd6d^DUUAqVUmLBL2CyANwjb!8fbB4{Eizn*O#65;3>C!n5ov4G zL_@IK9o57ZCU_`&>Z&I%rsd2xs6Z{LG5nU|P+_=|OCB>{qfA=A-RP#JSoaGOSQBj^vc@(xZ3Ju`KbRJT|EixtQaEel j{%K2*c=)b+Yo=J@;$&dM~YO2v6@onYv1) zRuI{eFd+#J0NT;SzR!LIkRU;dq^O5Dla25qkx8KY2e8oqx`D>~WP&{*iNs!rl2cFg zU(5-Y?t+BDH@iuL!C-K4cBcOy3@O-waRAE{;!zr+?>XC*KT)rx(X(-@sr; z{998ZB;nr-w&qo>+!vBaOn*i$D2qmLH*`oG=YI~}=kCDqQFxrjQa`$igd{-#%Tfc9 z!3-nhL?V#L2mBB-f#3y+kjQ%h-eCY)W@pYq9Kl@5Sd9Ap8I|*7;(9FXE5bZNiK1U4 zna+D$+%xWrk$=<6XsI78qLOhM&72;@oP{XomkF*#dMb8^Z!3^(!ZDM z;0Ts*4>gGO>wjmy&9Qsx4xJr*{~9iLBuG%~Pywvy|9@xa7sE>bKO3H(?)(2!q(gX# zQ<4M{($5Z4#%sjZb&8I4>v0k*Vdn`tG$b<|IRG4ECR9IMjNrF~EL_U^9^=UBB-sIw zWTrHZ)-eEa5(GCapx$BxSJP`IZ#W8!q~mam1L_emf}!IGVHPpWkS67_k{c4u`ni*PG~^oLIGH)O1vYn1nlzaPPr1Ogq0Plz|tIIpI<-M$Bp zqf(WBfkYs@{_*k!aLjar1KHcGjZL*LJ&z@kOn(X8#^}*0MFID7^<6j+xGm)aF=t8O z!vr&O{`bJ~w}f&81t=w6;iX8wnWNTcz@{MQSS2f&!t9#rL`;G}sg-_KDJrr`l9Ub= 
z`Pkq?B4{+zbf-xW==T!CI3N;@9@V5@;}khY9x8?`LSP(1#t{U19Hky{Ca9qJstF=3 zLw}027D=ILaGVyyOrcwqZ2HU-eM!W7(f3$1rL%-%FJ^wvW7$B_R|I>SS}*4GPRTFF z85NQ*`VORgZjvClO#zMI?K{WnC#o9R*ra{}U_>SX`bH|$BjWp1NfQYy5mGNyJBXNg zmJu~H&SGBg0^=cxW5PqmcS=;Cr&P>t zX)c5(gN$VQA7p%7li4LfJST3M1qlLnkNyp3QySp45~dLxJv&;5hM%fbi^d3!e1GP> zM{eF3dZs}Of)kXT= z{D+5dIi1o-^*w=7(rC6R-&{D1YJbw5pq~=aT1v6+J0#CLL%1yZY^I@!@1w7NE1e%RU+IU->Nu(zPvcZtj8mosARt0q>j8qPfK_e0Zizg+<0S&3-B$`qcL2Yw#!666B{jnndGP$N;>eiJXXS z8BzfR1P$W=O?Lq~ClUhPt%XtL|CJ9OaCvj3v#M1{ zdQ!NKT5_ZoBR?g|iCW^N!`tgumoNYI$_@R)j9tDY z)fjTTJjKJ+iiJfQ?0-Mryt&DBF!W8<>}}dOs&}hWu5%T^f+ZZ37W*DPe#{p+a8NT8 znWfMHxbXPse*6gTYYdPGbPJvG_~?o|&&}4iZooVej8xX2Q;SXZv0McJ9v}O5%@(>! z`WyLxG+i(XhyyjQ;|Hl$S21UjO_He^d@%A=U9!qudTLGpbAKd0a<`!8>tQb)-PKxx zMIs3c29pKc@vjJGibuF(I-$7fah-^Iyru-E@jkv@LxKZ83Oj_zDVs*B@>jHjDNI<6)eYIfK zm2jP3Fr}{ZOT|M~NWcU=k_ZIB!qQNut3IU|_~|I2r+?69eM1F&J6Y&vd3k-S@+t); zV)VXqovSJERHvszGDDL{TQIYfNw}v$kQ=s$8jBV93~iTIF2)kmICPpfta+I?uF2Dw zp;PMzqM-CAU6aud-9NB}GjEiXN@)}#4KU_3l2bVNv*`U<98{>KeAf}Vrr#w| zto9l+v40{ZqiUz`Teb#YH+|`HHa9cPs*C`jCM4$bd`lxQNc@!(lkb{H>v~liy29ij z`*F~TNKI>gZbkVYnp(&IY^KRsfZMnOSB(D`rzeB*_S89MK2RPTF9vR%$;R2Wn*$VPe$5ctyr>C=l+`T=NN>p znD^JO#xj7AL}Z43Z?b5u+Hye}2}vTav9{f6R$i?-JPA;YoR1%ENh%DbZnKqCmF@wF zseiWJixIp%urp`!b@0xz?`Z+*wOC$LJKaPRQ;YJ0@8RH{NN;}dPC+cjmL{~h!L|ln zEtI^#)~qwba1G;0MrN6NR%Tn?g$=rfJARNDDUlS4EQw=L2c;mfHJHNsy*ZkK#gTuU~(X0+q6Qykt=6?=`mES2!_e9WW*3zwWa@*osw;S!E3t8Tj zj)G#9*p|8FIq1f_D6lna=6C0+@@~tp-UHPUL4Dy}L)sM0_CRW!Y&BTY9R|yYG^QF7 zJAl=>NtnMiJIjH%@c?4c*b$tH@3Lci)EC|!lI<_;48<6W%(H-GKP__BwMR{dX?P_{Y^)UN-B!zKU!@nCRryx0Fv zk!%I8CZYX1qsCw6AHSr&H|$7zt#51ngKqUWSgF1bYH&A|vPX-9`ZhVJI+5+;O{uN} z$^M6gL4A$~pplP}gi~8FDh(Z@J^&@mj(LqSRCDluT9UDeY!nB?tgUe7VSk#%5X-n) z(yUTY0ju>JmbKBf9Ae!F7k;l6vW2s&8_`Ce zRtt|t?^QRJg_Ejv@t`w>7){1)`{?flYLCQhLK&SkLXMR{O7-J7i1Qbic@EX5f|QzfP<6-{I-${`~Kgq-FC_IdqF{La}YdS%i|Z=kKOk zdT%-Y8vKPTWq*iNEt0FPku4Nf8ku8G?`VKCJKB(mMUm|jIZ9{fM^43rX|k(_S2r)p z4{okr77q%`Un&ksfFn5iS0dzV_2!yIuNl(}1pbpo$%j%to#!%I==Vt+2N+@`Ns#S$ 
zsU?{P+sUGJWFzY#{EhJm^>rYrEUXr1Ou|eNa&yaGNPnXd4D_Av~59d*_>-wm?cN5+oRaY7m(@fd6O_kmrB?3z&)3A1W zpX{0d_kZJyN$cqUP4|GV-2b_>|L6GRY`_2KNzxPQ|NUmruWcCE-GmYl zM}fwjke^P?e|5V*D+WKP+wfMtUBI+l@m$qGWPdjg9F=ywEtNc~+woQ(9ckG#ndYoi zvo7}oM0Ry6ER8KPy=CB_CQ74vlYOt;hikUr9n7VS4@?V4=CNP|Z(iIug`=}M?xWLK zS#!ZbK7{2L9@J1PXRfrSBz%U_@-R1WEMKCKsf6mq3N_4^${m#Dsl1PP6UEXx#rxb^ z`G4tBm+?P~QMU0kVAcBX`1rJX{^R_7KmI>S>Krt#ZK7c2(_i??PQCw1S9fd9>z1xQ zshJv7M5$V&m@OQ`mwt%u3r=11e|Cw@C)58IXXjP@e{r(k|M4WLaUSlPuimf$q_}m@ zO|SXV(b%ga!@ts8U-e4z6AvqY+|^|3I)5*huj&<8^$X1|1e%GVA(Y#m72F2A#3!%s|1YBBF1qyI%b9bADj)V_rIPE zkN5ijNzx(Q5Gj$T*LtL1J-tqElF&eJ?}>@`p3G3V&LO;+Q=vC3F)js&IR*jD0)IA1 zI}(j%->aTLB)vnuN!xl(BHuZL2xr=U0^i0Qr}P8*>1_}H?>iTMih_l{=`T(pL5v&% z8lmgBFK_>SD;Y=U5MHn_WD)%Q;ud_$h2zes?CXD1_>MdIo%i*>=D~c{SO1tl#a+}d z2u+ChK8bbgd*M8D#eM8Nb0_4z^MA~hVeCBnU*`~hCY-WFz}3rF!cm>`SM;RgQXfe_ z&BxiVU*g2IrvLx+%k`_*w>#vqs{ap%mH5xYlZ)g1{SQx)NE~x^m)(k7y7joY%b4RG z#ryU=pSvFWv{|a@|M?Y3yW-K_|Br_!7bn&G-;dAs_rE_$I;^`J_8%C8pnn$-j`Pf1 z`Y!Z7-?uf{$qXa;?%Q=Q=lago*FI+C%|_yk?HdL(yKFwcWG0;{=-tNlJu2gH(aRo; zM~*Wdk0(UToj>V?QcgukwT!L1i+VgBYalP|3ojk#0d5Em0K)yLl`!xg}(=~8bZ$gUe)7i&gW@}z6gRECQ% zn(ZxoHO+3y&pd5^(JlP?$IBPR{^+LwTLWtRuh4`mezokCcA) zdMpR!r3E^3YXUr0->sWa)B?^<4lV;u3#Vd0FG$TQB7#Tc94kH$NOA}>7pp-P4?FRPxfUW2l?Q@AYRv3+4w(#3`>j5su zl5E8M0CvR5^nU_r;``<*)l?5}1bE#R&IhJdYr2}%p(zV_q(c(<>AgN(yWjeNJB3^v z7XydZXw9)1b8>E1!0XP|YJ)Y_O)qW0MO4Ri0qz!X7UHpfdT9eT@glc{yCvWve&mW4 zw!%?<))uw`Q!XJc1GWNPZi#p+z_~ZSMkY#tivXHi%YVdHfSclHj;nwhgLAHre=ERw zFwRviT!i-gye-_MA(jB|NJDH5*a{}v-s$Y%q@T0X6;Vo8s5L9->1S1IItXf`smcbd z!>R5>Cbj}xA6Ip}47ed=>n1hWDPX&`TLEv;+S>uHiSs(H1zZ?p#G)=Lf84okcp==A|N?` z?Ni~gJLKrYWMS9k0mdI6*K-*x1I{O7rJ=b9l5jsqo(pnTs4%LCIZFaxp-fx9-5~S; pV=ZJPv?hT-M2+2M3=jJ)Yx}fM`}B3E{|f*B|Nn);_(TAx000}pO$-14 diff --git a/staging/prometheus-operator/charts/prometheus-node-exporter-1.5.1.tgz b/staging/prometheus-operator/charts/prometheus-node-exporter-1.5.1.tgz deleted file mode 100644 index 
06ccd77a512ca6021f08c9048848b23853739092..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5999 zcmV-#7m(;5iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKDHR~tFf==|J&MO~A6UsVwZV(|{`T+a(NEi8p2>cTen_yoB-Kl$(yNpR9-|s#Dw~Xl*u;y33x%S! zKPE!?QxZo%+&8t`?e^~Ww)wx^ZrA^BZ@+2((AnDF-Q4QD+1cLuq21Z(?6!Y^_PtwP zA*E6g@k9IGZPkhUH%TO+A5lokSr;Z9k0eR{v+Z|&^xIyDG7vOTCT|b^j!_JORt^|) z0fqiRMSr|8kFg75r4rd~wMJBp(}5rGxK)&B=@MUZjq|?Z^qh-OcD-h;vSz6ruL*g6 zOej-?G8Co|#)L*)7*WDRI-vajj&z1@)dOHeBb0yh?C)jz?@6AD08O>GB4U!Uuj6m~ z9q(Jq_olS8|4&GiqI?hoa6|v!Zf|YY`~REG-RJ)Q6lo0(a7fZfffabIQ&*M3_txNS zOeIhW1pcx2{#|p(MNE{!5Qa2D4RC-#LCgI2nmrAL!eA?nDSJB!Z?YD zLg{-nCL`>600I*(sp4YVg+Ed<^(n9SwW5mxC^G88#2G4pB#ojTk7zLM!qM=UtDZoK zOnII}F`cljt28U?jV6d1XFmTO|_` z=$adz$+9h-(HMEEy3lD$&l8wgEn9F6enUuQN}^~A0s|h$$U979&#<&Z!zhM3sur)SaX98e|%bb@T5jqhHej`fxPV`U4?{bHXZ=l!~Ry zQmYJdLmox^oU)OrJ)%s5kns0Zn(wi0fMI}A5;1*k=oc7sLp?%}if9T0K@4w(3I(he ztX}tBr7=}T%4+}+Bxx5m+wIu>h8Sa`)Z6X%)RFi{in3OsQ5=O{S=msH`l#Q$q(kL?DvyPt)${v6*0KGJdPB9RuWEVbt z)>H?nplZ6$nZgT0wn*8YJm&0#bCuIzGJlmQy0Ejoz4gZP#DD~bO9p3)ox}EjYjWV^ zr@j3`Uku~|-YhHK;~|_n%X{3OEZeQL>eolC6Y?&5YEh z%hP5_CWo1gFbH{TrniNdM0{#Hjv5mMW8F4c!5D`+*Vi0)^&;z28xR_6tOHm0W={dDj5GvF$yam?8mP-?Lv%-ghUZvD{c)0 zfG1k%x#+?XoBL?T*LuCSZae5SH3OTb<_hD=SOXopW;E9bjHd%Z!%VK4fo15Ni33!` zuc{&u!MF<~4tL%x1L3l(jb(b_d5TA-Q8(^20f$2jR2Po9W4|533Kkq*P^n~yDd6dTZ-hgG;HQk)#Z>F@S_5WDy(4~7sjl1x- z>`(W_K)O%)-|m}05`JLO)b_oZbt_7x1W(9_*m>MBXE@V%o$>vQm$RQ}NCgI#r0;&_ z@|+$bEuH`6-g)cz!`b1fuP)SG+qhx=xA|skt2Y1J-PzrKp8q{X>iF;rS|^z#Nkju< zAmG=NcQBZ0$BH@kO(JNInlL>!US2k}#dc;`LgUy?ks9ECvgb&())07_V&d~J=_E;f)OfcHuG5yTwa2If{~s-_{Vwf)fF^3{4P05h#J3bT>9Ga z_jRrEQ~R@@&C{+LnVTYYO_?S`W9hs$b4?rI!`0PB4cVh!4OLxO-U(rxDz3`yywa5dlULt8#e9N%#|q^n^=XCkp`3-MR}$3Su$^ zMZO{s7~(m^l%>io1ZcG=R|!WwH=8FDLL=>*`d-PC`3FxSAPmm6BW3+XL#OuoO6s&; zW~Q73&^*7BpwSqxEb}QFX{l-f)8uM>PPPT+ zU5#5!WQMq_t0m&jS20%TcfAh>Uyk?QAJ+I|ysy<%If4dBlDDfds$kMv8Y0Z}w1x$+vt|CI@o737(hF!bi^_D*)qh)r_wAAm$=lGz 
zdkTM~B%(u#p*9hQ6yN&|ZS4kL={7W35(XF$ZBZnT5&mC3IkpTKnw0_|0`2M_hIUzm zN_aV#8s;1vpK4e==xJYN!}pGc;OwETBsb=EPt7i%DLAK5q{rD*YCNSLo6{(A*sjj& zVz|m!M29(hvyVB8Gnl0SE6G5QQrV9ehMw-*)x})GGiQyhr8;)xxK?s+jbI`uQ$uL{ zNjCo^8@1YOySGuiWq%umgkRjMC=7_C0YqkzA6A#9Ezr<|6G@WpiY`kr5_@A5RFM(t zqB*Hk=N?-3{{z6#oZ_qxr8;;$Zd9O6M;18f~21 z?yK%GrsueBXrT=IZqs`l70Yy`tjhLpw|5RR5ST>TZ)*Ix0gW$>+gYy)g>`$It-zAX z`ur`Zs_rEmFS8C+FyF0zH{JB%@-pwzFDVP6G+fbZ-dNTEb9z?kqP3Kh*XeYg*MFWQmCyf>B$2I2$NNfI*o6Zl z_ES_|HWlmUr)cgnSN6aSc&7NxHX|GCe3$`cAukHZ)VvvuQk&E1N` z-G)eT8%+5?I5%hY0MaBR3Qv_D%tuq(pgmV`!lQ_?(XR%;A8GAiuW~NjONRXNN>j#0HP4h#^svKW52>A=f{K|k-*$pmZ zQQIb-cPr{;t87``=G<;n(dvSYs?z$+!h)rCY#sSNR<5+gu2B7~OP1_e8EBLmGg*Fd zb_TAlx^tfu8Nt<6qh3?5bY^C*OmU#5z}a*yF)CpUZlf(BYV0S12SfR`HRe*a^pC}0 zaw>BuIhAuj=Q!{qDiyM(TNZuWHY+{tn^wt6(#!V)VWF>Ce9qOOv$;@CM&4YgIijto zVpM_#RU%5lwTFoopk16bsy1cSiz^=Rs0(NNz0$XcPLQFLJ;4W9MQ5y3@(ZeNy(Y6= z*sA9)Ln@g=M<5C5led{4z?>IcaZ;%!xT5@|yx+P|Xmh$**))QhpV|Oz!&>D;N!~=|C1|ij*k@WRnC-@np@zs%(2?`?*+qVt;^;| zDJqh4=BWa^xbt3Hz6^TXK6>x|xk}%>4*Q~9231F|rt<%3TOdH(+tscyD1S=%+{%H1HlEO3}XJovJJcfVzUbRbc=!-@H^ip=m8)^;JK znMI{`#h|nhP_u)TNx(AqzjB|zWmUZSqS9|I{#E+FyR~rlt3WsO|IX&_W_|y6r}KRO z=SkAsdPjbnB@?^5r&2cv?lWa0Ki~qnH230iGvPLs-dm{eYzcABTU$VO{VvYpC4^N9 zE^6DEl<#3JvpDVVS)*pnSJkN9%&*FQq|(}4^_o&;g>2P27Hz?yJBduWn6PYyK z{Q|)wy!|+ElmEBdsr!GMo4Y&D_WvnTtz(z>0Opqs%-7;tl7_I>-kn1{;Sp8_2~&Wi zYRm=wmznANUw<@LOD3HGD$=P83=d@6-8z6wns*vw%uS@O*94N#Uj$DR*@aK*c%hJK zqU8E#ue>}7pVku|X8Q}~_7+78G(j#g1i%?C1yx9|T0%0fNr zuWklz^8ebq+qM3`wex)c`)SgB`~U5F`<%7-$tu(LXa;zMCpa{BzMUbshC&Zu-V)v$ z2bm6j#~_>3Rvc@$ahl%_zq3PKxm>>N?Ds=io9FbsrPb{J{lb9UVE;GYY}Vs{?Cx$o z$Nzbf^pN)7U0tsl`yLOooAUM~!Q&>N(88$`xkeQv`-73kg$ppNw(V)D^*>A*9k&RA5wU@wKUB zkSaJwbJHpjd_qHWWZcFE7$TDhltGAz8XLs&+wy`Z`y?R)8c{_t3q8?=of8r6 z^^S@{TA8Fm=JvhzBSPc+YqnFRGv=cW*u10Oe!=kSCae2htl>5xrmPF?;#8K6AhBHu zDq+5^K@UYtP0R%>T|n0wkTNSpBf(s3CThw#Mv=WDr{@)2SU=I#?y>2*S@wBHS$dH* zV06QFA|O&~e%sT(=DHyf=z?kNCCQ$Mxd1^-*bg-(3{DSCUCY}_lO)0znIe&!)|5ft zr&1Yk%WJb%p07Tw!OvU_XlMe|xM&`RRuj^}&zHk1%&f6W~03QjmzKhp^1 
zR4R-!^>}t_RvQ1aLpqPP{=Zef|GnAX-g#dCd6KknkI&699%|Ko@x=na6iUmE;+5E1 zi_UP)m#aDgmCd>bJ#1_b_bxiZmtV6@%EQV^C*2%hUYb+5FY%AXm6z_;Z@^2&6}&V< z?HqJ={e5EtHmZkumL5RNJ}y0GQ;vqV;sM4*2N&!6q}Bb7yKin>yS4GHyVk!&THXG; z`$l&d25wycpS%CD+1Y-+|NA8An)%X|-RJfHCrNA2BTAvLYZ+GFa6U!`gOo;Q;+2r#D;c58qrd8@q;_^djxma06!C#Q zvq0JCweAZ<(FvMkrlowsLT?Qix|7#06M;i|fuWr`|E~=n%;|5=Oi7IrBq&Uhm2&s^sKccE-{u( zF|iM%_sW;&iTBDMkgwh=U&V>{>c6}-_(%lhsf440L+SZ$WyAAnh@@rV1poct(c@U! z|3Cb8e0XyDAZ^^#|J&R3{=dE1ejfjyB#|T$eB$D#7s1g5PAHz&v%fX6&*{O_(*A#P zxOedW(2v9Ww{d;{@9gZpsqg=6Z*M){|9+CR2EF11hhrY13HEEQoO-X|h)G4FNS2>@ z_@wnDe}duj%UisTuwlZcp1u3)!O34euix(noJk&`_X4!H3Dh^!t5N z(-BjGhpCBJ>UqXDv5`IooGYmWNhFY)a9{3u&n8*8>Uk=Q(hsK}zp*+@0@tNi*o*^ejlfE~|bX|6q=s zyjDw|6d{1WX^w#{7GXA%u zD&zKY4czbj+Inx^(#d6$zrEA4kM=wa0ZZENUtZnTT9mhc7nYr6J1b^i`hihiA9lVgf|tlD-wE&6VKr6uvn48vo12eakDBg9 zRdzmLfp2l<4yILT%7pjmY4th70;);}NcuI{(!48=vspTB^2kYBi|gK#D=f)8RnNCE?}}>f2eF3lc;pK-TLjaduiaZ&D(x1tEmmn zW_@?*wsxN&-O0PH1=m~XTh!w3J#Etcs!@J9uF-W4Tq8f_+i-8kUG>YPWs#MZvSbE( zD?DswU)AIx5c3k2Swws|(r)cwlmj^}8H&w1k8{}{4f$(uy#rOHa8k=0_jgp9vC0fw zdh}IhLgGq3_P8~J{-Sr&=H5)JXy9BR$vSN_ICjyYv)9$Yz-q#*#2$N+)n_zXMOftt zaTau-Uj>MqfQlCht1leFsxKTq09Cg)grzG9%QWyeHvlVcNV)k{#rqOcr|R}Y@$#4; zX1p@5M^U40u5!h*iE%Xwh%qFk?WRocHCpCGzOHXsll8C~{l%*brDcB;0ABJs64}cN zA(a82ps-QmOK3UcvmBND!lVW&9?m%*qsT?+V2e5DoPTA6p=GaoZ&3@8^*7a_SlL{T zQzdkU2F#*p*!ODB$25D8$pC6U2U%#b=(MG++RK~|B*C~?{4e=I{bp19R9|uW)~xY= zdY%9Ixt}k>8C@uEF!g=-nG3)R5+@P9zTH;TKRp23>;k9oZDs8j3^1&=d$kuS6wI7e zil(pKgVyF4kX0j@d(E2i((~Bnv9tl7RQ5-l(z*Lsq2_pmP}@|pZ>8LW7tJA*9%f&O d&TqZL{G6WCbNXJ>e+K{n|NrX#CEx&t0048A<}CmK diff --git a/staging/prometheus-operator/charts/prometheus-node-exporter-1.7.3.tgz b/staging/prometheus-operator/charts/prometheus-node-exporter-1.7.3.tgz new file mode 100644 index 0000000000000000000000000000000000000000..9802fe5e7eae61d5ec1a36ecf4d9b5e9b45599b3 GIT binary patch literal 6072 zcmV;p7f0wHiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH<$R~tE&^SOS-U6WZtCNA3q!i;CKXKA2&IGfNMNT<)7Og5IQ 
zu5ERyq-sfJ@WkwI-=jx8?E+&S>E1;j5~I@VK6G`Zdv%ov9;2FID%*^Q*v89*3x%R{ zFd;(uGZIICxNGWmyWPFrUGsmp+pYiK-F>~2|2Fx(*WI1he}Fr8yh=)?BH|C-?p@od z1NS>gB%vQrNXl6srag}&N&aWo@BP{Dc_GR`&_tQMefT>@F$9`9V8jI!`~wyK@g_XR zK1`HKWWUoHQ#DD4e!$~SQKO@4e9k$}`+~CzE<)M&+O@{orGC6NSd60JvfP?{4>c_4&WO_cZ?>C2hbVjz}6QumrD7>MB$C-Uhs% zPzh85fq(43d)ppy5fh~_gb|HU7dXTqA_8Gb1SP`=B`6L=is`IOh^RWTg_6dCtn>I4-)l19-fk7zLK!|~{Zt5bmz znese|VoC$kg_|rg6QU3N~GBGyyB}a9do8(vlWqP?1X(7kw~J+B4J2%(f$H+opTxpuf1OqynLFgDD zuMLESxucNb?Vb94-i}KvJrf zG7GITgB$TE;un;SP3sY5x(Eq>Nu~K7>kb$OC?yfImj-@;F*nd71gVH-Fcie#W~5NS zW`XKW-!&RjWvILX06~)WVY}On-QO|BhNpMC@2G?D*A!)~LT|CcD_tRD-~AxqDO0x2 zxdED@go}x0F#!b5E9F2=cp8Oz@@ZNq3_Wna`k)!qwDW#~hQEN1479a26xXD+FdmN# z??_YzMYbs|40K~Jf!KDagfTJ{xXjkY~E*UvIk)1|7jXU=NJf7vJW3W zX{du#P>?OyQ*gTcm7Xo^W=?xyoTMnLkMseR#dQ`)bGY#E=9AOJ>Y=bZ)f&uEBw` z@AeN0b}=QF;HIheDG%Y?4GyfpsWq1C2byICLoMYf?}R*%oq)3d6D2!|so6<**v>$0 zyF4wGWO9^=2s0pW!%VgilZemkyipUPV4`PfMdNT~Fa(||NyD7Bft7kMA)-=w8#<2) zEp`M-Jyjzm#HZ4~WR~|Lck?Tfdjlg6!6g14}su5T&og;CG ziuhGlBqEsffyCkKou(pO_O+}`FFjB3$aLzKy*A)zgn{bA33uqXD_DVoqf084tU_`s zC^r*2B2u0hS->ITq10eW)Et@Ai`W%Uw~{Z8dLgn}KM*la%~a5!6hvg1k`;@3j zAO4p8bYDzK_bLDFz6m7Zk1U$mxi`03MX8qH2^kZ+jXU8C=XzZ4`9X%u`FAv=0s{-u z5|oeVI3`J^&Y4Cr^fj2pM+aLDn*H*QBn5a4Fnnu8`2T5Y-2dg;dFSNE_ebZxx>W1- zal`&^d*@ZRw*TAP-PwNH|2;q^bAMd5%m9K3H(F%B(|P8G z(SMdB&yWBkiB&qhD2=xN=DV+Appfg)cJ2#oLoV#-qG4gxe=l$(U( zQz!n(l+Z|Pr@mKGW&Xia2nd4iWOk-PzfBTK^v-ef`>b0aF_H&8g0aMyLhHn^>bIm|!1X zbd2=ryzq`L6LT!!TY zeQ2;G3^5>DqDUSi{8zp^wg?!RqXHlT?dcze_FRNYcs`sN#};$)4Ix*?;Vf8 z$wS*nF3hb>&7PnsxS&y_*V$C+aZ0^5r%~iayE?9mOi3WO= z%Kms};OW0voy?_i=BTl)RM(Dttd-nb4=@pwsS&jPDBFLOty*ig-`g-=({IC&@Pj)R zg&~nNfXE#3!|K$u6+&``f+dv$`ddI%H71-iNr!4N-yMLr-S*+@ z*L+Gpr!0ula7C?otEmI#_^dKTYhAV6j|FviX(LPK;kRCHe{*RC{VyaKb0%?ZZ=f6W zzt?+vy}JIlx7U5r{~jZi{eMW3$j-FqeW5Ju!=WMjIVvyPiuH3ZnsxTd9=HY1RKHnf zWQ&~-bD%6lreH@mqxC2~5}6q}jF2$48)r3lClc!|k>D&$`9ip`XY~NmBqR#Ym0rxp zGuxr{D>&m(MA`T!Q^4ela!xk3m|XtE@-vh^^gPJN=>RGrU8lR^?#~%;7Jp9I>oqeI 
zrD8~eOw3=u2;^v5TuN5O__9UFU$W*`7Nm+fxIBuQop@2LsMoEcrMb@qb5zmm5=&L- z{AOvzQa`qh+>n(sZDAIw`?_Sw`pQ77)S1ch3*Q;Iy6P`{R%8rUSFL(WrzM}6g*t^p zO)+LOwZynIVsMtWgs6$12p){&o6dww)zN<}FD7R)FC}Mkq0j{m{fJ71tnH3P-*nAM zPxq!%vJ&-jgCH!`HH*(VJM^}f(#g=93$*~WB~^?{(4Y!LiMZC8Xvwe(U!!VQmc6** z0gw9d{@}FqEuvFoDCMc(L#z&GqEzw&s(!sCV{7ZwbDIX0EDT2=3F)KvnLAr960Y!6 zsu8ZJKP?+vmlAD(H!G&e2L3IZSu;10#ir{Fy`<&EB31is<>NB8u3MGs9Hi?A!o~>C zKEH%#W&nC4eyc{>#d>dB$QA5H@QR{&=3@B*)}nZpw+s+Esy15KvYW^b~>grwb zoXNYWc*rd2rc$f1s|+*7bup`2a+4|gtN6~8x?7@~Euh+pu_f9;O)}*!BNfUyDO@SL zTt;16zg+aTqUg?IXcbn!ZuCXHtWtS>IzZLfX zxj*0o?f<*Gdv*IyuiJa_|9F&CpU)Ys?HP0BZje0|ILb=g`>}v`w_|~HC{a1@#N4eS zn=!@YTncG67pXllC>;dUCQ)S*(4_uX?i0AIiZ)*~`mM>o%KUe?7S_KCbi@4bZSQR_ z*neN|J3*^$=fy>2&Gb+8arG8^ahzl$F60qxc zah4w;tWt4V-%h0b7S1vYcYn_cH7mZVLhWXJRqi8|&gQDulqx4=tG2P+4EU{Q&i{?H zg8WZp(smaN1ds6c>%dL=-(IgS|F?TPy(jtq7^ya~%Qk?;{j~X7+)L6Wth9H2h-W;) z$|7MZkW@{$p#L&kVE@aX&E1k|ud*iGlUR3qW0|zqT3*a8q`ub%lF%OnPtARXO}tdd zG(2+ilUF{OgpZpE4>Qw(xw%Er7Mh|M%D$0aA2(Bpm72z@(kiNJ4f8(b;qB-Dkg`y( z`m2k9oAkf#Ud{i1`_<0g)Bf*K(p~5O%CmhzP<&_k=(i{ac!Xy-GS|JGAh-rN4`5LU z-gyi%9sYtrwx6x&)Xrw=RnCCc(WNW*%A4H2?^W77rQcdwP5$3449E@ge|u+pyMF)c zb@$2t=TXvq%71rvecBtMBALWLt{im8PL$ zM5P{Inns4Hf(tY^trEefG&C;bHaNftnM9xrLQK@e3@pDcFL-i55;CL_RTQ(>6K&Xk z`_}y2H~j`&pffxfrzWWzsTiW#u$kK=HnSXz6E!=eBF{(!*Gm|Nd5$#E9r8aze!(t? z2=`Bqi%OcAq(bHlLI)9{asD+k;pmLTa052)`1GJac-8vo{w~gN+YnRMhi>7OWkX2p zR-#InuN!cRBBmzj0+w!|YYs@6rJ|u=PBs%Y1OxYY||Y3yrnF? 
z%sMcEP$b;T2@o&^eM;k$TilGoOMp&eiuc zLOGKP4O!}t8*zeLCUn`KPK9sN)Cr<}4*(a?|tU(&?H zK9JrEUtT2M3x7zycrSbvC*F(y@iyQC5tOGAjt`Hd=ev^)&!-`hjx8tnFTY2PqjCQK z`18rp+4;Tnant{`C;1Z$pPt|1C4?;#RrLMazaF0b)$;~}LBN^h5qi%+YnwoAcOVrRM%Zb5hhZ=n zn3j&259~7dX>U`*O0C=~xgGM#d<72cAJkR@a0W@x1pu$5Rp7#OP4wguEPEXb{iZE@RNY z4&gTb8x+M?Bff1U$V#^)}EBwds4m$O8&8u_v!gY z52@xblg~CO~4hS>B&s{wXAvB?(Gq`PM2G>6^+Jx;i`_u_BLKl&x zjM@@wEXA^`R@7L;o{csyA){&;VLVPaGvU68lsup&u6j;zOMgp&$W$n?frag$j5phF z9uOZ4ol7{(k*u>z2zAkCDk7rPxs4 zdlF-L$+4P#0-dl@r%J|U}A^p)r&{YMR5~FTHN}b{x292_MwUYN+gI3se?2H($FRG~EfR%s*g> zZt>nZrd43di1*-W^*zH9s7ekbgId|rzAKNjS#mdd;H0I+b??a)l4KF8s!g`4TP&08 z^)R9jCD+%K>`UT$)@gOV7zot`O!n(9l{F!WN;kG!KVEhx4s0^s4su$}Y;ZQKyGv)< zeT1}@9#{*mw^X;N#ov3}q~}$u{B&Ha?-aOJ?&aHZkH=jN%AjQ-hZ<2bhy4{^HnXp4 z@(_r53d}r$gjJt7ya%XmF9=Il5}G*hXSV<=Za}&DS;e~oQitmHOL22d5VKyH*Q2Om zH`lo0*~YjU1;hxFlDR44dp#`Uk+0ip>areIW3c>mp>*tT0>DdNA4K-DLP%x6rzmWc z_|mYP@M#`Oerr+}Djv?cn4ri>>B<&+&ISF-5JS^m&)%XHBI|FeLvga%Jf=$M3|%mb zqG8{wy_nGKK_*jB>p93mJBnUc=1mt-${Ltt?rsU0gDLX}<+4Wt?@f7J(*JEIrkANy zNPU&gR8F+P6o%Doa{G}&!Hfr1G<)fu={C+yc5=vkFQ6eWJ^F2)fE)5@#p>gxlDlOV yYCeh(YNj�?Zw7(H=qRx%rjg{ML5!r}UJb(o_1qr~eHA0RR8sS+#uthyVbtd-JLQ literal 0 HcmV?d00001 diff --git a/staging/prometheus-operator/ci/01-provision-crds-values.yaml b/staging/prometheus-operator/ci/01-provision-crds-values.yaml new file mode 100644 index 0000000000..5ed75d78c2 --- /dev/null +++ b/staging/prometheus-operator/ci/01-provision-crds-values.yaml @@ -0,0 +1,42 @@ +alertmanager: + enabled: false +coreDns: + enabled: false +kubeApiServer: + enabled: false +kubeControllerManager: + enabled: false +kubeDns: + enabled: false +kubeEtcd: + enabled: false +kubeProxy: + enabled: false +kubeScheduler: + enabled: false +kubeStateMetrics: + enabled: false +kubelet: + enabled: false +nodeExporter: + enabled: false +grafana: + enabled: false +prometheus: + enabled: false +defaultRules: + create: false +# Default configuration of prometheus operator will create CRDs in the cluster idempotently 
+prometheusOperator: + enabled: true + serviceMonitor: + selfMonitor: false + createCustomResource: false + tlsProxy: + enabled: false + admissionWebhooks: + enabled: false + namespaces: + releaseNamespace: true + additional: + - kube-system diff --git a/staging/prometheus-operator/ci/02-test-without-crds-values.yaml b/staging/prometheus-operator/ci/02-test-without-crds-values.yaml new file mode 100644 index 0000000000..f1dd74f27d --- /dev/null +++ b/staging/prometheus-operator/ci/02-test-without-crds-values.yaml @@ -0,0 +1,6 @@ +prometheusOperator: + createCustomResource: false + namespaces: + releaseNamespace: true + additional: + - kube-system diff --git a/staging/prometheus-operator/ci/test-values.yaml b/staging/prometheus-operator/ci/test-values.yaml deleted file mode 100644 index d7181b55f1..0000000000 --- a/staging/prometheus-operator/ci/test-values.yaml +++ /dev/null @@ -1,1604 +0,0 @@ -# Default values for prometheus-operator. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -## Provide a name in place of prometheus-operator for `app:` labels -## -nameOverride: "" - -## Provide a name to substitute for the full names of resources -## -fullnameOverride: "" - -## Labels to apply to all resources -## -commonLabels: {} -# scmhash: abc123 -# myLabel: aakkmd - -## Create default rules for monitoring the cluster -## -defaultRules: - create: true - rules: - alertmanager: true - etcd: true - general: true - k8s: true - kubeApiserver: true - kubePrometheusNodeAlerting: true - kubePrometheusNodeRecording: true - kubeScheduler: true - kubernetesAbsent: true - kubernetesApps: true - kubernetesResources: true - kubernetesStorage: true - kubernetesSystem: true - node: true - prometheusOperator: true - prometheus: true - ## Labels for default rules - labels: {} - ## Annotations for default rules - annotations: {} - -## Provide custom recording or alerting rules to be deployed into the cluster. 
-## -additionalPrometheusRules: [] -# - name: my-rule-file -# groups: -# - name: my_group -# rules: -# - record: my_record -# expr: 100 * my_record - -## -global: - rbac: - create: true - pspEnabled: true - - ## Reference to one or more secrets to be used when pulling images - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - imagePullSecrets: [] - # - name: "image-pull-secret" - -## Configuration for alertmanager -## ref: https://prometheus.io/docs/alerting/alertmanager/ -## -alertmanager: - - ## Deploy alertmanager - ## - enabled: true - - ## Service account for Alertmanager to use. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - - ## Configure pod disruption budgets for Alertmanager - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget - ## This configuration is immutable once created and will require the PDB to be deleted to be changed - ## https://github.com/kubernetes/kubernetes/issues/45398 - ## - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - ## Alertmanager configuration directives - ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file - ## https://prometheus.io/webtools/alerting/routing-tree-editor/ - ## - config: - global: - resolve_timeout: 5m - route: - group_by: ['job'] - group_wait: 30s - group_interval: 5m - repeat_interval: 12h - receiver: 'null' - routes: - - match: - alertname: Watchdog - receiver: 'null' - receivers: - - name: 'null' - - ## Pass the Alertmanager configuration directives through Helm's templating - ## engine. 
If the Alertmanager configuration contains Alertmanager templates, - ## they'll need to be properly escaped so that they are not interpreted by - ## Helm - ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function - ## https://prometheus.io/docs/alerting/configuration/#%3Ctmpl_string%3E - ## https://prometheus.io/docs/alerting/notifications/ - ## https://prometheus.io/docs/alerting/notification_examples/ - tplConfig: false - - ## Alertmanager template files to format alerts - ## ref: https://prometheus.io/docs/alerting/notifications/ - ## https://prometheus.io/docs/alerting/notification_examples/ - ## - templateFiles: {} - # - ## An example template: - # template_1.tmpl: |- - # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }} - # - # {{ define "slack.myorg.text" }} - # {{- $root := . -}} - # {{ range .Alerts }} - # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` - # *Cluster:* {{ template "cluster" $root }} - # *Description:* {{ .Annotations.description }} - # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> - # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:> - # *Details:* - # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}` - # {{ end }} - - ingress: - enabled: false - - annotations: {} - - labels: {} - - ## Hosts must be provided if Ingress is enabled. 
- ## - hosts: [] - # - alertmanager.domain.com - - ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix - ## - paths: [] - # - / - - ## TLS configuration for Alertmanager Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: alertmanager-general-tls - # hosts: - # - alertmanager.example.com - - ## Configuration for Alertmanager service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30903 - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - loadBalancerIP: "" - loadBalancerSourceRanges: [] - ## Service type - ## - type: ClusterIP - - ## If true, create a serviceMonitor for alertmanager - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - selfMonitor: true - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - - ## Settings affecting alertmanagerSpec - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec - ## - alertmanagerSpec: - ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata - ## Metadata Labels and Annotations gets propagated to the Alertmanager pods. 
- ## - podMetadata: {} - - ## Image of Alertmanager - ## - image: - repository: quay.io/prometheus/alertmanager - tag: v0.17.0 - - ## If true then the user will be responsible to provide a secret with alertmanager configuration - ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used - ## - useExistingSecret: false - - ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the - ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. - ## - secrets: [] - - ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. - ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/. - ## - configMaps: [] - - ## Define Log Format - # Use logfmt (default) or json-formatted logging - # logFormat: logfmt - - ## Log level for Alertmanager to be configured with. - ## - logLevel: info - - ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the - ## running cluster equal to the expected size. - replicas: 1 - - ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression - ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours). - ## - retention: 120h - - ## Storage is the definition of how storage will be used by the Alertmanager instances. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md - ## - storage: {} - # volumeClaimTemplate: - # spec: - # storageClassName: gluster - # accessModes: ["ReadWriteOnce"] - # resources: - # requests: - # storage: 50Gi - # selector: {} - - - ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. 
string false - ## - externalUrl: - - ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, - ## but the server serves requests under a different route prefix. For example for use with kubectl proxy. - ## - routePrefix: / - - ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. - ## - paused: false - - ## Define which Nodes the Pods are scheduled on. - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Define resources requests and limits for single Pods. - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # requests: - # memory: 400Mi - - ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. - ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. - ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. - ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. - ## - podAntiAffinity: "" - - ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. - ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone - ## - podAntiAffinityTopologyKey: kubernetes.io/hostname - - ## Assign custom affinity rules to the alertmanager instance - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - affinity: {} - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/e2e-az-name - # operator: In - # values: - # - e2e-az1 - # - e2e-az2 - - ## If specified, the pod's tolerations. 
- ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## SecurityContext holds pod-level security attributes and common container settings. - ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## - securityContext: - runAsNonRoot: true - runAsUser: 1000 - fsGroup: 2000 - - ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. - ## Note this is only for the Alertmanager UI, not the gossip communication. - ## - listenLocal: false - - ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. - ## - containers: [] - - ## Priority class assigned to the Pods - ## - priorityClassName: "" - - ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. - ## - additionalPeers: [] - -## Using default values from https://github.com/helm/charts/blob/master/stable/grafana/values.yaml -## -grafana: - enabled: true - - ## Deploy default dashboards. - ## - defaultDashboardsEnabled: true - - adminPassword: prom-operator - - ingress: - ## If true, Grafana Ingress will be created - ## - enabled: false - - ## Annotations for Grafana Ingress - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - - ## Labels to be added to the Ingress - ## - labels: {} - - ## Hostnames. - ## Must be provided if Ingress is enable. 
- ## - # hosts: - # - grafana.domain.com - hosts: [] - - ## Path for grafana ingress - path: / - - ## TLS configuration for grafana Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: grafana-general-tls - # hosts: - # - grafana.example.com - - sidecar: - dashboards: - enabled: true - label: grafana_dashboard - datasources: - enabled: true - defaultDatasourceEnabled: true - label: grafana_datasource - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # configMap: certs-configmap - # readOnly: true - - ## Configure additional grafana datasources - ## ref: http://docs.grafana.org/administration/provisioning/#datasources - additionalDataSources: [] - # - name: prometheus-sample - # access: proxy - # basicAuth: true - # basicAuthPassword: pass - # basicAuthUser: daco - # editable: false - # jsonData: - # tlsSkipVerify: true - # orgId: 1 - # type: prometheus - # url: https://prometheus.svc:9090 - # version: 1 - - ## If true, create a serviceMonitor for grafana - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - selfMonitor: true - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. 
- ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - -## Component scraping the kube api server -## -kubeApiServer: - enabled: true - tlsConfig: - serverName: kubernetes - insecureSkipVerify: false - - ## If your API endpoint address is not reachable (as in AKS) you can replace it with the kubernetes service - ## - relabelings: [] - # - sourceLabels: - # - __meta_kubernetes_namespace - # - __meta_kubernetes_service_name - # - __meta_kubernetes_endpoint_port_name - # action: keep - # regex: default;kubernetes;https - # - targetLabel: __address__ - # replacement: kubernetes.default.svc:443 - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - jobLabel: component - selector: - matchLabels: - component: apiserver - provider: kubernetes - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - -## Component scraping the kubelet and kubelet-hosted cAdvisor -## -kubelet: - enabled: true - namespace: kube-system - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Enable scraping the kubelet over https. 
For requirements to enable this see - ## https://github.com/coreos/prometheus-operator/issues/926 - ## - https: true - - ## Metric relabellings to apply to samples before ingestion - ## - cAdvisorMetricRelabelings: [] - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - # relabel configs to apply to samples before ingestion. - ## - cAdvisorRelabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - - metricRelabelings: [] - # - sourceLabels: [__name__, image] - # separator: ; - # regex: container_([a-z_]+); - # replacement: $1 - # action: drop - # - sourceLabels: [__name__] - # separator: ; - # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) - # replacement: $1 - # action: drop - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - -## Component scraping the kube controller manager -## -kubeControllerManager: - enabled: true - - ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## If using kubeControllerManager.endpoints only the port and targetPort are used - ## - service: - port: 10252 - targetPort: 10252 - selector: - component: kube-controller-manager - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Enable scraping kube-controller-manager over https. 
- ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - # Skip TLS certificate validation when scraping - insecureSkipVerify: null - - # Name of the server to use when validating TLS certificate - serverName: null - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - -## Component scraping coreDns. Use either this or kubeDns -## -coreDns: - enabled: true - service: - port: 9153 - targetPort: 9153 - selector: - k8s-app: kube-dns - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - -## Component scraping kubeDns. Use either this or coreDns -## -kubeDns: - enabled: false - service: - selector: - k8s-app: kube-dns - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## metric relabel configs to apply to samples before ingestion. 
- ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - -## Component scraping etcd -## -kubeEtcd: - enabled: true - - ## If your etcd is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used - ## - service: - port: 2379 - targetPort: 2379 - selector: - component: etcd - - ## Configure secure access to the etcd cluster by loading a secret into prometheus and - ## specifying security configuration below. For example, with a secret named etcd-client-cert - ## - ## serviceMonitor: - ## scheme: https - ## insecureSkipVerify: false - ## serverName: localhost - ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca - ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client - ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - scheme: http - insecureSkipVerify: false - serverName: "" - caFile: "" - certFile: "" - keyFile: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. 
- ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - - -## Component scraping kube scheduler -## -kubeScheduler: - enabled: true - - ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on - ## - endpoints: [] - # - 10.141.4.22 - # - 10.141.4.23 - # - 10.141.4.24 - - ## If using kubeScheduler.endpoints only the port and targetPort are used - ## - service: - port: 10251 - targetPort: 10251 - selector: - component: kube-scheduler - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - ## Enable scraping kube-controller-manager over https. - ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - ## Skip TLS certificate validation when scraping - insecureSkipVerify: null - - ## Name of the server to use when validating TLS certificate - serverName: null - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - - -## Component scraping kube proxy -## -kubeProxy: - enabled: true - - service: - port: 10249 - targetPort: 10249 - selector: - k8s-app: kube-proxy - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## Enable scraping kube-proxy over https. 
- ## Requires proper certs (not self-signed) and delegated authentication/authorization checks - ## - https: false - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - -## Component scraping kube state metrics -## -kubeStateMetrics: - enabled: true - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - -## Configuration for kube-state-metrics subchart -## -kube-state-metrics: - rbac: - create: true - podSecurityPolicy: - enabled: true - -## Deploy node exporter as a daemonset to all nodes -## -nodeExporter: - enabled: true - - ## Use the value configured in prometheus-node-exporter.podLabels - ## - jobLabel: jobLabel - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - sourceLabels: [__name__] - # separator: ; - # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ - # replacement: $1 - # action: drop - - ## relabel configs to apply to samples before ingestion. 
- ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - -## Configuration for prometheus-node-exporter subchart -## -prometheus-node-exporter: - podLabels: - ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards - ## - jobLabel: node-exporter - extraArgs: - - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) - - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ - -## Manages Prometheus and Alertmanager components -## -prometheusOperator: - enabled: true - - ## Service account for Alertmanager to use. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - - ## Configuration for Prometheus operator service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30080 - - ## Additional ports to open for Prometheus service - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services - ## - additionalPorts: [] - - ## Loadbalancer IP - ## Only use if service.type is "loadbalancer" - ## - loadBalancerIP: "" - loadBalancerSourceRanges: [] - - ## Service type - ## NodepPort, ClusterIP, loadbalancer - ## - type: ClusterIP - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - ## Deploy CRDs used by Prometheus Operator. 
- ## - createCustomResource: true - - ## Customize CRDs API Group - crdApiGroup: monitoring.coreos.com - - # Remove CRDs before instaling, created for use on CI environment. - cleanupCustomResourceBeforeInstall: true - - ## Attempt to clean up CRDs created by Prometheus Operator. - ## - cleanupCustomResource: true - - ## Labels to add to the operator pod - ## - podLabels: {} - - ## Annotations to add to the operator pod - ## - podAnnotations: {} - - ## Assign a PriorityClassName to pods if set - # priorityClassName: "" - - ## Define Log Format - # Use logfmt (default) or json-formatted logging - # logFormat: logfmt - - ## Decrease log verbosity to errors only - # logLevel: error - - ## If true, the operator will create and maintain a service for scraping kubelets - ## ref: https://github.com/coreos/prometheus-operator/blob/master/helm/prometheus-operator/README.md - ## - kubeletService: - enabled: true - namespace: kube-system - - ## Create a servicemonitor for the operator - ## - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - selfMonitor: true - - ## metric relabel configs to apply to samples before ingestion. - ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - - ## Resource limits & requests - ## - resources: {} - # limits: - # cpu: 200m - # memory: 200Mi - # requests: - # cpu: 100m - # memory: 100Mi - - ## Define which Nodes the Pods are scheduled on. 
- ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Tolerations for use with node taints - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## Assign custom affinity rules to the prometheus operator - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - affinity: {} - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/e2e-az-name - # operator: In - # values: - # - e2e-az1 - # - e2e-az2 - - securityContext: - runAsNonRoot: true - runAsUser: 65534 - - ## Prometheus-operator image - ## - image: - repository: quay.io/coreos/prometheus-operator - tag: v0.31.1 - pullPolicy: IfNotPresent - - ## Configmap-reload image to use for reloading configmaps - ## - configmapReloadImage: - repository: quay.io/coreos/configmap-reload - tag: v0.0.1 - - ## Prometheus-config-reloader image to use for config and rule reloading - ## - prometheusConfigReloaderImage: - repository: quay.io/coreos/prometheus-config-reloader - tag: v0.31.1 - - ## Set the prometheus config reloader side-car CPU limit. If unset, uses the prometheus-operator project default - ## - # configReloaderCpu: 100m - - ## Set the prometheus config reloader side-car memory limit. If unset, uses the prometheus-operator project default - ## - # configReloaderMemory: 25Mi - - ## Hyperkube image to use when cleaning up - ## - hyperkubeImage: - repository: k8s.gcr.io/hyperkube - tag: v1.12.1 - pullPolicy: IfNotPresent - -## Deploy a Prometheus instance -## -prometheus: - - enabled: true - - ## Service account for Prometheuses to use. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## - serviceAccount: - create: true - name: "" - - ## Configuration for Prometheus service - ## - service: - annotations: {} - labels: {} - clusterIP: "" - - - ## To be used with a proxy extraContainer port - targetPort: 9090 - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - ## Port to expose on each node - ## Only used if service.type is 'NodePort' - ## - nodePort: 30090 - - ## Loadbalancer IP - ## Only use if service.type is "loadbalancer" - loadBalancerIP: "" - loadBalancerSourceRanges: [] - ## Service type - ## - type: ClusterIP - - sessionAffinity: "" - - ## Configure pod disruption budgets for Prometheus - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget - ## This configuration is immutable once created and will require the PDB to be deleted to be changed - ## https://github.com/kubernetes/kubernetes/issues/45398 - ## - podDisruptionBudget: - enabled: false - minAvailable: 1 - maxUnavailable: "" - - ingress: - enabled: false - annotations: {} - labels: {} - - ## Hostnames. - ## Must be provided if Ingress is enabled. - ## - # hosts: - # - prometheus.domain.com - hosts: [] - - ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix - ## - paths: [] - # - / - - ## TLS configuration for Prometheus Ingress - ## Secret must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-general-tls - # hosts: - # - prometheus.example.com - - serviceMonitor: - ## Scrape interval. If not set, the Prometheus default scrape interval is used. - ## - interval: "" - selfMonitor: true - - ## metric relabel configs to apply to samples before ingestion. 
- ## - metricRelabelings: [] - # - action: keep - # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' - # sourceLabels: [__name__] - - # relabel configs to apply to samples before ingestion. - ## - relabelings: [] - # - sourceLabels: [__meta_kubernetes_pod_node_name] - # separator: ; - # regex: ^(.*)$ - # target_label: nodename - # replacement: $1 - # action: replace - - ## Settings affecting prometheusSpec - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec - ## - prometheusSpec: - - ## Interval between consecutive scrapes. - ## - scrapeInterval: "" - - ## Interval between consecutive evaluations. - ## - evaluationInterval: "" - - ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. - ## - listenLocal: false - - ## EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series. - ## This is disabled by default. - ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis - ## - enableAdminAPI: false - - ## Image of Prometheus. 
- ## - image: - repository: quay.io/prometheus/prometheus - tag: v2.10.0 - - ## Tolerations for use with node taints - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - - ## Alertmanagers to which alerts will be sent - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints - ## - ## Default configuration will connect to the alertmanager deployed as part of this release - ## - alertingEndpoints: [] - # - name: "" - # namespace: "" - # port: http - # scheme: http - - ## External labels to add to any time series or alerts when communicating with external systems - ## - externalLabels: {} - - ## Name of the external label used to denote replica name - ## - replicaExternalLabelName: "" - - ## If true, the Operator won't add the external label used to denote replica name - ## - replicaExternalLabelNameClear: false - - ## Name of the external label used to denote Prometheus instance name - ## - prometheusExternalLabelName: "" - - ## If true, the Operator won't add the external label used to denote Prometheus instance name - ## - prometheusExternalLabelNameClear: false - - ## External URL at which Prometheus will be reachable. - ## - externalUrl: "" - - ## Define which Nodes the Pods are scheduled on. - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. - ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not - ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated - ## with the new list of secrets. 
- ## - secrets: [] - - ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. - ## The ConfigMaps are mounted into /etc/prometheus/configmaps/. - ## - configMaps: [] - - ## QuerySpec defines the query command line flags when starting Prometheus. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#queryspec - ## - query: {} - - ## Namespaces to be selected for PrometheusRules discovery. - ## If nil, select own namespace. Namespaces to be selected for ServiceMonitor discovery. - ## See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage - ## - ruleNamespaceSelector: {} - - ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the - ## prometheus resource to be created with selectors based on values in the helm deployment, - ## which will also match the PrometheusRule resources created - ## - ruleSelectorNilUsesHelmValues: true - - ## PrometheusRules to be selected for target discovery. - ## If {}, select all ServiceMonitors - ## - ruleSelector: {} - ## Example which select all prometheusrules resources - ## with label "prometheus" with values any of "example-rules" or "example-rules-2" - # ruleSelector: - # matchExpressions: - # - key: prometheus - # operator: In - # values: - # - example-rules - # - example-rules-2 - # - ## Example which select all prometheusrules resources with label "role" set to "example-rules" - # ruleSelector: - # matchLabels: - # role: example-rules - - ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the - ## prometheus resource to be created with selectors based on values in the helm deployment, - ## which will also match the servicemonitors created - ## - serviceMonitorSelectorNilUsesHelmValues: true - - ## ServiceMonitors to be selected for target discovery. 
- ## If {}, select all ServiceMonitors - ## - serviceMonitorSelector: {} - ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel" - # serviceMonitorSelector: - # matchLabels: - # prometheus: somelabel - - ## Namespaces to be selected for ServiceMonitor discovery. - ## See https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage - ## - serviceMonitorNamespaceSelector: {} - - ## How long to retain metrics - ## - retention: 10d - - ## If true, the Operator won't process any Prometheus configuration changes - ## - paused: false - - ## Number of Prometheus replicas desired - ## - replicas: 1 - - ## Log level for Prometheus be configured in - ## - logLevel: info - - ## Log format for Prometheus be configured in - ## - logFormat: logfmt - - ## Prefix used to register routes, overriding externalUrl route. - ## Useful for proxies that rewrite URLs. - ## - routePrefix: / - - ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata - ## Metadata Labels and Annotations gets propagated to the prometheus pods. - ## - podMetadata: {} - # labels: - # app: prometheus - # k8s-app: prometheus - - ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. - ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. - ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. - ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. - podAntiAffinity: "" - - ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. 
- ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone - ## - podAntiAffinityTopologyKey: kubernetes.io/hostname - - ## Assign custom affinity rules to the prometheus instance - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - affinity: {} - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/e2e-az-name - # operator: In - # values: - # - e2e-az1 - # - e2e-az2 - - ## The remote_read spec configuration for Prometheus. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotereadspec - remoteRead: [] - # - url: http://remote1/read - - ## The remote_write spec configuration for Prometheus. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#remotewritespec - remoteWrite: [] - # - url: http://remote1/push - - ## Resource limits & requests - ## - resources: {} - # requests: - # memory: 400Mi - - ## Prometheus StorageSpec for persistent data - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md - ## - storageSpec: {} - # volumeClaimTemplate: - # spec: - # storageClassName: gluster - # accessModes: ["ReadWriteOnce"] - # resources: - # requests: - # storage: 50Gi - # selector: {} - - ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations - ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form - ## as specified in the official Prometheus documentation: - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#. As scrape configs are - ## appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility - ## to break upgrades of Prometheus. 
It is advised to review Prometheus release notes to ensure that no incompatible - ## scrape configs are going to break Prometheus after the upgrade. - ## - ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the - ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes - ## - additionalScrapeConfigs: [] - # - job_name: kube-etcd - # kubernetes_sd_configs: - # - role: node - # scheme: https - # tls_config: - # ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca - # cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client - # key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key - # relabel_configs: - # - action: labelmap - # regex: __meta_kubernetes_node_label_(.+) - # - source_labels: [__address__] - # action: replace - # target_label: __address__ - # regex: ([^:;]+):(\d+) - # replacement: ${1}:2379 - # - source_labels: [__meta_kubernetes_node_name] - # action: keep - # regex: .*mst.* - # - source_labels: [__meta_kubernetes_node_name] - # action: replace - # target_label: node - # regex: (.*) - # replacement: ${1} - # metric_relabel_configs: - # - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone) - # action: labeldrop - - - ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified - ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#. - ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator. - ## As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this - ## feature may expose the possibility to break upgrades of Prometheus. 
It is advised to review Prometheus release - ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade. - ## - additionalAlertManagerConfigs: [] - # - consul_sd_configs: - # - server: consul.dev.test:8500 - # scheme: http - # datacenter: dev - # tag_separator: ',' - # services: - # - metrics-prometheus-alertmanager - - ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended - ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the - ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. - ## As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the - ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel - ## configs are going to break Prometheus after the upgrade. - ## - additionalAlertRelabelConfigs: [] - # - separator: ; - # regex: prometheus_replica - # replacement: $1 - # action: labeldrop - - ## SecurityContext holds pod-level security attributes and common container settings. - ## This defaults to non root user with uid 1000 and gid 2000. - ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md - ## - securityContext: - runAsNonRoot: true - runAsUser: 1000 - fsGroup: 2000 - - ## Priority class assigned to the Pods - ## - priorityClassName: "" - - ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment. - ## This section is experimental, it may change significantly without deprecation notice in any release. - ## This is experimental and may change significantly without backward compatibility in any release. 
- ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#thanosspec - ## - thanos: {} - - ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. - ## if using proxy extraContainer update targetPort with proxy container port - containers: [] - - ## Enable additional scrape configs that are managed externally to this chart. Note that the prometheus - ## will fail to provision if the correct secret does not exist. - ## - additionalScrapeConfigsExternal: false - - additionalServiceMonitors: [] - ## Name of the ServiceMonitor to create - ## - # - name: "" - - ## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from - ## the chart - ## - # additionalLabels: {} - - ## Service label for use in assembling a job name of the form