200-clusterrole.yaml
# Copyright 2020 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tekton-pipelines-controller-cluster-access
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: tekton-pipelines
rules:
  - apiGroups: [""]
    # Namespace access is required because the controller timeout handling logic
    # iterates over all namespaces and times out any PipelineRuns that have expired.
    # Pod access is required because the taskrun controller wants to be updated when
    # a Pod underlying a TaskRun changes state.
    resources: ["namespaces", "pods"]
    verbs: ["list", "watch"]
  # Controller needs cluster access to all of the CRDs that it is responsible for
  # managing.
  - apiGroups: ["tekton.dev"]
    resources: ["tasks", "clustertasks", "taskruns", "pipelines", "pipelineruns", "pipelineresources", "conditions"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
  - apiGroups: ["tekton.dev"]
    resources: ["taskruns/finalizers", "pipelineruns/finalizers"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
  - apiGroups: ["tekton.dev"]
    resources: ["tasks/status", "clustertasks/status", "taskruns/status", "pipelines/status", "pipelineruns/status", "pipelineresources/status"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
  - apiGroups: ["policy"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["tekton-pipelines"]
    verbs: ["use"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # This is the access that the controller needs on a per-namespace basis.
  name: tekton-pipelines-controller-tenant-access
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: tekton-pipelines
rules:
  - apiGroups: [""]
    resources: ["pods", "pods/log", "secrets", "events", "serviceaccounts", "configmaps", "persistentvolumeclaims", "limitranges"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
  # Unclear if this access is actually required. Simply a hold-over from the previous
  # incarnation of the controller's ClusterRole.
  - apiGroups: ["apps"]
    resources: ["deployments", "statefulsets"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
  - apiGroups: ["apps"]
    resources: ["deployments/finalizers"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tekton-pipelines-webhook-cluster-access
  labels:
    app.kubernetes.io/component: webhook
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: tekton-pipelines
rules:
  # The webhook needs to be able to list and update customresourcedefinitions,
  # mainly to update the webhook certificates.
  - apiGroups: ["apiextensions.k8s.io"]
    resources: ["customresourcedefinitions", "customresourcedefinitions/status"]
    verbs: ["get", "list", "update", "patch", "watch"]
  - apiGroups: ["admissionregistration.k8s.io"]
    # The webhook performs a reconciliation on these two resources and continuously
    # updates configuration.
    resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
    # knative starts informers on these resources, which is why list and watch are needed.
    verbs: ["list", "watch"]
  - apiGroups: ["admissionregistration.k8s.io"]
    resources: ["mutatingwebhookconfigurations"]
    # This mutating webhook is responsible for applying defaults to tekton objects
    # as they are received.
    resourceNames: ["webhook.pipeline.tekton.dev"]
    # When there are changes to the configs or secrets, knative updates the mutatingwebhook config
    # with the updated certificates or the refreshed set of rules.
    verbs: ["get", "update"]
  - apiGroups: ["admissionregistration.k8s.io"]
    resources: ["validatingwebhookconfigurations"]
    # validation.webhook.pipeline.tekton.dev performs schema validation when you, for example, create TaskRuns.
    # config.webhook.pipeline.tekton.dev validates the logging configuration against knative's logging structure.
    resourceNames: ["validation.webhook.pipeline.tekton.dev", "config.webhook.pipeline.tekton.dev"]
    # When there are changes to the configs or secrets, knative updates the validatingwebhook config
    # with the updated certificates or the refreshed set of rules.
    verbs: ["get", "update"]
  - apiGroups: ["policy"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["tekton-pipelines"]
    verbs: ["use"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tekton-pipelines-leader-election
  labels:
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: tekton-pipelines
rules:
  # We use leases for leader election.
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
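
These ClusterRoles only take effect once they are bound to the service accounts that run the Tekton components; the real bindings ship in a separate manifest alongside this file. Below is a minimal illustrative sketch of such a binding for the first role, assuming the controller runs as the tekton-pipelines-controller ServiceAccount in the tekton-pipelines namespace (the names used by the upstream install); adjust the subject to match your deployment.

# Illustrative sketch only: binds the cluster-access ClusterRole above to the
# controller's ServiceAccount. The ServiceAccount name and namespace are assumed
# to match the upstream Tekton install and may differ in your deployment.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tekton-pipelines-controller-cluster-access
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: default
    app.kubernetes.io/part-of: tekton-pipelines
subjects:
  - kind: ServiceAccount
    name: tekton-pipelines-controller
    namespace: tekton-pipelines
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: tekton-pipelines-controller-cluster-access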