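# defaultconfig.yaml
#
# Baseline KubeAPIServerConfig for the OpenShift kube-apiserver.
# Entries tagged "set by observe_network.go" or "set by observe_apiserver.go"
# below are placeholders that the operator's config observers overwrite with
# cluster-specific values when the config is rendered.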
apiVersion: kubecontrolplane.config.openshift.io/v1
kind: KubeAPIServerConfig
admission:
  pluginConfig:
    network.openshift.io/ExternalIPRanger:
      configuration:
        allowIngressIP: true
        apiVersion: network.openshift.io/v1
        externalIPNetworkCIDRs: null
        kind: ExternalIPRangerAdmissionConfig
      location: ""
    PodSecurity:
      configuration:
        kind: PodSecurityConfiguration
        apiVersion: pod-security.admission.config.k8s.io/v1
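        # The "invalid-to-force-substitution" values below are deliberate
        # sentinels: the operator is expected to substitute the real pod
        # security profiles at render time, and an invalid default makes a
        # missed substitution fail loudly instead of silently enforcing the
        # wrong profile.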
        defaults:
          enforce: "invalid-to-force-substitution"
          enforce-version: "invalid-to-force-substitution"
          audit: "invalid-to-force-substitution"
          audit-version: "invalid-to-force-substitution"
          warn: "invalid-to-force-substitution"
          warn-version: "invalid-to-force-substitution"
        exemptions:
          usernames:
          # The build controller creates pods that are likely to be privileged
          # based on BuildConfig objects. Access to these build pods is,
          # however, still limited by SCC exec admission, so we can safely
          # exempt the build-controller SA here.
          # This exemption should never be extended to cluster users, as no
          # such guarantees are made for any other OpenShift SA/user.
          - system:serviceaccount:openshift-infra:build-controller
apiServerArguments:
  allow-privileged:
  - "true"
  anonymous-auth:
  - "true"
  authorization-mode:
  - Scope
  - SystemMasters
  - RBAC
  - Node
  audit-log-format:
  - json
  audit-log-maxbackup:
  - "10"
  audit-log-maxsize:
  # size chosen to be large enough to hold all the audit logs for an upgrade plus an e2e parallel test run.
  - "200"
  audit-log-path:
  - /var/log/kube-apiserver/audit.log
  audit-policy-file:
  - /etc/kubernetes/static-pod-resources/configmaps/kube-apiserver-audit-policies/policy.yaml
  client-ca-file:
  - /etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt
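  # The list below mixes upstream Kubernetes admission plugins with
  # OpenShift-specific ones (prefixed with their API group, e.g.
  # security.openshift.io/...). The order of this list is not significant;
  # the apiserver applies plugins in its own fixed order.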
  enable-admission-plugins:
  - CertificateApproval
  - CertificateSigning
  - CertificateSubjectRestriction
  - DefaultIngressClass
  - DefaultStorageClass
  - DefaultTolerationSeconds
  - LimitRanger
  - MutatingAdmissionWebhook
  - NamespaceLifecycle
  - NodeRestriction
  - OwnerReferencesPermissionEnforcement
  - PersistentVolumeClaimResize
  - PodNodeSelector
  - PodTolerationRestriction
  - Priority
  - ResourceQuota
  - RuntimeClass
  - ServiceAccount
  - StorageObjectInUseProtection
  - TaintNodesByCondition
  - ValidatingAdmissionWebhook
  - ValidatingAdmissionPolicy
  - authorization.openshift.io/RestrictSubjectBindings
  - authorization.openshift.io/ValidateRoleBindingRestriction
  - config.openshift.io/DenyDeleteClusterConfiguration
  - config.openshift.io/ValidateAPIServer
  - config.openshift.io/ValidateAuthentication
  - config.openshift.io/ValidateConsole
  - config.openshift.io/ValidateFeatureGate
  - config.openshift.io/ValidateImage
  - config.openshift.io/ValidateOAuth
  - config.openshift.io/ValidateProject
  - config.openshift.io/ValidateScheduler
  - image.openshift.io/ImagePolicy
  - network.openshift.io/ExternalIPRanger
  - network.openshift.io/RestrictedEndpointsAdmission
  - quota.openshift.io/ClusterResourceQuota
  - quota.openshift.io/ValidateClusterResourceQuota
  - route.openshift.io/IngressAdmission
  - scheduling.openshift.io/OriginPodNodeEnvironment
  - security.openshift.io/DefaultSecurityContextConstraints
  - security.openshift.io/SCCExecRestrictions
  - security.openshift.io/SecurityContextConstraint
  - security.openshift.io/ValidateSecurityContextConstraints
  - storage.openshift.io/CSIInlineVolumeSecurity
  # switch to direct pod IP routing for aggregated apiservers to avoid service IPs as one source of instability
  enable-aggregator-routing:
  - "true"
  enable-logs-handler:
  - "false"
  endpoint-reconciler-type:
  - "lease"
  etcd-cafile:
  - /etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt
  etcd-certfile:
  - /etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt
  etcd-keyfile:
  - /etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key
  etcd-prefix:
  - kubernetes.io
  etcd-healthcheck-timeout:
  - 9s
  etcd-readycheck-timeout:
  - 9s
  event-ttl:
  - 3h
  goaway-chance:
  - "0"
  http2-max-streams-per-connection:
  - "2000" # recommended is 1000, but we need to mitigate https://github.com/kubernetes/kubernetes/issues/74412
  kubelet-certificate-authority:
  - /etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt
  kubelet-client-certificate:
  - /etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt
  kubelet-client-key:
  - /etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key
  kubelet-preferred-address-types:
  - InternalIP # all of our kubelets have internal IPs and we *only* support communicating with them via that internal IP so that NO_PROXY always works and is lightweight
  kubelet-read-only-port:
  - "0"
  kubernetes-service-node-port:
  - "0"
  # value should logically scale with max-requests-inflight
  max-mutating-requests-inflight:
  - "1000"
  # value bumped for scale tests; the kube-apiserver handled the load without issue
  max-requests-inflight:
  - "3000"
  min-request-timeout:
  - "3600"
  proxy-client-cert-file:
  - /etc/kubernetes/static-pod-certs/secrets/aggregator-client/tls.crt
  proxy-client-key-file:
  - /etc/kubernetes/static-pod-certs/secrets/aggregator-client/tls.key
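  # The requestheader-* flags below, together with the aggregator-client
  # cert/key above, configure aggregation-layer (front-proxy) authentication:
  # proxied requests are authenticated from the X-Remote-* headers, but only
  # when the proxy's client certificate chains to the requestheader client CA
  # and its common name appears in requestheader-allowed-names.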
  requestheader-allowed-names:
  - kube-apiserver-proxy
  - system:kube-apiserver-proxy
  - system:openshift-aggregator
  requestheader-client-ca-file:
  - /etc/kubernetes/static-pod-certs/configmaps/aggregator-client-ca/ca-bundle.crt
  requestheader-extra-headers-prefix:
  - X-Remote-Extra-
  requestheader-group-headers:
  - X-Remote-Group
  requestheader-username-headers:
  - X-Remote-User
  service-account-lookup:
  - "true"
  service-node-port-range:
  - 30000-32767
  shutdown-delay-duration:
  - 70s # give the SDN some time to converge: 30s for iptables lock contention, 25s for the second try, and a few seconds for AWS to update ELBs
  shutdown-send-retry-after:
  - "true"
  storage-backend:
  - etcd3
  storage-media-type:
  - application/vnd.kubernetes.protobuf
  tls-cert-file:
  - /etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt
  tls-private-key-file:
  - /etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key
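  # The flag below takes a comma-separated list of HSTS directives; the
  # apiserver emits them as a single Strict-Transport-Security response header.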
  # CVE-2022-3259: Set HTTP Strict Transport Security
  # Chrome and Mozilla Firefox maintain an HSTS preload list
  # See issue: golang.org/issue/26162
  strict-transport-security-directives:
  - max-age=31536000,includeSubDomains,preload
authConfig:
  oauthMetadataFile: ""
consolePublicURL: ""
projectConfig:
  defaultNodeSelector: ""
servicesSubnet: 10.3.0.0/16 # ServiceCIDR # set by observe_network.go
servingInfo:
  bindAddress: 0.0.0.0:6443 # set by observe_network.go
  bindNetwork: tcp4 # set by observe_network.go
  namedCertificates: null # set by observe_apiserver.go
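# To inspect the fully rendered configuration on a live cluster, a sketch
# (assuming the operator publishes the merged config as the "config"
# ConfigMap in the openshift-kube-apiserver namespace, per the usual
# static-pod-operator layout):
#
#   oc -n openshift-kube-apiserver get configmap config \
#     -o jsonpath='{.data.config\.yaml}'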