package annotate

import (
	// ensure all the ginkgo tests are loaded
	_ "k8s.io/kubernetes/openshift-hack/e2e"
)
var (
	TestMaps = map[string][]string{
		// alpha features that are not gated
		"[Disabled:Alpha]": {
			`\[Feature:StorageVersionAPI\]`,
			`\[Feature:InPlacePodVerticalScaling\]`,
			`\[Feature:RecoverVolumeExpansionFailure\]`,
			`\[Feature:WatchList\]`,
			`\[Feature:ServiceCIDRs\]`,
			`\[Feature:ClusterTrustBundle\]`,
			`\[Feature:SELinuxMount\]`,
			`\[FeatureGate:SELinuxMount\]`,
			`\[Feature:RelaxedEnvironmentVariableValidation\]`,
			`\[Feature:UserNamespacesPodSecurityStandards\]`,
			`\[Feature:UserNamespacesSupport\]`, // disabled Beta
			`\[Feature:DynamicResourceAllocation\]`,
			`\[Feature:VolumeAttributesClass\]`, // disabled Beta
		},
		// tests for features that are not implemented in openshift
		"[Disabled:Unimplemented]": {
			`Monitoring`, // Not installed, should be
			`Cluster level logging`, // Not installed yet
			`Kibana`, // Not installed
			`Ubernetes`, // Can't set zone labels today
			`kube-ui`, // Not installed by default
			`Kubernetes Dashboard`, // Not installed by default (also probably slow image pull)
			`should proxy to cadvisor`, // we don't expose cAdvisor port directly for security reasons
			`\[Feature:BootstrapTokens\]`, // we don't serve cluster-info configmap
			`\[Feature:KubeProxyDaemonSetMigration\]`, // upgrades are run separately
			`\[Feature:BoundServiceAccountTokenVolume\]`, // upgrades are run separately
			`\[Feature:StatefulUpgrade\]`, // upgrades are run separately
		},
		// tests that rely on special configuration that we do not yet support
		"[Disabled:SpecialConfig]": {
			// GPU node needs to be available
			`\[Feature:GPUDevicePlugin\]`,
			`\[sig-scheduling\] GPUDevicePluginAcrossRecreate \[Feature:Recreate\]`,
			`\[Feature:LocalStorageCapacityIsolation\]`, // relies on a separate daemonset?
			`\[sig-cloud-provider-gcp\]`, // these tests require a different configuration - note that GCE tests from sig-cluster-lifecycle were moved to sig-cloud-provider-gcp; see https://github.com/kubernetes/kubernetes/commit/0b3d50b6dccdc4bbd0b3e411c648b092477d79ac#diff-3b1910d08fb8fd8b32956b5e264f87cb
			`kube-dns-autoscaler`, // Don't run kube-dns
			`should check if Kubernetes master services is included in cluster-info`, // Don't run kube-dns
			`DNS configMap`, // this tests dns federation configuration via configmap, which we don't support yet
			`NodeProblemDetector`, // requires a non-master node to run on
			`Advanced Audit should audit API calls`, // expects to be able to call /logs
			`Firewall rule should have correct firewall rules for e2e cluster`, // Upstream-install specific
			// https://bugzilla.redhat.com/show_bug.cgi?id=2079958
			`\[sig-network\] \[Feature:Topology Hints\] should distribute endpoints evenly`,
			// The test requires SSH configuration and is part of the parallel suite, which does not create the bastion
			// host. Enabling the test would result in the bastion being created for every parallel test execution.
			// Given that we have existing oc and WMCO tests that cover this functionality, we can safely disable it.
			`\[Feature:NodeLogQuery\]`,
		},
		// tests that are known broken and need to be fixed upstream or in openshift
		// always add an issue here
		"[Disabled:Broken]": {
			`mount an API token into pods`, // We add 6 secrets, not 1
			`ServiceAccounts should ensure a single API token exists`, // We create lots of secrets
			`unchanging, static URL paths for kubernetes api services`, // the test needs to exclude URLs that are not part of conformance (/logs)
			`Services should be able to up and down services`, // we don't have wget installed on nodes
			`KubeProxy should set TCP CLOSE_WAIT timeout`, // the test requires communication to port 11302 in the cluster nodes
			`should check kube-proxy urls`, // previously this test was skipped because we reported -1 as the number of nodes; now we report the proper number and the test fails
			`SSH`, // TRIAGE
			`should implement service.kubernetes.io/service-proxy-name`, // this is an optional test that requires SSH. sig-network
			`recreate nodes and ensure they function upon restart`, // https://bugzilla.redhat.com/show_bug.cgi?id=1756428
			`\[Driver: iscsi\]`, // https://bugzilla.redhat.com/show_bug.cgi?id=1711627
			"RuntimeClass should reject",
			`Services should implement service.kubernetes.io/headless`, // requires SSH access to function, needs to be refactored
			`ClusterDns \[Feature:Example\] should create pod that uses dns`, // doesn't use bindata, not part of kube test binary
			`Simple pod should return command exit codes should handle in-cluster config`, // kubectl cp doesn't work or isn't preserving the executable bit; we have this test already
			// TODO(node): configure the cri handler for the runtime class to make this work
			"should run a Pod requesting a RuntimeClass with a configured handler",
			"should reject a Pod requesting a RuntimeClass with conflicting node selector",
			"should run a Pod requesting a RuntimeClass with scheduling",
			// A fix is in progress: https://github.com/openshift/origin/pull/24709
			`Multi-AZ Clusters should spread the pods of a replication controller across zones`,
			// Upstream assumes all control plane pods are in the kube-system namespace and we should revert the change
			// https://github.com/kubernetes/kubernetes/commit/176c8e219f4c7b4c15d34b92c50bfa5ba02b3aba#diff-28a3131f96324063dd53e17270d435a3b0b3bd8f806ee0e33295929570eab209R78
			"MetricsGrabber should grab all metrics from a Kubelet",
			"MetricsGrabber should grab all metrics from API server",
			"MetricsGrabber should grab all metrics from a ControllerManager",
			"MetricsGrabber should grab all metrics from a Scheduler",
			// https://bugzilla.redhat.com/show_bug.cgi?id=1906808
			`ServiceAccounts should support OIDC discovery of service account issuer`,
			// NFS umount is broken in kernels 5.7+
			// https://bugzilla.redhat.com/show_bug.cgi?id=1854379
			`\[sig-storage\].*\[Driver: nfs\] \[Testpattern: Dynamic PV \(default fs\)\].*subPath should be able to unmount after the subpath directory is deleted`,
			// https://bugzilla.redhat.com/show_bug.cgi?id=1986306
			`\[sig-cli\] Kubectl client kubectl wait should ignore not found error with --for=delete`,
			// https://bugzilla.redhat.com/show_bug.cgi?id=1980141
			`Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector`,
			`Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector`,
			`Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions`,
			`Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions`,
			`Netpol NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector`,
			`Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces`,
			`Netpol NetworkPolicy between server and client should enforce updated policy`,
			`Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors`,
			`Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors`,
			`Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector`,
			`Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should support a 'default-deny-ingress' policy`,
			`Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should enforce policy based on Ports`,
			`Netpol \[LinuxOnly\] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector`,
			`Topology Hints should distribute endpoints evenly`,
			// https://bugzilla.redhat.com/show_bug.cgi?id=1908645
			`\[sig-network\] Networking Granular Checks: Services should function for service endpoints using hostNetwork`,
			`\[sig-network\] Networking Granular Checks: Services should function for pod-Service\(hostNetwork\)`,
			// https://bugzilla.redhat.com/show_bug.cgi?id=1952460
			`\[sig-network\] Firewall rule control plane should not expose well-known ports`,
			// https://bugzilla.redhat.com/show_bug.cgi?id=1988272
			`\[sig-network\] Networking should provide Internet connection for containers \[Feature:Networking-IPv6\]`,
			`\[sig-network\] Networking should provider Internet connection for containers using DNS`,
			// https://bugzilla.redhat.com/show_bug.cgi?id=1957894
			`\[sig-node\] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret`,
			// https://bugzilla.redhat.com/show_bug.cgi?id=1952457
			`\[sig-node\] crictl should be able to run crictl on the node`,
			// https://bugzilla.redhat.com/show_bug.cgi?id=1953478
			`\[sig-storage\] Dynamic Provisioning Invalid AWS KMS key should report an error and create no PV`,
			// https://issues.redhat.com/browse/OCPBUGS-34577
			`\[sig-storage\] Multi-AZ Cluster Volumes should schedule pods in the same zones as statically provisioned PVs`,
			// https://issues.redhat.com/browse/OCPBUGS-34594
			`\[sig-node\] \[Feature:PodLifecycleSleepAction\] when create a pod with lifecycle hook using sleep action valid prestop hook using sleep action`,
			// https://issues.redhat.com/browse/OCPBUGS-38839
			`\[sig-network\] \[Feature:Traffic Distribution\] when Service has trafficDistribution=PreferClose should route traffic to an endpoint that is close to the client`,
		},
		// tests that need to be temporarily disabled while the rebase is in progress.
		"[Disabled:RebaseInProgress]": {
			// https://issues.redhat.com/browse/OCPBUGS-7297
			`DNS HostNetwork should resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy`,
			`\[sig-network\] Connectivity Pod Lifecycle should be able to connect to other Pod from a terminating Pod`, // TODO(network): simple test in k8s 1.27, needs investigation
			`\[sig-cli\] Kubectl client Kubectl prune with applyset should apply and prune objects`, // TODO(workloads): alpha feature in k8s 1.27. It's failing with `error: unknown flag: --applyset`. Needs investigation
			// https://issues.redhat.com/browse/OCPBUGS-17194
			`\[sig-node\] ImageCredentialProvider \[Feature:KubeletCredentialProviders\] should be able to create pod with image credentials fetched from external credential provider`,
		},
		// tests that may work, but we don't support them
		"[Disabled:Unsupported]": {
			`\[Driver: rbd\]`, // OpenShift 4.x does not support Ceph RBD (use CSI instead)
			`\[Driver: ceph\]`, // OpenShift 4.x does not support CephFS (use CSI instead)
			`\[Driver: gluster\]`, // OpenShift 4.x does not support Gluster
			`Volumes GlusterFS`, // OpenShift 4.x does not support Gluster
			`GlusterDynamicProvisioner`, // OpenShift 4.x does not support Gluster
			// Skip vSphere-specific storage tests. The standard in-tree storage tests for vSphere
			// (prefixed with `In-tree Volumes [Driver: vsphere]`) are enough for testing this plugin.
			// https://bugzilla.redhat.com/show_bug.cgi?id=2019115
			`\[sig-storage\].*\[Feature:vsphere\]`,
			// Also, our CI doesn't support topology, so disable those tests
			`\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies`,
			`\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`,
			`\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(immediate binding\)\] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies`,
			`\[sig-storage\] In-tree Volumes \[Driver: vsphere\] \[Testpattern: Dynamic PV \(immediate binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`,
		},
		// tests too slow to be part of conformance
		"[Slow]": {
			`\[sig-scalability\]`, // disable from the default set for now
			`should create and stop a working application`, // Inordinately slow tests
			`\[Feature:PerformanceDNS\]`, // very slow
			`validates that there exists conflict between pods with same hostPort and protocol but one using 0\.0\.0\.0 hostIP`, // 5m, really?
		},
		// tests that are known flaky
		"[Flaky]": {
			`Job should run a job to completion when tasks sometimes fail and are not locally restarted`, // seems flaky, also may require too many resources
			// TODO(node): test works when run alone, but not in the suite in CI
			`\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`,
		},
		// tests that must be run without competition
		"[Serial]": {
			`\[Disruptive\]`,
			`\[Feature:Performance\]`, // requires isolation
			`Service endpoints latency`, // requires low latency
			`Clean up pods on node`, // schedules up to max pods per node
			`DynamicProvisioner should test that deleting a claim before the volume is provisioned deletes the volume`, // test is very disruptive to other tests
			`Should be able to support the 1\.7 Sample API Server using the current Aggregator`, // down apiservices break other clients today https://bugzilla.redhat.com/show_bug.cgi?id=1623195
			`\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`,
			`should prevent Ingress creation if more than 1 IngressClass marked as default`, // https://bugzilla.redhat.com/show_bug.cgi?id=1822286
			`\[sig-network\] IngressClass \[Feature:Ingress\] should set default value on new IngressClass`, // https://bugzilla.redhat.com/show_bug.cgi?id=1833583
		},
		// Tests that don't pass on disconnected, either due to requiring
		// internet access for GitHub (e.g. many of the s2i builds), or
		// because of pullthrough not supporting ICSP (https://bugzilla.redhat.com/show_bug.cgi?id=1918376)
		"[Skipped:Disconnected]": {
			// Internet access required
			`\[sig-network\] Networking should provide Internet connection for containers`,
		},
"[Skipped:alibabacloud]": {
// LoadBalancer tests in 1.31 require explicit platform-specific skips
// https://issues.redhat.com/browse/OCPBUGS-38840
`\[Feature:LoadBalancer\]`,
},
"[Skipped:aws]": {
// LoadBalancer tests in 1.31 require explicit platform-specific skips
// https://issues.redhat.com/browse/OCPBUGS-38840
`\[sig-network\] LoadBalancers \[Feature:LoadBalancer\] .* UDP`,
`\[sig-network\] LoadBalancers \[Feature:LoadBalancer\] .* session affinity`,
},
"[Skipped:azure]": {
"Networking should provide Internet connection for containers", // Azure does not allow ICMP traffic to internet.
// Azure CSI migration changed how we treat regions without zones.
// See https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=2066865
`\[sig-storage\] In-tree Volumes \[Driver: azure-disk\] \[Testpattern: Dynamic PV \(immediate binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`,
`\[sig-storage\] In-tree Volumes \[Driver: azure-disk\] \[Testpattern: Dynamic PV \(delayed binding\)\] topology should provision a volume and schedule a pod with AllowedTopologies`,
},
"[Skipped:baremetal]": {
// LoadBalancer tests in 1.31 require explicit platform-specific skips
// https://issues.redhat.com/browse/OCPBUGS-38840
`\[Feature:LoadBalancer\]`,
},
"[Skipped:gce]": {
// Requires creation of a different compute instance in a different zone and is not compatible with volumeBindingMode of WaitForFirstConsumer which we use in 4.x
`\[sig-storage\] Multi-AZ Cluster Volumes should only be allowed to provision PDs in zones where nodes exist`,
// The following tests try to ssh directly to a node. None of our nodes have external IPs
`\[k8s.io\] \[sig-node\] crictl should be able to run crictl on the node`,
`\[sig-storage\] Flexvolumes should be mountable`,
`\[sig-storage\] Detaching volumes should not work when mount is in progress`,
// We are using ovn-kubernetes to conceal metadata
`\[sig-auth\] Metadata Concealment should run a check-metadata-concealment job to completion`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1740959
`\[sig-api-machinery\] AdmissionWebhook should be able to deny pod and configmap creation`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1745720
`\[sig-storage\] CSI Volumes \[Driver: pd.csi.storage.gke.io\]`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1749882
`\[sig-storage\] CSI Volumes CSI Topology test using GCE PD driver \[Serial\]`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1751367
`gce-localssd-scsi-fs`,
// https://bugzilla.redhat.com/show_bug.cgi?id=1750851
// should be serial if/when it's re-enabled
`\[HPA\] Horizontal pod autoscaling \(scale resource: Custom Metrics from Stackdriver\)`,
`\[Feature:CustomMetricsAutoscaling\]`,
},
"[Skipped:ibmcloud]": {
// LoadBalancer tests in 1.31 require explicit platform-specific skips
// https://issues.redhat.com/browse/OCPBUGS-38840
`\[Feature:LoadBalancer\]`,
},
"[Skipped:kubevirt]": {
// LoadBalancer tests in 1.31 require explicit platform-specific skips
// https://issues.redhat.com/browse/OCPBUGS-38840
`\[Feature:LoadBalancer\]`,
},
"[Skipped:nutanix]": {
// LoadBalancer tests in 1.31 require explicit platform-specific skips
// https://issues.redhat.com/browse/OCPBUGS-38840
`\[Feature:LoadBalancer\]`,
},
"[Skipped:openstack]": {
// LoadBalancer tests in 1.31 require explicit platform-specific skips
// https://issues.redhat.com/browse/OCPBUGS-38840
`\[Feature:LoadBalancer\]`,
},
"[Skipped:ovirt]": {
// LoadBalancer tests in 1.31 require explicit platform-specific skips
// https://issues.redhat.com/browse/OCPBUGS-38840
`\[Feature:LoadBalancer\]`,
},
"[Skipped:vsphere]": {
// LoadBalancer tests in 1.31 require explicit platform-specific skips
// https://issues.redhat.com/browse/OCPBUGS-38840
`\[Feature:LoadBalancer\]`,
},
"[sig-node]": {
`\[NodeConformance\]`,
`NodeLease`,
`lease API`,
`\[NodeFeature`,
`\[NodeAlphaFeature`,
`Probing container`,
`Security Context When creating a`,
`Downward API should create a pod that prints his name and namespace`,
`Liveness liveness pods should be automatically restarted`,
`Secret should create a pod that reads a secret`,
`Pods should delete a collection of pods`,
`Pods should run through the lifecycle of Pods and PodStatus`,
},
"[sig-cluster-lifecycle]": {
`Feature:ClusterAutoscalerScalability`,
`recreate nodes and ensure they function`,
},
"[sig-arch]": {
// not run, assigned to arch as catch-all
`\[Feature:GKELocalSSD\]`,
`\[Feature:GKENodePool\]`,
},
		// These tests are skipped when openshift-tests needs to use a proxy to reach the
		// cluster -- either because the test won't work while proxied, or because the test
		// itself is testing functionality using its own proxy.
		"[Skipped:Proxy]": {
			// These tests set up their own proxy, which won't work when we need to access the
			// cluster through a proxy.
			`\[sig-cli\] Kubectl client Simple pod should support exec through an HTTP proxy`,
			`\[sig-cli\] Kubectl client Simple pod should support exec through kubectl proxy`,
			// Kube currently uses the x/net/websockets pkg, which doesn't work with proxies.
			// See: https://github.com/kubernetes/kubernetes/pull/103595
			`\[sig-node\] Pods should support retrieving logs from the container over websockets`,
			`\[sig-cli\] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets`,
			`\[sig-cli\] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets`,
			`\[sig-node\] Pods should support remote command execution over websockets`,
			// These tests are flaky and require internet access
			// See https://bugzilla.redhat.com/show_bug.cgi?id=2019375
			`\[sig-network\] DNS should resolve DNS of partial qualified names for services`,
			`\[sig-network\] DNS should provide DNS for the cluster`,
			// This test does not work when using an in-proxy cluster, see https://bugzilla.redhat.com/show_bug.cgi?id=2084560
			`\[sig-network\] Networking should provide Internet connection for containers`,
		},
"[Skipped:SingleReplicaTopology]": {
`\[sig-apps\] Daemon set \[Serial\] should rollback without unnecessary restarts \[Conformance\]`,
`\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] doesn't evict pod with tolerations from tainted nodes`,
`\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] eventually evict pod with finite tolerations from tainted nodes`,
`\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] evicts pods from tainted nodes`,
`\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] removing taint cancels eviction \[Disruptive\] \[Conformance\]`,
`\[sig-node\] NoExecuteTaintManager Single Pod \[Serial\] pods evicted from tainted nodes have pod disruption condition`,
`\[sig-node\] NoExecuteTaintManager Multiple Pods \[Serial\] evicts pods with minTolerationSeconds \[Disruptive\] \[Conformance\]`,
`\[sig-node\] NoExecuteTaintManager Multiple Pods \[Serial\] only evicts pods without tolerations from tainted nodes`,
`\[sig-cli\] Kubectl client Kubectl taint \[Serial\] should remove all the taints with the same key off a node`,
`\[sig-network\] LoadBalancers should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes`,
`\[sig-network\] LoadBalancers should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes`,
`\[sig-architecture\] Conformance Tests should have at least two untainted nodes`,
},
// Tests which can't be run/don't make sense to run against a cluster with all optional capabilities disabled
"[Skipped:NoOptionalCapabilities]": {
// Requires CSISnapshot capability
`\[Feature:VolumeSnapshotDataSource\]`,
// Requires Storage capability
`\[Driver: aws\]`,
`\[Feature:StorageProvider\]`,
},
// tests that don't pass under OVN Kubernetes
"[Skipped:Network/OVNKubernetes]": {
// ovn-kubernetes does not support named ports
`NetworkPolicy.*named port`,
},
"[Skipped:ibmroks]": {
// Calico is allowing the request to timeout instead of returning 'REFUSED'
// https://bugzilla.redhat.com/show_bug.cgi?id=1825021 - ROKS: calico SDN results in a request timeout when accessing services with no endpoints
`\[sig-network\] Services should be rejected when no endpoints exist`,
// Nodes in ROKS have access to secrets in the cluster to handle encryption
// https://bugzilla.redhat.com/show_bug.cgi?id=1825013 - ROKS: worker nodes have access to secrets in the cluster
`\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error`,
`\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error`,
`\[sig-auth\] \[Feature:NodeAuthorizer\] Getting a secret for a workload the node has access to should succeed`,
`\[sig-auth\] \[Feature:NodeAuthorizer\] Getting an existing configmap should exit with the Forbidden error`,
`\[sig-auth\] \[Feature:NodeAuthorizer\] Getting an existing secret should exit with the Forbidden error`,
// Access to node external address is blocked from pods within a ROKS cluster by Calico
// https://bugzilla.redhat.com/show_bug.cgi?id=1825016 - e2e: NodeAuthenticator tests use both external and internal addresses for node
`\[sig-auth\] \[Feature:NodeAuthenticator\] The kubelet's main port 10250 should reject requests with no credentials`,
`\[sig-auth\] \[Feature:NodeAuthenticator\] The kubelet can delegate ServiceAccount tokens to the API server`,
// Mode returned by RHEL7 worker contains an extra character not expected by the test: dgtrwx vs dtrwx
// https://bugzilla.redhat.com/show_bug.cgi?id=1825024 - e2e: Failing test - HostPath should give a volume the correct mode
`\[sig-storage\] HostPath should give a volume the correct mode`,
},
	}

	ExcludedTests = []string{
		`\[Disabled:`,
		`\[Disruptive\]`,
		`\[Skipped\]`,
		`\[Slow\]`,
		`\[Flaky\]`,
		`\[Local\]`,
	}
)
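
// The sketch below is not part of this file's API; it is a minimal,
// hypothetical illustration of how these rules are typically consumed:
// every pattern in TestMaps is a regular expression matched against a
// ginkgo test name, and each matching map key is appended to the name as
// an annotation. The helper name and signature are assumptions, and a
// real implementation would compile the patterns once up front rather
// than on every call.
//
//	func annotateTestName(name string) string {
//		for label, patterns := range TestMaps {
//			for _, pattern := range patterns {
//				if regexp.MustCompile(pattern).MatchString(name) {
//					name += " " + label
//				}
//			}
//		}
//		return name
//	}
//
// Under that reading, ExcludedTests then acts as a final filter: any
// annotated name matching one of its patterns (e.g. `\[Disabled:`) is
// dropped from the suite entirely.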