diff --git a/.circleci/config.yml b/.circleci/config.yml index a1ce8bdb39a..d107b673eef 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -16,125 +16,6 @@ version: 2 jobs: - - OPENSHIFT_3.10.0: - machine: true - steps: - - checkout - - run: - command: | - kube_version=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt) - curl -LO https://storage.googleapis.com/kubernetes-release/release/${kube_version}/bin/linux/amd64/kubectl && \ - chmod +x kubectl && sudo mv kubectl /usr/local/bin/ - echo "Installed kubectl CLI tool" - echo "Installing nsenter" - if ! which nsenter > /dev/null; then - echo "Did not find nsenter. Installing it." - NSENTER_BUILD_DIR=$(mktemp -d /tmp/nsenter-build-XXXXXX) - pushd ${NSENTER_BUILD_DIR} - curl https://www.kernel.org/pub/linux/utils/util-linux/v2.31/util-linux-2.31.tar.gz | tar -zxf- - cd util-linux-2.31 - ./configure --without-ncurses - make nsenter - sudo cp nsenter /usr/local/bin - rm -rf "${NSENTER_BUILD_DIR}" - popd - fi - if ! which systemd-run > /dev/null; then - echo "Did not find systemd-run. Hacking it to work around Kubernetes calling it." - echo '#!/bin/bash - echo "all arguments: "$@ - while [[ $# -gt 0 ]] - do - key="$1" - if [[ "${key}" != "--" ]]; then - shift - continue - fi - shift - break - done - echo "remaining args: "$@ - exec $@' | sudo tee /usr/bin/systemd-run >/dev/null - sudo chmod +x /usr/bin/systemd-run - fi - oc_tool_version="openshift-origin-client-tools-v3.10.0-dd10d17-linux-64bit" - curl -LO https://github.com/openshift/origin/releases/download/v3.10.0/${oc_tool_version}.tar.gz && \ - tar -xvzf ${oc_tool_version}.tar.gz && chmod +x $PWD/${oc_tool_version}/oc && sudo mv $PWD/${oc_tool_version}/oc /usr/local/bin/ && \ - rm -rf ${oc_tool_version}.tar.gz - echo "Installed OC CLI tool" - tmp=`mktemp` - echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry 172.30.0.0/16"' > ${tmp} - sudo mv ${tmp} /etc/default/docker - sudo mount --make-shared / - sudo service docker restart - echo "Configured Docker daemon with insecure-registry" - oc cluster up - sleep 10 - oc login -u system:admin - echo "Configured OpenShift cluster : v3.10.0" - - run: - command: mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B install -P itests-openshift - no_output_timeout: 3600 - OPENSHIFT_3.11.0: - machine: true - steps: - - checkout - - run: - command: | - kube_version=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt) - curl -LO https://storage.googleapis.com/kubernetes-release/release/${kube_version}/bin/linux/amd64/kubectl && \ - chmod +x kubectl && sudo mv kubectl /usr/local/bin/ - echo "Installed kubectl CLI tool" - echo "Installing nsenter" - if ! which nsenter > /dev/null; then - echo "Did not find nsenter. Installing it." - NSENTER_BUILD_DIR=$(mktemp -d /tmp/nsenter-build-XXXXXX) - pushd ${NSENTER_BUILD_DIR} - curl https://www.kernel.org/pub/linux/utils/util-linux/v2.31/util-linux-2.31.tar.gz | tar -zxf- - cd util-linux-2.31 - ./configure --without-ncurses - make nsenter - sudo cp nsenter /usr/local/bin - rm -rf "${NSENTER_BUILD_DIR}" - popd - fi - if ! which systemd-run > /dev/null; then - echo "Did not find systemd-run. Hacking it to work around Kubernetes calling it." 
- echo '#!/bin/bash - echo "all arguments: "$@ - while [[ $# -gt 0 ]] - do - key="$1" - if [[ "${key}" != "--" ]]; then - shift - continue - fi - shift - break - done - echo "remaining args: "$@ - exec $@' | sudo tee /usr/bin/systemd-run >/dev/null - sudo chmod +x /usr/bin/systemd-run - fi - oc_tool_version="openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit" - curl -LO https://github.com/openshift/origin/releases/download/v3.11.0/${oc_tool_version}.tar.gz && \ - tar -xvzf ${oc_tool_version}.tar.gz && chmod +x $PWD/${oc_tool_version}/oc && sudo mv $PWD/${oc_tool_version}/oc /usr/local/bin/ && \ - rm -rf ${oc_tool_version}.tar.gz - echo "Installed OC CLI tool" - tmp=`mktemp` - echo 'DOCKER_OPTS="$DOCKER_OPTS --insecure-registry 172.30.0.0/16"' > ${tmp} - sudo mv ${tmp} /etc/default/docker - sudo mount --make-shared / - sudo service docker restart - echo "Configured Docker daemon with insecure-registry" - oc cluster up - sleep 10 - oc login -u system:admin - echo "Configured OpenShift cluster : v3.11.0" - - run: - command: mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B install -P itests-openshift - no_output_timeout: 3600 RELEASE: machine: true steps: @@ -143,12 +24,26 @@ jobs: command: | bash ./scripts/prepare-environment.sh bash ./scripts/release.sh + javadoc: + machine: true + steps: + - checkout + - restore_cache: + key: fabric8-kubernetes-client-{{ checksum "pom.xml" }} + - run: + command: | + # Run Maven build with javadoc jar generation + mvn clean install javadoc:jar -DskipTests -Pjavadoc-test + - save_cache: + key: fabric8-kubernetes-client-{{ checksum "pom.xml" }} + paths: + - ~/.m2 + workflows: version: 2 build_and_test: jobs: - - OPENSHIFT_3.10.0 - - OPENSHIFT_3.11.0 + - javadoc - RELEASE: filters: branches: diff --git a/.github/workflows/e2e-tests.yml b/.github/workflows/e2e-tests.yml new file mode 100644 index 00000000000..e5bdd439672 --- /dev/null +++ b/.github/workflows/e2e-tests.yml @@ -0,0 +1,82 @@ +# +# Copyright (C) 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +name: E2E Tests + +on: + push: + branches: + - master + pull_request: + schedule: + - cron: '0 1 * * *' # Everyday at 1 + +env: + IT_REVISION: master + IT_DIR: kubernetes-itests + +jobs: + minikube: + name: K8S + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + kubernetes: [v1.18.3,v1.12.0] + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Minikube-Kubernetes + uses: manusa/actions-setup-minikube@v1.0.2 + with: + minikube version: v1.9.2 + kubernetes version: ${{ matrix.kubernetes }} + github token: ${{ secrets.GITHUB_TOKEN }} + - name: Setup Java 11 + uses: actions/setup-java@v1 + with: + java-version: '11' + - name: Install Kubernetes Client + run: mvn -f pom.xml -B -DskipTests clean install + - name: Install and Run Integration Tests + run: | + cd $IT_DIR + mvn test -Dtest="io.fabric8.kubernetes.**" + openshift: + name: OpenShift + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + openshift: [v3.11.0,v3.10.0] + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup OpenShift + uses: manusa/actions-setup-openshift@v1.0.3 + with: + oc version: ${{ matrix.openshift }} + github token: ${{ secrets.GITHUB_TOKEN }} + - name: Setup Java 11 + uses: actions/setup-java@v1 + with: + java-version: '11' + - name: Install Kubernetes Client + run: mvn -f pom.xml -B -DskipTests clean install + - name: Install and Run Integration Tests + run: | + cd $IT_DIR + mvn test -Dtest="io.fabric8.openshift.**" diff --git a/README.md b/README.md index 9212e3eaeb6..8e582e6244b 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,8 @@ This client provides access to the full [Kubernetes](http://kubernetes.io/) & [OpenShift](http://openshift.org/) REST APIs via a fluent DSL. [![Build](https://github.com/fabric8io/kubernetes-client/workflows/Sonar%20Scanner/badge.svg)](https://github.com/fabric8io/kubernetes-client/actions) -[![Integration Tests](https://img.shields.io/circleci/project/github/fabric8io/kubernetes-client/master.svg)](https://circleci.com/gh/fabric8io/kubernetes-client) +[![CircleCI](https://img.shields.io/circleci/project/github/fabric8io/kubernetes-client/master.svg)](https://circleci.com/gh/fabric8io/kubernetes-client) +![E2E Tests](https://github.com/fabric8io/kubernetes-client/workflows/E2E%20Tests/badge.svg) [![Release](https://img.shields.io/github/v/release/fabric8io/kubernetes-client)](https://search.maven.org/search?q=g:io.fabric8%20a:kubernetes-client) [![Twitter](https://img.shields.io/twitter/follow/fabric8io?style=social)](https://twitter.com/fabric8io) [![Bugs](https://sonarcloud.io/api/project_badges/measure?project=fabric8io_kubernetes-client&metric=bugs)](https://sonarcloud.io/dashboard?id=fabric8io_kubernetes-client) diff --git a/kubernetes-client/src/main/java/io/fabric8/kubernetes/client/internal/readiness/Readiness.java b/kubernetes-client/src/main/java/io/fabric8/kubernetes/client/internal/readiness/Readiness.java index 60cb3aeb785..495b67878b1 100644 --- a/kubernetes-client/src/main/java/io/fabric8/kubernetes/client/internal/readiness/Readiness.java +++ b/kubernetes-client/src/main/java/io/fabric8/kubernetes/client/internal/readiness/Readiness.java @@ -114,7 +114,7 @@ public static boolean isDeploymentReady(Deployment d) { } return spec.getReplicas().intValue() == status.getReplicas() && - spec.getReplicas().intValue() <= status.getAvailableReplicas(); + spec.getReplicas() <= status.getAvailableReplicas(); } diff --git a/kubernetes-itests/src/test/java/io/fabric8/commons/ClusterEntity.java 
b/kubernetes-itests/src/test/java/io/fabric8/commons/ClusterEntity.java new file mode 100644 index 00000000000..ebbcb06b5fc --- /dev/null +++ b/kubernetes-itests/src/test/java/io/fabric8/commons/ClusterEntity.java @@ -0,0 +1,76 @@ +/** + * Copyright (C) 2015 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.fabric8.commons; + +import io.fabric8.kubernetes.api.model.DeletionPropagation; +import io.fabric8.kubernetes.api.model.HasMetadata; +import io.fabric8.kubernetes.api.model.Namespace; +import io.fabric8.kubernetes.api.model.NamespaceList; +import io.fabric8.kubernetes.client.DefaultKubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.openshift.client.DefaultOpenShiftClient; +import io.fabric8.openshift.client.OpenShiftClient; + +import java.io.InputStream; +import java.util.List; +import java.util.Map; + +public class ClusterEntity { + + public static final String FRAMEWORK = "framework"; + public static final String ARQUILLIAN = "arquillian"; + + public static void apply(InputStream inputStream) { + try (KubernetesClient client = new DefaultKubernetesClient()) { + String namespace = getArquillianNamespace(); + if (namespace != null) { + client.load(inputStream).inNamespace(namespace).createOrReplace(); + } + } + } + + public static void remove(InputStream inputStream) { + try (KubernetesClient client = new DefaultKubernetesClient()) { + List items = client.load(inputStream).get(); + client.resourceList(items).inNamespace(getArquillianNamespace()).withPropagationPolicy(DeletionPropagation.BACKGROUND).delete(); + } + } + + public static String getArquillianNamespace() { + try (KubernetesClient client = new DefaultKubernetesClient()) { + NamespaceList namespaceList = client.namespaces().list(); + for (Namespace namespace : namespaceList.getItems()) { + + // Namespace should not be in terminating state and it should have + // labels. 
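(Aside, not part of the patch.) The new `ClusterEntity.getArquillianNamespace()` helper above keys on the `FRAMEWORK`/`ARQUILLIAN` constants, i.e. a `framework: arquillian` namespace label, falling back to any namespace whose name starts with `itest-`. A minimal sketch of a namespace it would select, assuming the Arquillian Cube extension labels the test namespace this way; the name is hypothetical, only the label (or the `itest-` prefix) matters:

```yaml
# Hypothetical namespace as the test framework would create it; only the
# "framework: arquillian" label (or an "itest-" name prefix) is what
# getArquillianNamespace() actually checks.
apiVersion: v1
kind: Namespace
metadata:
  name: itest-example        # hypothetical name
  labels:
    framework: arquillian
```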
+ if (namespace.getMetadata().getDeletionTimestamp() == null) { + if (namespace.getMetadata().getLabels() != null) { + Map labels = namespace.getMetadata().getLabels(); + if (labels.containsKey(FRAMEWORK) && + labels.get(FRAMEWORK).equals(ARQUILLIAN)) { + return namespace.getMetadata().getName(); + } + } + + if (namespace.getMetadata().getName().startsWith("itest-")) { + return namespace.getMetadata().getName(); + } + } + } + } + return null; + } +} diff --git a/kubernetes-itests/src/test/java/io/fabric8/commons/DeleteEntity.java b/kubernetes-itests/src/test/java/io/fabric8/commons/DeleteEntity.java index 49f284a016e..2aa4bb04747 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/commons/DeleteEntity.java +++ b/kubernetes-itests/src/test/java/io/fabric8/commons/DeleteEntity.java @@ -15,9 +15,11 @@ */ package io.fabric8.commons; +import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.openshift.client.OpenShiftClient; +import java.util.List; import java.util.concurrent.Callable; public class DeleteEntity implements Callable { @@ -38,49 +40,74 @@ public DeleteEntity(Class typeParameterClass, KubernetesClient client, String public Boolean call() { switch (this.typeParameterClass.getSimpleName()) { case "Route": - return ((OpenShiftClient)this.client).routes().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(((OpenShiftClient)this.client).routes().inNamespace(this.namespace).list().getItems()); case "Template": - return ((OpenShiftClient)this.client).templates().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(((OpenShiftClient)this.client).templates().inNamespace(this.namespace).list().getItems()); case "ImageStream": - return ((OpenShiftClient)this.client).imageStreams().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(((OpenShiftClient)this.client).imageStreams().inNamespace(this.namespace).list().getItems()); + case "ImageStreamTag": + return isDeleted(((OpenShiftClient)this.client).imageStreamTags().inNamespace(this.namespace).withName(this.name).get()); case "DeploymentConfig": - return ((OpenShiftClient)this.client).deploymentConfigs().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(((OpenShiftClient)this.client).deploymentConfigs().inNamespace(this.namespace).list().getItems()); case "BuildConfig": - return ((OpenShiftClient)this.client).buildConfigs().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(((OpenShiftClient)this.client).buildConfigs().inNamespace(this.namespace).list().getItems()); case "ConfigMap": - return this.client.configMaps().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(this.client.configMaps().inNamespace(this.namespace).list().getItems()); case "StatefulSet": - return this.client.apps().statefulSets().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(this.client.apps().statefulSets().inNamespace(this.namespace).list().getItems()); case "Deployment": - return this.client.apps().deployments().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(this.client.apps().deployments().inNamespace(this.namespace).list().getItems()); case "Service": - return this.client.services().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(this.client.services().inNamespace(this.namespace).list().getItems()); case "ServiceAccount": - return 
client.serviceAccounts().inNamespace(this.namespace).withName(this.name).get() == null; + return isDeleted(this.client.serviceAccounts().inNamespace(this.namespace).withName(this.name).get()); case "Secret": - return this.client.secrets().inNamespace(this.namespace).withName(this.name).get() == null; + return isDeleted(this.client.secrets().inNamespace(this.namespace).withName(this.name).get()); case "ReplicationController": - return this.client.replicationControllers().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(this.client.replicationControllers().inNamespace(this.namespace).list().getItems()); case "ReplicaSet": - return this.client.apps().replicaSets().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(this.client.apps().replicaSets().inNamespace(this.namespace).list().getItems()); case "NetworkPolicy": - return this.client.network().networkPolicies().inNamespace(this.namespace).list().getItems().size() == 0; + return isDeleted(this.client.network().networkPolicies().inNamespace(this.namespace).withName(name).get()); case "SecurityContextConstraints": - return ((OpenShiftClient)this.client).securityContextConstraints().withName(this.name).get() == null; + return isDeleted(((OpenShiftClient)this.client).securityContextConstraints().withName(this.name).get()); case "ClusterRoleBinding": - return this.client.rbac().clusterRoleBindings().withName(this.name).get() == null; + return isDeleted(this.client.rbac().clusterRoleBindings().withName(this.name).get()); case "ClusterRole": - return this.client.rbac().clusterRoles().withName(this.name).get() == null; + return isDeleted(this.client.rbac().clusterRoles().withName(this.name).get()); case "CustomResourceDefinition": - return this.client.customResourceDefinitions().withName(this.name).get() == null; + return isDeleted(this.client.customResourceDefinitions().withName(this.name).get()); case "RoleBinding": - return this.client.rbac().roleBindings().inNamespace(this.namespace).withName(this.name).get() == null; + return isDeleted(this.client.rbac().roleBindings().inNamespace(this.namespace).withName(this.name).get()); case "Role": - return this.client.rbac().roles().inNamespace(this.namespace).withName(this.name).get() == null; + return isDeleted(this.client.rbac().roles().inNamespace(this.namespace).withName(this.name).get()); case "PodSecurityPolicy": - return this.client.extensions().podSecurityPolicies().withName(this.name).get() == null; + return isDeleted(this.client.extensions().podSecurityPolicies().withName(this.name).get()); default: return false; } } + + private boolean isDeleted(List items) { + // If zero items exists of this kind, everything is cleaned up. 
+ if (items.size() == 0) { + return true; + } + + // If not, iterate through list and check metadata.deletionTimestamp field + for (T item : items) { + if (item.getMetadata().getDeletionTimestamp() == null) { + return false; + } + } + return true; + } + + private boolean isDeleted(HasMetadata item) { + if (item == null) { + return true; + } + + return item.getMetadata().getDeletionTimestamp() != null; + } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/commons/ReadyEntity.java b/kubernetes-itests/src/test/java/io/fabric8/commons/ReadyEntity.java index aed33dff698..68034db4ad7 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/commons/ReadyEntity.java +++ b/kubernetes-itests/src/test/java/io/fabric8/commons/ReadyEntity.java @@ -41,12 +41,16 @@ public Boolean call() { return ((OpenShiftClient)this.client).templates().inNamespace(this.namespace).withName(this.name).get() != null; case "ImageStream": return ((OpenShiftClient)this.client).imageStreams().inNamespace(this.namespace).withName(this.name).get()!= null; + case "ImageStreamTag": + return ((OpenShiftClient)this.client).imageStreamTags().inNamespace(this.namespace).withName(this.name).get() != null; case "DeploymentConfig": return ((OpenShiftClient)this.client).deploymentConfigs().inNamespace(this.namespace).withName(this.name).get()!= null; case "BuildConfig": return ((OpenShiftClient)this.client).buildConfigs().inNamespace(this.namespace).withName(this.name).get()!= null; case "ConfigMap": return this.client.configMaps().inNamespace(this.namespace).withName(this.name).get()!= null; + case "CustomResourceDefinition": + return this.client.customResourceDefinitions().withName(this.namespace).get() != null; case "StatefulSet": return this.client.apps().statefulSets().inNamespace(this.namespace).withName(this.name).get()!= null; case "Deployment": diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ClusterRoleBindingIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ClusterRoleBindingIT.java index 55219f06864..f15db383819 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ClusterRoleBindingIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ClusterRoleBindingIT.java @@ -15,20 +15,18 @@ */ package io.fabric8.kubernetes; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.DeleteEntity; import io.fabric8.kubernetes.api.model.rbac.ClusterRoleBinding; -import io.fabric8.kubernetes.api.model.rbac.ClusterRoleBindingBuilder; import io.fabric8.kubernetes.api.model.rbac.ClusterRoleBindingList; -import io.fabric8.kubernetes.api.model.rbac.RoleRefBuilder; -import io.fabric8.kubernetes.api.model.rbac.SubjectBuilder; import io.fabric8.kubernetes.client.KubernetesClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -38,7 +36,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeFalse; @RunWith(ArquillianConditionalRunner.class) @RequiresKubernetes @@ -52,44 +49,20 @@ public class ClusterRoleBindingIT { private ClusterRoleBinding clusterRoleBinding; - @Before - public void 
init() { - - // Do not run tests on opeshift 3.6.0 and 3.6.1 - assumeFalse(client.getVersion().getMajor().equalsIgnoreCase("1") - && client.getVersion().getMinor().startsWith("6")); - - clusterRoleBinding = new ClusterRoleBindingBuilder() - .withNewMetadata() - .withName("read-nodes") - .endMetadata() - .addToSubjects(0, new SubjectBuilder() - .withApiGroup("rbac.authorization.k8s.io") - .withKind("User") - .withName("jane") - .withNamespace("default") - .build() - ) - .withRoleRef(new RoleRefBuilder() - .withApiGroup("rbac.authorization.k8s.io") - .withKind("ClusterRole") - .withName("node-reader") - .build() - ) - .build(); - - client.rbac().clusterRoleBindings().createOrReplace(clusterRoleBinding); + @BeforeClass + public static void init() { + ClusterEntity.apply(ClusterRoleBindingIT.class.getResourceAsStream("/clusterrolebinding-it.yml")); } @Test public void get() { - clusterRoleBinding = client.rbac().clusterRoleBindings().withName("read-nodes").get(); + clusterRoleBinding = client.rbac().clusterRoleBindings().withName("read-nodes-get").get(); assertNotNull(clusterRoleBinding); assertEquals("ClusterRoleBinding", clusterRoleBinding.getKind()); assertNotNull(clusterRoleBinding.getMetadata()); - assertEquals("read-nodes", clusterRoleBinding.getMetadata().getName()); + assertEquals("read-nodes-get", clusterRoleBinding.getMetadata().getName()); assertNotNull(clusterRoleBinding.getSubjects()); assertEquals(1, clusterRoleBinding.getSubjects().size()); assertEquals("rbac.authorization.k8s.io", clusterRoleBinding.getSubjects().get(0).getApiGroup()); @@ -98,7 +71,7 @@ public void get() { assertEquals("default", clusterRoleBinding.getSubjects().get(0).getNamespace()); assertNotNull(clusterRoleBinding.getRoleRef()); assertEquals("ClusterRole", clusterRoleBinding.getRoleRef().getKind()); - assertEquals("node-reader", clusterRoleBinding.getRoleRef().getName()); + assertEquals("secret-reader", clusterRoleBinding.getRoleRef().getName()); assertEquals("rbac.authorization.k8s.io", clusterRoleBinding.getRoleRef().getApiGroup()); } @@ -133,10 +106,10 @@ public void list() { assertNotNull(clusterRoleBindingList.getItems()); for (ClusterRoleBinding clusterRoleBinding : clusterRoleBindingList.getItems()) { - if (clusterRoleBinding.getMetadata().getName().equals("read-nodes")) { + if (clusterRoleBinding.getMetadata().getName().equals("read-nodes-list")) { assertEquals("ClusterRoleBinding", clusterRoleBinding.getKind()); assertNotNull(clusterRoleBinding.getMetadata()); - assertEquals("read-nodes", clusterRoleBinding.getMetadata().getName()); + assertEquals("read-nodes-list", clusterRoleBinding.getMetadata().getName()); assertNotNull(clusterRoleBinding.getSubjects()); assertEquals(1, clusterRoleBinding.getSubjects().size()); assertEquals("rbac.authorization.k8s.io", clusterRoleBinding.getSubjects().get(0).getApiGroup()); @@ -145,26 +118,25 @@ public void list() { assertEquals("default", clusterRoleBinding.getSubjects().get(0).getNamespace()); assertNotNull(clusterRoleBinding.getRoleRef()); assertEquals("ClusterRole", clusterRoleBinding.getRoleRef().getKind()); - assertEquals("node-reader", clusterRoleBinding.getRoleRef().getName()); + assertEquals("secret-reader", clusterRoleBinding.getRoleRef().getName()); found = true; } } - assertEquals(true, found); - + assertTrue(found); } @Test public void update() { - clusterRoleBinding = client.rbac().clusterRoleBindings().withName("read-nodes").edit() + clusterRoleBinding = client.rbac().clusterRoleBindings().withName("read-nodes-update").edit() 
.editSubject(0).withName("jane-new").endSubject().done(); assertNotNull(clusterRoleBinding); assertEquals("ClusterRoleBinding", clusterRoleBinding.getKind()); assertNotNull(clusterRoleBinding.getMetadata()); - assertEquals("read-nodes", clusterRoleBinding.getMetadata().getName()); + assertEquals("read-nodes-update", clusterRoleBinding.getMetadata().getName()); assertNotNull(clusterRoleBinding.getSubjects()); assertEquals(1, clusterRoleBinding.getSubjects().size()); assertEquals("rbac.authorization.k8s.io", clusterRoleBinding.getSubjects().get(0).getApiGroup()); @@ -173,16 +145,15 @@ public void update() { assertEquals("default", clusterRoleBinding.getSubjects().get(0).getNamespace()); assertNotNull(clusterRoleBinding.getRoleRef()); assertEquals("ClusterRole", clusterRoleBinding.getRoleRef().getKind()); - assertEquals("node-reader", clusterRoleBinding.getRoleRef().getName()); + assertEquals("secret-reader", clusterRoleBinding.getRoleRef().getName()); assertEquals("rbac.authorization.k8s.io", clusterRoleBinding.getRoleRef().getApiGroup()); } @Test public void delete() { - ClusterRoleBindingList clusterRoleBindingListBefore = client.rbac().clusterRoleBindings().list(); - boolean deleted = client.rbac().clusterRoleBindings().withName("read-nodes").delete(); + boolean deleted = client.rbac().clusterRoleBindings().withName("read-nodes-delete").delete(); assertTrue(deleted); DeleteEntity clusterRoleBindingDeleteEntity = new DeleteEntity<>(ClusterRoleBinding.class, client, "read-nodes", null); @@ -190,12 +161,10 @@ public void delete() { ClusterRoleBindingList clusterRoleBindingListAfter = client.rbac().clusterRoleBindings().list(); assertEquals(clusterRoleBindingListBefore.getItems().size()-1,clusterRoleBindingListAfter.getItems().size()); - } - @After - public void cleanup() { - client.rbac().clusterRoleBindings().withName("read-nodes").delete(); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(ClusterRoleBindingIT.class.getResourceAsStream("/clusterrolebinding-it.yml")); } - } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ClusterRoleIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ClusterRoleIT.java index 49ed4c4a3e0..f5fd4939aed 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ClusterRoleIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ClusterRoleIT.java @@ -15,29 +15,26 @@ */ package io.fabric8.kubernetes; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.DeleteEntity; import io.fabric8.kubernetes.api.model.rbac.ClusterRole; import io.fabric8.kubernetes.api.model.rbac.ClusterRoleList; -import io.fabric8.kubernetes.api.model.rbac.ClusterRoleBuilder; -import io.fabric8.kubernetes.api.model.rbac.PolicyRuleBuilder; import io.fabric8.kubernetes.client.KubernetesClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.runner.RunWith; -import org.junit.Before; import org.junit.Test; -import org.junit.After; import static org.awaitility.Awaitility.await; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeFalse; 
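(Aside, not part of the patch.) `ClusterRoleBindingIT` now loads its fixtures from `/clusterrolebinding-it.yml`, which is outside this excerpt. A hedged sketch of what one entry presumably looks like, reconstructed from the `get()` assertions (subject `jane` in `default`, roleRef `secret-reader`) and the builder code removed above; the real fixture would also define the `read-nodes-list`, `read-nodes-update` and `read-nodes-delete` variants with the same shape:

```yaml
# Sketch of a single entry inferred from the test assertions; the actual
# fixture file is not shown in this hunk.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: read-nodes-get
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: jane
    namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: secret-reader
```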
@RunWith(ArquillianConditionalRunner.class) @RequiresKubernetes @@ -51,50 +48,29 @@ public class ClusterRoleIT { private ClusterRole clusterRole; - @Before - public void init() { - // Do not run tests on opeshift 3.6.0 and 3.6.1 - assumeFalse(client.getVersion().getMajor().equalsIgnoreCase("1") - && client.getVersion().getMinor().startsWith("6")); - - ClusterRole kubernetesclusterRole = new ClusterRoleBuilder() - .withNewMetadata() - .withName("node-reader") - .endMetadata() - .addToRules(0, new PolicyRuleBuilder() - .addToApiGroups(0,"") - .addToResourceNames(0,"my-node") - .addToResources(0,"nodes") - .addToVerbs(0, "get") - .addToVerbs(1, "watch") - .addToVerbs(2, "list") - .build() - ) - .build(); - - client.rbac().clusterRoles().createOrReplace(kubernetesclusterRole); + @BeforeClass + public static void init() { + ClusterEntity.apply(ClusterRoleIT.class.getResourceAsStream("/clusterrole-it.yml")); } @Test public void get() { - clusterRole = client.rbac().clusterRoles().withName("node-reader").get(); + clusterRole = client.rbac().clusterRoles().withName("node-reader-get").get(); assertNotNull(clusterRole); assertEquals("ClusterRole", clusterRole.getKind()); assertNotNull(clusterRole.getMetadata()); - assertEquals("node-reader", clusterRole.getMetadata().getName()); + assertEquals("node-reader-get", clusterRole.getMetadata().getName()); assertNotNull(clusterRole.getRules()); assertEquals(1, clusterRole.getRules().size()); assertNotNull(clusterRole.getRules().get(0).getApiGroups()); assertEquals(1, clusterRole.getRules().get(0).getApiGroups().size()); assertEquals("", clusterRole.getRules().get(0).getApiGroups().get(0)); assertNotNull(clusterRole.getRules().get(0).getResourceNames()); - assertEquals(1, clusterRole.getRules().get(0).getResourceNames().size()); - assertEquals("my-node", clusterRole.getRules().get(0).getResourceNames().get(0)); assertNotNull(clusterRole.getRules().get(0).getResources()); assertEquals(1, clusterRole.getRules().get(0).getResources().size()); - assertEquals("nodes", clusterRole.getRules().get(0).getResources().get(0)); + assertEquals("secrets", clusterRole.getRules().get(0).getResources().get(0)); assertNotNull(clusterRole.getRules().get(0).getVerbs()); assertEquals(3, clusterRole.getRules().get(0).getVerbs().size()); assertEquals("get", clusterRole.getRules().get(0).getVerbs().get(0)); @@ -117,12 +93,7 @@ public void load() { assertNotNull(aClusterRole.getRules().get(0).getApiGroups()); assertEquals(1, aClusterRole.getRules().get(0).getApiGroups().size()); assertEquals("", aClusterRole.getRules().get(0).getApiGroups().get(0)); - assertNotNull(aClusterRole.getRules().get(0).getNonResourceURLs()); - assertEquals(1, aClusterRole.getRules().get(0).getNonResourceURLs().size()); - assertEquals("/healthz", aClusterRole.getRules().get(0).getNonResourceURLs().get(0)); assertNotNull(aClusterRole.getRules().get(0).getResourceNames()); - assertEquals(1, aClusterRole.getRules().get(0).getResourceNames().size()); - assertEquals("my-node", aClusterRole.getRules().get(0).getResourceNames().get(0)); assertNotNull(aClusterRole.getRules().get(0).getResources()); assertEquals(1, aClusterRole.getRules().get(0).getResources().size()); assertEquals("nodes", aClusterRole.getRules().get(0).getResources().get(0)); @@ -143,21 +114,19 @@ public void list() { assertNotNull(clusterRoleList.getItems()); for (ClusterRole clusterRole : clusterRoleList.getItems()) { - if (clusterRole.getMetadata().getName().equals("node-reader")) { + if 
(clusterRole.getMetadata().getName().equals("node-reader-list")) { assertEquals("ClusterRole", clusterRole.getKind()); assertNotNull(clusterRole.getMetadata()); - assertEquals("node-reader", clusterRole.getMetadata().getName()); + assertEquals("node-reader-list", clusterRole.getMetadata().getName()); assertNotNull(clusterRole.getRules()); assertEquals(1, clusterRole.getRules().size()); assertNotNull(clusterRole.getRules().get(0).getApiGroups()); assertEquals(1, clusterRole.getRules().get(0).getApiGroups().size()); assertEquals("", clusterRole.getRules().get(0).getApiGroups().get(0)); assertNotNull(clusterRole.getRules().get(0).getResourceNames()); - assertEquals(1, clusterRole.getRules().get(0).getResourceNames().size()); - assertEquals("my-node", clusterRole.getRules().get(0).getResourceNames().get(0)); assertNotNull(clusterRole.getRules().get(0).getResources()); assertEquals(1, clusterRole.getRules().get(0).getResources().size()); - assertEquals("nodes", clusterRole.getRules().get(0).getResources().get(0)); + assertEquals("secrets", clusterRole.getRules().get(0).getResources().get(0)); assertNotNull(clusterRole.getRules().get(0).getVerbs()); assertEquals(3, clusterRole.getRules().get(0).getVerbs().size()); assertEquals("get", clusterRole.getRules().get(0).getVerbs().get(0)); @@ -167,19 +136,19 @@ public void list() { } } - assertEquals(true, found); + assertTrue(found); } @Test public void update() { - clusterRole = client.rbac().clusterRoles().withName("node-reader").edit() + clusterRole = client.rbac().clusterRoles().withName("node-reader-update").edit() .editRule(0).addToApiGroups(1, "extensions").endRule().done(); assertNotNull(clusterRole); assertEquals("ClusterRole", clusterRole.getKind()); assertNotNull(clusterRole.getMetadata()); - assertEquals("node-reader", clusterRole.getMetadata().getName()); + assertEquals("node-reader-update", clusterRole.getMetadata().getName()); assertNotNull(clusterRole.getRules()); assertEquals(1, clusterRole.getRules().size()); assertNotNull(clusterRole.getRules().get(0).getApiGroups()); @@ -187,11 +156,9 @@ public void update() { assertEquals("", clusterRole.getRules().get(0).getApiGroups().get(0)); assertEquals("extensions", clusterRole.getRules().get(0).getApiGroups().get(1)); assertNotNull(clusterRole.getRules().get(0).getResourceNames()); - assertEquals(1, clusterRole.getRules().get(0).getResourceNames().size()); - assertEquals("my-node", clusterRole.getRules().get(0).getResourceNames().get(0)); assertNotNull(clusterRole.getRules().get(0).getResources()); assertEquals(1, clusterRole.getRules().get(0).getResources().size()); - assertEquals("nodes", clusterRole.getRules().get(0).getResources().get(0)); + assertEquals("secrets", clusterRole.getRules().get(0).getResources().get(0)); assertNotNull(clusterRole.getRules().get(0).getVerbs()); assertEquals(3, clusterRole.getRules().get(0).getVerbs().size()); assertEquals("get", clusterRole.getRules().get(0).getVerbs().get(0)); @@ -204,7 +171,7 @@ public void delete() { ClusterRoleList clusterRoleListBefore = client.rbac().clusterRoles().list(); - boolean deleted = client.rbac().clusterRoles().withName("node-reader").delete(); + boolean deleted = client.rbac().clusterRoles().withName("node-reader-delete").delete(); assertTrue(deleted); DeleteEntity deleteEntity = new DeleteEntity<>(ClusterRole.class, client, "node-reader", null); @@ -214,11 +181,9 @@ public void delete() { assertEquals(clusterRoleListBefore.getItems().size()-1,clusterRoleListAfter.getItems().size()); } - @After - public void cleanup() { - 
client.rbac().clusterRoles().withName("node-reader").delete(); - DeleteEntity deleteEntity = new DeleteEntity<>(ClusterRole.class, client, "node-reader", null); - await().atMost(30, TimeUnit.SECONDS).until(deleteEntity); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(ClusterRoleBindingIT.class.getResourceAsStream("/clusterrole-it.yml")); } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ConfigMapIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ConfigMapIT.java index 0720a45d16e..319f76f3b8a 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ConfigMapIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ConfigMapIT.java @@ -16,24 +16,20 @@ package io.fabric8.kubernetes; -import io.fabric8.commons.DeleteEntity; -import io.fabric8.commons.ReadyEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.kubernetes.api.model.*; import io.fabric8.kubernetes.client.KubernetesClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; -import java.util.concurrent.TimeUnit; - import static junit.framework.TestCase.assertTrue; import static org.assertj.core.api.AssertionsForClassTypes.assertThat; -import static org.awaitility.Awaitility.await; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -46,88 +42,49 @@ public class ConfigMapIT { @ArquillianResource Session session; - private ConfigMap configMap1, configMap2; - - private String currentNamespace; + private ConfigMap configMap; - @Before - public void init() { - currentNamespace = session.getNamespace(); - configMap1 = new ConfigMapBuilder() - .withNewMetadata().withName("configmap1").endMetadata() - .addToData("1", "one") - .addToData("2", "two") - .addToData("3", "three") - .build(); - configMap2 = new ConfigMapBuilder() - .withNewMetadata().withName("configmap2").endMetadata() - .addToData("PostgreSQL", "Free Open Source Enterprise Database") - .addToData("DB2", "Enterprise Database , It's expensive") - .addToData("Oracle", "Enterprise Database , It's expensive") - .addToData("MySQL", "Free Open SourceDatabase") - .build(); - client.configMaps().inNamespace(currentNamespace).create(configMap1); - client.configMaps().inNamespace(currentNamespace).create(configMap2); + @BeforeClass + public static void init() { + ClusterEntity.apply(ConfigMapIT.class.getResourceAsStream("/configmap-it.yml")); } @Test public void load() { - ConfigMap aConfigMap = client.configMaps().inNamespace(currentNamespace).load(getClass().getResourceAsStream("/test-configmap.yml")).get(); + ConfigMap aConfigMap = client.configMaps().inNamespace(session.getNamespace()).load(getClass().getResourceAsStream("/test-configmap.yml")).get(); assertThat(aConfigMap).isNotNull(); assertEquals("game-config", aConfigMap.getMetadata().getName()); } @Test public void get() { - configMap1 = client.configMaps().inNamespace(currentNamespace).withName("configmap1").get(); - assertThat(configMap1).isNotNull(); - configMap2 = client.configMaps().inNamespace(currentNamespace).withName("configmap2").get(); - assertThat(configMap2).isNotNull(); + configMap = 
client.configMaps().inNamespace(session.getNamespace()).withName("configmap-get").get(); + assertThat(configMap).isNotNull(); } @Test public void list() { - ConfigMapList aConfigMapList = client.configMaps().inNamespace(currentNamespace).list(); + ConfigMapList aConfigMapList = client.configMaps().inNamespace(session.getNamespace()).list(); assertNotNull(aConfigMapList); - assertEquals(2, aConfigMapList.getItems().size()); + assertTrue(aConfigMapList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity configMap1Ready = new ReadyEntity<>(ConfigMap.class, client, "configmap1", currentNamespace); - ReadyEntity configMap2Ready = new ReadyEntity<>(ConfigMap.class, client, "configmap2", currentNamespace); - - configMap1 = client.configMaps().inNamespace(currentNamespace).withName("configmap1").edit() - .addToData("4", "four").done(); - - configMap2 = client.configMaps().inNamespace(currentNamespace).withName("configmap2").edit() + configMap = client.configMaps().inNamespace(session.getNamespace()).withName("configmap-update").edit() .addToData("MSSQL", "Microsoft Database").done(); - await().atMost(30, TimeUnit.SECONDS).until(configMap1Ready); - await().atMost(30, TimeUnit.SECONDS).until(configMap2Ready); - assertNotNull(configMap1); - assertNotNull(configMap2); - assertEquals("four", configMap1.getData().get("4")); - assertEquals("Microsoft Database", configMap2.getData().get("MSSQL")); + assertNotNull(configMap); + assertEquals("Microsoft Database", configMap.getData().get("MSSQL")); } @Test public void delete() { - ReadyEntity configMap1Ready = new ReadyEntity<>(ConfigMap.class, client, "configmap1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(configMap1Ready); - assertTrue(client.configMaps().inNamespace(currentNamespace).withName("configmap1").delete()); - - ReadyEntity configMap2Ready = new ReadyEntity<>(ConfigMap.class, client, "configmap2", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(configMap2Ready); - assertTrue(client.configMaps().inNamespace(currentNamespace).withName("configmap2").delete()); + assertTrue(client.configMaps().inNamespace(session.getNamespace()).withName("configmap-delete").delete()); } - @After - public void cleanup() throws InterruptedException { - if (client.configMaps().inNamespace(currentNamespace).list().getItems().size()!= 0) { - client.configMaps().inNamespace(currentNamespace).delete(); - } - DeleteEntity configMapDelete = new DeleteEntity<>(ConfigMap.class, client, "configmap1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(configMapDelete); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(ConfigMapIT.class.getResourceAsStream("/configmap-it.yml")); } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/CronJobIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/CronJobIT.java index 32c04c8fb7b..ab5f417e759 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/CronJobIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/CronJobIT.java @@ -16,26 +16,23 @@ package io.fabric8.kubernetes; +import io.fabric8.commons.ClusterEntity; import io.fabric8.kubernetes.api.model.batch.CronJob; -import io.fabric8.kubernetes.api.model.batch.CronJobBuilder; import io.fabric8.kubernetes.api.model.batch.CronJobList; import io.fabric8.kubernetes.client.KubernetesClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import 
org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; -import java.util.Arrays; - import static junit.framework.TestCase.assertNotNull; import static junit.framework.TestCase.assertTrue; import static org.assertj.core.api.AssertionsForClassTypes.assertThat; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; @RunWith(ArquillianConditionalRunner.class) @RequiresKubernetes @@ -46,59 +43,13 @@ public class CronJobIT { @ArquillianResource Session session; - private CronJob cronJob1, cronJob2; + private CronJob cronJob1; private String currentNamespace; - @Before - public void init() { - currentNamespace = session.getNamespace(); - cronJob1 = new CronJobBuilder().withApiVersion("batch/v1beta1").withNewMetadata() - .withName("cronjob1") - .endMetadata() - .withNewSpec() - .withSchedule("*/1 * * * *") - .withNewJobTemplate() - .withNewSpec() - .withNewTemplate() - .withNewSpec() - .addNewContainer() - .withName("hello") - .withImage("busybox") - .withArgs("/bin/sh", "-c", "date; echo Hello from Kubernetes") - .endContainer() - .withRestartPolicy("OnFailure") - .endSpec() - .endTemplate() - .endSpec() - .endJobTemplate() - .endSpec() - .build(); - cronJob2 = new CronJobBuilder().withApiVersion("batch/v1beta1").withNewMetadata() - .withName("cronjob2") - .endMetadata() - .withNewSpec() - .withSchedule("*/1 * * * *") - .withNewJobTemplate() - .withNewSpec() - .withNewTemplate() - .withNewSpec() - .addNewContainer() - .withName("pi") - .withImage("perl") - .withCommand("perl") - .withArgs(Arrays.asList("-Mbignum=bpi", "-wle", "print bpi(2000)")) - .endContainer() - .withRestartPolicy("OnFailure") - .endSpec() - .endTemplate() - .endSpec() - .endJobTemplate() - .endSpec() - .build(); - - client.batch().cronjobs().inNamespace(currentNamespace).create(cronJob1); - client.batch().cronjobs().inNamespace(currentNamespace).create(cronJob2); + @BeforeClass + public static void init() { + ClusterEntity.apply(CronJobIT.class.getResourceAsStream("/cronjob-it.yml")); } @Test @@ -110,47 +61,39 @@ public void load() { @Test public void get() { - cronJob1 = client.batch().cronjobs().inNamespace(currentNamespace).withName(cronJob1.getMetadata().getName()).get(); + currentNamespace = session.getNamespace(); + cronJob1 = client.batch().cronjobs().inNamespace(currentNamespace).withName("hello-get").get(); assertThat(cronJob1).isNotNull(); - cronJob2 = client.batch().cronjobs().inNamespace(currentNamespace).withName(cronJob2.getMetadata().getName()).get(); - assertThat(cronJob2).isNotNull(); } @Test public void list() { + currentNamespace = session.getNamespace(); CronJobList cronJobList = client.batch().cronjobs().inNamespace(currentNamespace).list(); assertNotNull(cronJobList); - assertEquals(2, cronJobList.getItems().size()); + assertTrue(cronJobList.getItems().size() >= 1); } @Test public void update() { - cronJob1 = client.batch().cronjobs().inNamespace(currentNamespace).withName(cronJob1.getMetadata().getName()) + currentNamespace = session.getNamespace(); + cronJob1 = client.batch().cronjobs().inNamespace(currentNamespace).withName("hello-update") .edit() .editSpec() .withSchedule("*/1 * * * *") .endSpec() .done(); - cronJob2 = client.batch().cronjobs().inNamespace(currentNamespace).withName(cronJob2.getMetadata().getName()) - .edit() - .editSpec() 
- .withSuspend(false) - .endSpec() - .done(); - assertEquals("*/1 * * * *", cronJob1.getSpec().getSchedule()); - assertFalse(cronJob2.getSpec().getSuspend()); - } @Test public void delete() { - assertTrue(client.batch().cronjobs().inNamespace(currentNamespace).withName(cronJob1.getMetadata().getName()).delete()); - assertTrue(client.batch().cronjobs().inNamespace(currentNamespace).withName(cronJob2.getMetadata().getName()).delete()); + currentNamespace = session.getNamespace(); + assertTrue(client.batch().cronjobs().inNamespace(currentNamespace).withName("hello-delete").delete()); } - @After - public void cleanup() { - client.batch().cronjobs().inNamespace(currentNamespace).delete(); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(CronJobIT.class.getResourceAsStream("/cronjob-it.yml")); } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/DeploymentIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/DeploymentIT.java index b90c579d528..988d1597781 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/DeploymentIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/DeploymentIT.java @@ -16,11 +16,10 @@ package io.fabric8.kubernetes; -import io.fabric8.commons.DeleteEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.ReadyEntity; import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.api.model.apps.Deployment; -import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder; import io.fabric8.kubernetes.api.model.apps.DeploymentList; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.internal.readiness.Readiness; @@ -30,8 +29,6 @@ import org.jboss.arquillian.test.api.ArquillianResource; import org.junit.*; import org.junit.runner.RunWith; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.util.List; import java.util.concurrent.TimeUnit; @@ -53,103 +50,54 @@ public class DeploymentIT { private Deployment deployment1; - private String currentNamespace; - - private static final Logger logger = LoggerFactory.getLogger(DeploymentIT.class); - - @Before - public void init() { - - currentNamespace = session.getNamespace(); - - client.apps().deployments().inNamespace(currentNamespace).delete(); - client.pods().inNamespace(currentNamespace).delete(); - - deployment1 = new DeploymentBuilder() - .withNewMetadata() - .withName("deployment1") - .addToLabels("test", "deployment") - .endMetadata() - .withNewSpec() - .withReplicas(1) - .withNewTemplate() - .withNewMetadata() - .addToLabels("app", "httpd") - .endMetadata() - .withNewSpec() - .addNewContainer() - .withName("busybox") - .withImage("busybox") - .withCommand("sleep","36000") - .endContainer() - .endSpec() - .endTemplate() - .withNewSelector() - .addToMatchLabels("app","httpd") - .endSelector() - .endSpec() - .build(); - - client.apps().deployments().inNamespace(currentNamespace).create(deployment1); + @BeforeClass + public static void init() { + ClusterEntity.apply(DeploymentIT.class.getResourceAsStream("/deployment-it.yml")); } @Test public void load() { - - Deployment aDeployment = client.apps().deployments().inNamespace(currentNamespace).load(getClass().getResourceAsStream("/test-deployments.yml")).get(); + Deployment aDeployment = client.apps().deployments().inNamespace(session.getNamespace()).load(getClass().getResourceAsStream("/test-deployments.yml")).get(); assertThat(aDeployment).isNotNull(); assertEquals("nginx-deployment", 
aDeployment.getMetadata().getName()); } @Test public void get() { - deployment1 = client.apps().deployments().inNamespace(currentNamespace) - .withName("deployment1").get(); + deployment1 = client.apps().deployments().inNamespace(session.getNamespace()) + .withName("deployment-standard").get(); assertNotNull(deployment1); } @Test public void list() { - DeploymentList aDeploymentList = client.apps().deployments().inNamespace(currentNamespace).list(); + DeploymentList aDeploymentList = client.apps().deployments().inNamespace(session.getNamespace()).list(); assertThat(aDeploymentList).isNotNull(); - assertEquals(1, aDeploymentList.getItems().size()); + assertTrue(aDeploymentList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity deploymentReady = new ReadyEntity<>(Deployment.class, client, "deployment1", currentNamespace); - deployment1 = client.apps().deployments().inNamespace(currentNamespace).withName("deployment1").edit() - .editSpec().withReplicas(2).endSpec().done(); - await().atMost(30, TimeUnit.SECONDS).until(deploymentReady); + deployment1 = client.apps().deployments().inNamespace(session.getNamespace()).withName("deployment-standard") + .edit().editMetadata().addToAnnotations("updated", "true").endMetadata().done(); assertThat(deployment1).isNotNull(); - assertEquals(2, deployment1.getSpec().getReplicas().intValue()); - } - - @Test - public void delete() throws InterruptedException { - // Usually creation, deletion of things like Deployments take some time. So let's wait for a while: - // Wait for resources to get ready - ReadyEntity deploymentReady = new ReadyEntity<>(Deployment.class, client, "deployment1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(deploymentReady); - assertTrue(client.apps().deployments().inNamespace(currentNamespace).delete(deployment1)); + assertEquals("true", deployment1.getMetadata().getAnnotations().get("updated")); } @Test - public void waitTest() throws InterruptedException { + public void waitTest() { // Wait for resources to get ready - ReadyEntity deploymentReady = new ReadyEntity<>(Deployment.class, client, "deployment1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(deploymentReady); + ReadyEntity deploymentReady = new ReadyEntity<>(Deployment.class, client, "deployment-wait", session.getNamespace()); + await().atMost(120, TimeUnit.SECONDS).until(deploymentReady); Deployment deploymentOne = client.apps().deployments() - .inNamespace(currentNamespace).withName("deployment1").get(); + .inNamespace(session.getNamespace()).withName("deployment-wait").get(); assertTrue(Readiness.isDeploymentReady(deploymentOne)); } @Test public void listFromServer() { - ReadyEntity deploymentReady = new ReadyEntity<>(Deployment.class, client, "deployment1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(deploymentReady); - - List resources = client.resourceList(deployment1).inNamespace(currentNamespace).fromServer().get(); + deployment1 = client.apps().deployments().inNamespace(session.getNamespace()).withName("deployment-standard").get(); + List resources = client.resourceList(deployment1).inNamespace(session.getNamespace()).fromServer().get(); assertNotNull(resources); assertEquals(1, resources.size()); @@ -158,26 +106,17 @@ public void listFromServer() { HasMetadata fromServerPod = resources.get(0); assertEquals(deployment1.getKind(), fromServerPod.getKind()); - assertEquals(currentNamespace, fromServerPod.getMetadata().getNamespace()); + assertEquals(session.getNamespace(), 
fromServerPod.getMetadata().getNamespace()); assertEquals(deployment1.getMetadata().getName(), fromServerPod.getMetadata().getName()); } - @After - public void cleanup() throws InterruptedException { - int attempts = 0; - do { - try { - if (client.apps().deployments().inNamespace(currentNamespace).list().getItems().size() != 0) { - client.apps().deployments().inNamespace(currentNamespace).delete(); - } - // Wait for resources to get destroyed - DeleteEntity deploymentDelete = new DeleteEntity<>(Deployment.class, client, "deployment1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(deploymentDelete); - return; - } catch(NullPointerException exception) { - attempts++; - continue; - } - } while (attempts < 5); + @Test + public void delete() { + assertTrue(client.apps().deployments().inNamespace(session.getNamespace()).withName("deployment-delete").delete()); + } + + @AfterClass + public static void cleanup() { + ClusterEntity.remove(ClusterRoleBindingIT.class.getResourceAsStream("/deployment-it.yml")); } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ListLoadTest.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ListLoadTest.java index 71786cd9f84..7eb62c53d25 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ListLoadTest.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ListLoadTest.java @@ -48,11 +48,12 @@ public void init() { @Test public void test() { + DeploymentList deploymentListOld = client.apps().deployments().inNamespace(currentNamespace).list(); client.load(getClass().getResourceAsStream("/test-list.json")).createOrReplace(); - DeploymentList aDeploymentList = client.apps().deployments().inNamespace(currentNamespace).list(); - assertThat(aDeploymentList).isNotNull(); - assertEquals(1, aDeploymentList.getItems().size()); + DeploymentList deploymentListNew = client.apps().deployments().inNamespace(currentNamespace).list(); + assertThat(deploymentListNew).isNotNull(); + assertEquals(deploymentListOld.getItems().size() + 1, deploymentListNew.getItems().size()); } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/NetworkPolicyIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/NetworkPolicyIT.java index 4ea0876fdfa..51048161731 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/NetworkPolicyIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/NetworkPolicyIT.java @@ -16,22 +16,18 @@ package io.fabric8.kubernetes; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.DeleteEntity; import io.fabric8.commons.ReadyEntity; -import io.fabric8.kubernetes.api.model.IntOrString; import io.fabric8.kubernetes.api.model.networking.NetworkPolicy; -import io.fabric8.kubernetes.api.model.networking.NetworkPolicyBuilder; -import io.fabric8.kubernetes.api.model.networking.NetworkPolicyIngressRuleBuilder; -import io.fabric8.kubernetes.api.model.networking.NetworkPolicyPeerBuilder; -import io.fabric8.kubernetes.api.model.networking.NetworkPolicyPortBuilder; import io.fabric8.kubernetes.api.model.networking.NetworkPolicyList; import io.fabric8.kubernetes.client.KubernetesClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import 
org.junit.Test; import org.junit.runner.RunWith; @@ -39,8 +35,8 @@ import java.util.concurrent.TimeUnit; import static org.awaitility.Awaitility.await; -import static org.junit.Assert.*; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @RunWith(ArquillianConditionalRunner.class) @@ -53,44 +49,13 @@ public class NetworkPolicyIT { @ArquillianResource Session session; - private NetworkPolicy networkPolicy; - - private String currentNamespace; - - @Before - public void init(){ - currentNamespace = session.getNamespace(); - networkPolicy = new NetworkPolicyBuilder() - .withNewMetadata() - .withName("networkpolicy") - .addToLabels("foo","bar") - .endMetadata() - .withNewSpec() - .withNewPodSelector() - .addToMatchLabels("role","db") - .endPodSelector() - .addToIngress(0, - new NetworkPolicyIngressRuleBuilder() - .addToFrom(0, new NetworkPolicyPeerBuilder().withNewPodSelector() - .addToMatchLabels("role","frontend").endPodSelector() - .build() - ).addToFrom(1, new NetworkPolicyPeerBuilder().withNewNamespaceSelector() - .addToMatchLabels("project","myproject").endNamespaceSelector() - .build() - ) - .addToPorts(0,new NetworkPolicyPortBuilder().withPort(new IntOrString(6379)) - .withProtocol("TCP").build()) - .build() - ) - .endSpec() - .build(); - - client.network().networkPolicies().create(networkPolicy); + @BeforeClass + public static void init() { + ClusterEntity.apply(NetworkPolicyIT.class.getResourceAsStream("/networkpolicy-it.yml")); } @Test public void load() { - NetworkPolicy loadedNetworkPolicy = client.network().networkPolicies() .load(getClass().getResourceAsStream("/test-networkpolicy.yml")).get(); @@ -103,16 +68,14 @@ public void load() { .getPodSelector().getMatchLabels().get("role")); assertEquals("TCP", loadedNetworkPolicy.getSpec().getIngress().get(0).getPorts().get(0).getProtocol()); assertEquals(6379, loadedNetworkPolicy.getSpec().getIngress().get(0).getPorts().get(0).getPort().getIntVal().intValue()); - } @Test public void get() { - NetworkPolicy getNetworkPolicy = client.network().networkPolicies() - .withName("networkpolicy").get(); + .withName("networkpolicy-get").get(); assertNotNull(getNetworkPolicy); - assertEquals("networkpolicy", getNetworkPolicy.getMetadata().getName()); + assertEquals("networkpolicy-get", getNetworkPolicy.getMetadata().getName()); assertEquals(1,getNetworkPolicy.getMetadata().getLabels().size()); assertEquals("db", getNetworkPolicy.getSpec().getPodSelector().getMatchLabels().get("role")); assertEquals("myproject", getNetworkPolicy.getSpec().getIngress().get(0).getFrom().get(1) @@ -127,32 +90,35 @@ public void get() { public void list() { NetworkPolicyList networkPolicyList = client.network().networkPolicies() - .withLabels(Collections.singletonMap("foo","bar")).list(); + .withLabels(Collections.singletonMap("test","list")).list(); assertNotNull(networkPolicyList); - assertEquals(1,networkPolicyList.getItems().size()); - assertEquals("networkpolicy",networkPolicyList.getItems().get(0).getMetadata().getName()); - assertEquals("db", networkPolicyList.getItems().get(0).getSpec().getPodSelector().getMatchLabels().get("role")); - assertEquals("myproject", networkPolicyList.getItems().get(0).getSpec().getIngress().get(0).getFrom().get(1) - .getNamespaceSelector().getMatchLabels().get("project")); - assertEquals("frontend", networkPolicyList.getItems().get(0).getSpec().getIngress().get(0).getFrom().get(0) - .getPodSelector().getMatchLabels().get("role")); - 
assertEquals("TCP", networkPolicyList.getItems().get(0).getSpec().getIngress().get(0).getPorts().get(0).getProtocol()); - assertEquals(6379, networkPolicyList.getItems().get(0).getSpec().getIngress().get(0).getPorts().get(0).getPort().getIntVal().intValue()); + assertTrue(networkPolicyList.getItems().size() >= 1); + for (NetworkPolicy networkPolicy : networkPolicyList.getItems()) { + if (networkPolicy.getMetadata().getName().equals("networkpolicy-list")) { + assertEquals("db", networkPolicy.getSpec().getPodSelector().getMatchLabels().get("role")); + assertEquals("myproject", networkPolicy.getSpec().getIngress().get(0).getFrom().get(1) + .getNamespaceSelector().getMatchLabels().get("project")); + assertEquals("frontend", networkPolicy.getSpec().getIngress().get(0).getFrom().get(0) + .getPodSelector().getMatchLabels().get("role")); + assertEquals("TCP", networkPolicy.getSpec().getIngress().get(0).getPorts().get(0).getProtocol()); + assertEquals(6379, networkPolicy.getSpec().getIngress().get(0).getPorts().get(0).getPort().getIntVal().intValue()); + } + } } @Test - public void update(){ - ReadyEntity networkPolicyReady = new ReadyEntity<>(NetworkPolicy.class, client, "networkpolicy", currentNamespace); - networkPolicy = client.network().networkPolicies() - .withName("networkpolicy").edit() - .editMetadata().addToLabels("bar","foo").endMetadata() + public void update() { + ReadyEntity networkPolicyReady = new ReadyEntity<>(NetworkPolicy.class, client, "networkpolicy-update", session.getNamespace()); + NetworkPolicy networkPolicy = client.network().networkPolicies() + .withName("networkpolicy-update").edit() + .editMetadata().addToLabels("bar", "foo").endMetadata() .done(); await().atMost(30, TimeUnit.SECONDS).until(networkPolicyReady); assertNotNull(networkPolicy); - assertEquals("networkpolicy",networkPolicy.getMetadata().getName()); - assertEquals(2,networkPolicy.getMetadata().getLabels().size()); + assertEquals("networkpolicy-update", networkPolicy.getMetadata().getName()); + assertEquals(1, networkPolicy.getMetadata().getLabels().size()); assertEquals("db", networkPolicy.getSpec().getPodSelector().getMatchLabels().get("role")); assertEquals("myproject", networkPolicy.getSpec().getIngress().get(0).getFrom().get(1) .getNamespaceSelector().getMatchLabels().get("project")); @@ -164,26 +130,19 @@ public void update(){ } @Test - public void delete(){ - ReadyEntity networkPolicyReady = new ReadyEntity<>(NetworkPolicy.class, client, "networkpolicy", currentNamespace); + public void delete() { + ReadyEntity networkPolicyReady = new ReadyEntity<>(NetworkPolicy.class, client, "networkpolicy-delete", session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(networkPolicyReady); - boolean deleted = client.network().networkPolicies().delete(networkPolicy); + boolean deleted = client.network().networkPolicies().inNamespace(session.getNamespace()).withName("networkpolicy-delete").delete(); assertTrue(deleted); - DeleteEntity deleteEntity = new DeleteEntity<>(NetworkPolicy.class, client, "networkpolicy", currentNamespace); + DeleteEntity deleteEntity = new DeleteEntity<>(NetworkPolicy.class, client, "networkpolicy-delete", session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(deleteEntity); - - NetworkPolicyList networkPolicyList = client.network().networkPolicies().list(); - assertEquals(0,networkPolicyList.getItems().size()); } - @After - public void cleanup() { - if (client.network().networkPolicies().list().getItems().size()!= 0) { - client.network().networkPolicies().delete(); - 
} - DeleteEntity networkPolicyDelete = new DeleteEntity<>(NetworkPolicy.class, client, "networkpolicy", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(networkPolicyDelete); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(NetworkPolicyIT.class.getResourceAsStream("/networkpolicy-it.yml")); } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/PodIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/PodIT.java index e68e053ad82..b9aef274344 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/PodIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/PodIT.java @@ -17,15 +17,13 @@ package io.fabric8.kubernetes; import com.google.common.io.Files; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.ReadyEntity; import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.api.model.IntOrString; -import io.fabric8.kubernetes.api.model.LabelSelectorBuilder; import io.fabric8.kubernetes.api.model.Pod; import io.fabric8.kubernetes.api.model.PodBuilder; import io.fabric8.kubernetes.api.model.PodList; -import io.fabric8.kubernetes.api.model.PodSpec; -import io.fabric8.kubernetes.api.model.PodSpecBuilder; import io.fabric8.kubernetes.api.model.policy.PodDisruptionBudget; import io.fabric8.kubernetes.api.model.policy.PodDisruptionBudgetBuilder; import io.fabric8.kubernetes.api.model.policy.PodDisruptionBudgetSpecBuilder; @@ -35,13 +33,12 @@ import io.fabric8.kubernetes.client.internal.readiness.Readiness; import okhttp3.Response; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang.RandomStringUtils; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; import org.slf4j.Logger; @@ -56,7 +53,6 @@ import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.util.List; -import java.util.Locale; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -82,69 +78,50 @@ public class PodIT { private Pod pod1; - private String currentNamespace; + private static final int POD_READY_WAIT_IN_SECONDS = 60; private static final Logger logger = LoggerFactory.getLogger(PodIT.class); - @Before - public void init() { - currentNamespace = session.getNamespace(); - client.pods().inNamespace(currentNamespace).delete(); - - String suffix = RandomStringUtils.randomAlphanumeric(6).toLowerCase(Locale.ROOT); - String pdbScope = "test-" + suffix; - - pod1 = new PodBuilder() - .withNewMetadata() - .withName("pod1-" + suffix) - .addToLabels("pdb-scope", pdbScope) - .endMetadata() - .withNewSpec() - .addNewContainer() - .withName("busybox") - .withImage("busybox") - .withCommand("sleep","36000") - .endContainer() - .endSpec() - .build(); - - client.pods().inNamespace(currentNamespace).createOrReplace(pod1); + @BeforeClass + public static void init() { + ClusterEntity.apply(PodIT.class.getResourceAsStream("/pod-it.yml")); } @Test public void load() { - Pod aPod = client.pods().inNamespace(currentNamespace).load(getClass().getResourceAsStream("/test-pod.yml")).get(); + Pod aPod = 
client.pods().inNamespace(session.getNamespace()).load(getClass().getResourceAsStream("/test-pod.yml")).get(); assertThat(aPod).isNotNull(); assertEquals("nginx", aPod.getMetadata().getName()); } @Test public void get() { - pod1 = client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).get(); + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").get(); assertNotNull(pod1); } @Test public void list() { - PodList podList = client.pods().inNamespace(currentNamespace).list(); + PodList podList = client.pods().inNamespace(session.getNamespace()).list(); assertThat(podList).isNotNull(); assertTrue(podList.getItems().size() >= 1); } @Test public void update() { - pod1 = client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).edit() + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").edit() .editMetadata().addToLabels("foo", "bar").endMetadata().done(); assertEquals("bar", pod1.getMetadata().getLabels().get("foo")); } @Test public void delete() { - assertTrue(client.pods().inNamespace(currentNamespace).delete(pod1)); + assertTrue(client.pods().inNamespace(session.getNamespace()).withName("pod-delete").delete()); } @Test public void evict() throws InterruptedException { + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").get(); String pdbScope = pod1.getMetadata().getLabels().get("pdb-scope"); assertNotNull("pdb-scope label is null. is pod1 misconfigured?", pdbScope); @@ -178,47 +155,49 @@ public void evict() throws InterruptedException { .withSpec(pod1.getSpec()) .build(); - client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()) - .waitUntilReady(30, TimeUnit.SECONDS); + client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()) + .waitUntilReady(POD_READY_WAIT_IN_SECONDS, TimeUnit.SECONDS); - client.pods().inNamespace(currentNamespace).createOrReplace(pod2); - client.pods().inNamespace(currentNamespace).withName(pod2.getMetadata().getName()) - .waitUntilReady(30, TimeUnit.SECONDS); + client.pods().inNamespace(session.getNamespace()).createOrReplace(pod2); + client.pods().inNamespace(session.getNamespace()).withName(pod2.getMetadata().getName()) + .waitUntilReady(POD_READY_WAIT_IN_SECONDS, TimeUnit.SECONDS); - client.policy().podDisruptionBudget().inNamespace(currentNamespace).createOrReplace(pdb); + client.policy().podDisruptionBudget().inNamespace(session.getNamespace()).createOrReplace(pdb); - assertTrue(client.pods().inNamespace(currentNamespace).withName(pod2.getMetadata().getName()).evict()); + assertTrue(client.pods().inNamespace(session.getNamespace()).withName(pod2.getMetadata().getName()).evict()); // cant evict because only one left - assertFalse(client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).evict()); + assertFalse(client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()).evict()); // ensure it really is still up - assertTrue(Readiness.isReady(client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).fromServer().get())); + assertTrue(Readiness.isReady(client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()).fromServer().get())); // create another pod to satisfy PDB - client.pods().inNamespace(currentNamespace).createOrReplace(pod3); - client.pods().inNamespace(currentNamespace).withName(pod3.getMetadata().getName()) - .waitUntilReady(30, TimeUnit.SECONDS); + 
client.pods().inNamespace(session.getNamespace()).createOrReplace(pod3); + client.pods().inNamespace(session.getNamespace()).withName(pod3.getMetadata().getName()) + .waitUntilReady(POD_READY_WAIT_IN_SECONDS, TimeUnit.SECONDS); // can now evict - assertTrue(client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).evict()); + assertTrue(client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()).evict()); } @Test public void log() throws InterruptedException { // Wait for resources to get ready - ReadyEntity podReady = new ReadyEntity(Pod.class, client, pod1.getMetadata().getName(), currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(podReady); - String log = client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).getLog(); + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").get(); + ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), session.getNamespace()); + await().atMost(POD_READY_WAIT_IN_SECONDS, TimeUnit.SECONDS).until(podReady); + String log = client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()).getLog(); assertNotNull(log); } @Test public void exec() throws InterruptedException { // Wait for resources to get ready - ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(podReady); + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").get(); + ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), session.getNamespace()); + await().atMost(POD_READY_WAIT_IN_SECONDS, TimeUnit.SECONDS).until(podReady); final CountDownLatch execLatch = new CountDownLatch(1); ByteArrayOutputStream out = new ByteArrayOutputStream(); - ExecWatch execWatch = client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()) + ExecWatch execWatch = client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()) .writingOutput(out).withTTY().usingListener(new ExecListener() { @Override public void onOpen(Response response) { @@ -246,10 +225,11 @@ public void onClose(int i, String s) { @Test public void readFile() throws IOException { // Wait for resources to get ready - ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(podReady); - ExecWatch watch = client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).writingOutput(System.out).exec("sh", "-c", "echo 'hello' > /msg"); - try (InputStream is = client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).file("/msg").read()) { + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").get(); + ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), session.getNamespace()); + await().atMost(60, TimeUnit.SECONDS).until(podReady); + ExecWatch watch = client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()).writingOutput(System.out).exec("sh", "-c", "echo 'hello' > /msg"); + try (InputStream is = client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()).file("/msg").read()) { String result = new BufferedReader(new InputStreamReader(is)).lines().collect(Collectors.joining("\n")); 
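The evict() flow above relies on a PodDisruptionBudget built in unchanged context lines outside this hunk. Judging from the pdb-scope label checks and the "cant evict because only one left" comment, it presumably selects the test pods by that label with minAvailable set to 1; a sketch of such a budget (the metadata name is an assumption):

    PodDisruptionBudget pdb = new PodDisruptionBudgetBuilder()
        .withNewMetadata().withName("pod-it-pdb").endMetadata()
        .withSpec(new PodDisruptionBudgetSpecBuilder()
            // with minAvailable=1, eviction is refused once only one matching pod remains
            .withMinAvailable(new IntOrString(1))
            .withNewSelector().addToMatchLabels("pdb-scope", pdbScope).endSelector()
            .build())
        .build();
    client.policy().podDisruptionBudget().inNamespace(session.getNamespace()).createOrReplace(pdb);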
assertEquals("hello", result); } @@ -258,10 +238,11 @@ public void readFile() throws IOException { @Test public void readFileEscapedParams() throws IOException { // Wait for resources to get ready - ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(podReady); - ExecWatch watch = client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).writingOutput(System.out).exec("sh", "-c", "echo 'H$ll* (W&RLD}' > /msg"); - try (InputStream is = client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).file("/msg").read()) { + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").get(); + ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), session.getNamespace()); + await().atMost(POD_READY_WAIT_IN_SECONDS, TimeUnit.SECONDS).until(podReady); + ExecWatch watch = client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()).writingOutput(System.out).exec("sh", "-c", "echo 'H$ll* (W&RLD}' > /msg"); + try (InputStream is = client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()).file("/msg").read()) { String result = new BufferedReader(new InputStreamReader(is)).lines().collect(Collectors.joining("\n")); assertEquals("H$ll* (W&RLD}", result); } @@ -270,19 +251,20 @@ public void readFileEscapedParams() throws IOException { @Test public void uploadFile() throws IOException { // Wait for resources to get ready - ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(podReady); + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").get(); + ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), session.getNamespace()); + await().atMost(POD_READY_WAIT_IN_SECONDS, TimeUnit.SECONDS).until(podReady); final File tmpDir = Files.createTempDir(); final File tmpFile = new File(tmpDir, "toBeUploaded"); tmpFile.createNewFile(); Files.write("I'm uploaded", tmpFile, StandardCharsets.UTF_8); - client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()) + client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()) .file("/tmp/toBeUploaded").upload(tmpFile.toPath()); try ( - final InputStream checkIs = client.pods().inNamespace(currentNamespace) + final InputStream checkIs = client.pods().inNamespace(session.getNamespace()) .withName(pod1.getMetadata().getName()).file("/tmp/toBeUploaded").read(); final ByteArrayOutputStream resultOs = new ByteArrayOutputStream() ) { @@ -292,10 +274,11 @@ public void uploadFile() throws IOException { } @Test - public void uploadDir() throws IOException { + public void uploadDir() { // Wait for resources to get ready - ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(podReady); + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").get(); + ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), session.getNamespace()); + await().atMost(POD_READY_WAIT_IN_SECONDS, TimeUnit.SECONDS).until(podReady); final String[] files = new String[]{"1", "2"}; final File tmpDir = Files.createTempDir(); @@ -310,12 +293,12 @@ public void uploadDir() throws 
IOException { } }); - client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()) + client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()) .dir("/tmp/uploadDir").upload(uploadDir.toPath()); Stream.of(files).forEach(fileName -> { try ( - final InputStream checkIs = client.pods().inNamespace(currentNamespace) + final InputStream checkIs = client.pods().inNamespace(session.getNamespace()) .withName(pod1.getMetadata().getName()).file("/tmp/uploadDir/"+fileName).read(); final ByteArrayOutputStream resultOs = new ByteArrayOutputStream() ) { @@ -330,12 +313,13 @@ public void uploadDir() throws IOException { @Test public void copyFile() throws IOException { // Wait for resources to get ready - ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(podReady); + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").get(); + ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), session.getNamespace()); + await().atMost(POD_READY_WAIT_IN_SECONDS, TimeUnit.SECONDS).until(podReady); File tmpDir = Files.createTempDir(); - ExecWatch watch = client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).writingOutput(System.out).exec("sh", "-c", "echo 'hello' > /msg"); - client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).file("/msg").copy(tmpDir.toPath()); + ExecWatch watch = client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()).writingOutput(System.out).exec("sh", "-c", "echo 'hello' > /msg"); + client.pods().inNamespace(session.getNamespace()).withName(pod1.getMetadata().getName()).file("/msg").copy(tmpDir.toPath()); File msg = tmpDir.toPath().resolve("msg").toFile(); assertTrue(msg.exists()); try (InputStream is = new FileInputStream(msg)) { @@ -347,10 +331,11 @@ public void copyFile() throws IOException { @Test public void listFromServer() { // Wait for resources to get ready - ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(podReady); + pod1 = client.pods().inNamespace(session.getNamespace()).withName("pod-standard").get(); + ReadyEntity podReady = new ReadyEntity<>(Pod.class, client, pod1.getMetadata().getName(), session.getNamespace()); + await().atMost(POD_READY_WAIT_IN_SECONDS, TimeUnit.SECONDS).until(podReady); - List resources = client.resourceList(pod1).inNamespace(currentNamespace).fromServer().get(); + List resources = client.resourceList(pod1).inNamespace(session.getNamespace()).fromServer().get(); assertNotNull(resources); assertEquals(1, resources.size()); @@ -359,13 +344,13 @@ public void listFromServer() { HasMetadata fromServerPod = resources.get(0); assertEquals(pod1.getKind(), fromServerPod.getKind()); - assertEquals(currentNamespace, fromServerPod.getMetadata().getNamespace()); + assertEquals(session.getNamespace(), fromServerPod.getMetadata().getNamespace()); assertEquals(pod1.getMetadata().getName(), fromServerPod.getMetadata().getName()); } - @After - public void cleanup() throws Exception { - client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).delete(); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(NetworkPolicyIT.class.getResourceAsStream("/pod-it.yml")); } } diff --git 
a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/PodSecurityPolicyIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/PodSecurityPolicyIT.java index 6c8abcc5f70..d6783b70d0d 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/PodSecurityPolicyIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/PodSecurityPolicyIT.java @@ -16,12 +16,11 @@ package io.fabric8.kubernetes; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.DeleteEntity; import io.fabric8.kubernetes.api.model.policy.PodSecurityPolicy; -import io.fabric8.kubernetes.api.model.policy.PodSecurityPolicyBuilder; import io.fabric8.kubernetes.api.model.policy.PodSecurityPolicyList; import io.fabric8.kubernetes.client.KubernetesClient; -import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; @@ -31,8 +30,9 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import org.junit.Before; -import org.junit.After; + +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -46,28 +46,9 @@ public class PodSecurityPolicyIT { @ArquillianResource KubernetesClient client; - @ArquillianResource - Session session; - - private PodSecurityPolicy podSecurityPolicy; - - @Before - public void init(){ - - podSecurityPolicy = new PodSecurityPolicyBuilder().withNewMetadata() - .withName("test-example") - .addToLabels("foo","bar") - .endMetadata() - .withNewSpec() - .withPrivileged(false) - .withNewRunAsUser().withRule("RunAsAny").endRunAsUser() - .withNewFsGroup().withRule("RunAsAny").endFsGroup() - .withNewSeLinux().withRule("RunAsAny").endSeLinux() - .withNewSupplementalGroups().withRule("RunAsAny").endSupplementalGroups() - .endSpec() - .build(); - - client.policy().podSecurityPolicies().create(podSecurityPolicy); + @BeforeClass + public static void init() { + ClusterEntity.apply(PodSecurityPolicyIT.class.getResourceAsStream("/podsecuritypolicy-it.yml")); } @Test @@ -87,11 +68,10 @@ public void load() { @Test public void get() { - PodSecurityPolicy getPodSecurityPolicy = client.policy().podSecurityPolicies() - .withName("test-example").get(); + .withName("psp-get").get(); assertNotNull(getPodSecurityPolicy); - assertEquals("test-example", getPodSecurityPolicy.getMetadata().getName()); + assertEquals("psp-get", getPodSecurityPolicy.getMetadata().getName()); } @Test @@ -101,7 +81,7 @@ public void list() { .withLabels(Collections.singletonMap("foo","bar")).list(); assertNotNull(podSecurityPolicyList); assertEquals(1,podSecurityPolicyList.getItems().size()); - assertEquals("test-example",podSecurityPolicyList.getItems().get(0).getMetadata().getName()); + assertEquals("psp-list",podSecurityPolicyList.getItems().get(0).getMetadata().getName()); assertEquals("RunAsAny",podSecurityPolicyList.getItems().get(0).getSpec().getRunAsUser().getRule()); assertEquals("RunAsAny",podSecurityPolicyList.getItems().get(0).getSpec().getFsGroup().getRule()); assertEquals("RunAsAny",podSecurityPolicyList.getItems().get(0).getSpec().getSeLinux().getRule()); @@ -111,34 +91,31 @@ public void list() { @Test public void update(){ - podSecurityPolicy = client.policy().podSecurityPolicies().withName("test-example").edit() + PodSecurityPolicy podSecurityPolicy = 
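The removed @Before shows the spec that podsecuritypolicy-it.yml presumably reproduces for each fixture (psp-get, psp-list, psp-update, psp-delete). Because list() still filters on foo=bar and expects exactly one match, only psp-list would carry that label; in builder form that fixture would be roughly:

    PodSecurityPolicy pspList = new PodSecurityPolicyBuilder()
        .withNewMetadata().withName("psp-list").addToLabels("foo", "bar").endMetadata()
        .withNewSpec()
          .withPrivileged(false)
          .withNewRunAsUser().withRule("RunAsAny").endRunAsUser()
          .withNewFsGroup().withRule("RunAsAny").endFsGroup()
          .withNewSeLinux().withRule("RunAsAny").endSeLinux()
          .withNewSupplementalGroups().withRule("RunAsAny").endSupplementalGroups()
        .endSpec()
        .build();
    client.policy().podSecurityPolicies().createOrReplace(pspList);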
client.policy().podSecurityPolicies().withName("psp-update").edit() .editSpec().withPrivileged(true).endSpec() .done(); assertNotNull(podSecurityPolicy); - assertEquals("test-example",podSecurityPolicy.getMetadata().getName()); + assertEquals("psp-update", podSecurityPolicy.getMetadata().getName()); assertTrue(podSecurityPolicy.getSpec().getPrivileged()); - assertEquals("RunAsAny",podSecurityPolicy.getSpec().getRunAsUser().getRule()); - assertEquals("RunAsAny",podSecurityPolicy.getSpec().getFsGroup().getRule()); - assertEquals("RunAsAny",podSecurityPolicy.getSpec().getSeLinux().getRule()); - assertEquals("RunAsAny",podSecurityPolicy.getSpec().getSupplementalGroups().getRule()); + assertEquals("RunAsAny", podSecurityPolicy.getSpec().getRunAsUser().getRule()); + assertEquals("RunAsAny", podSecurityPolicy.getSpec().getFsGroup().getRule()); + assertEquals("RunAsAny", podSecurityPolicy.getSpec().getSeLinux().getRule()); + assertEquals("RunAsAny", podSecurityPolicy.getSpec().getSupplementalGroups().getRule()); } @Test public void delete(){ - boolean deleted = client.policy().podSecurityPolicies().delete(podSecurityPolicy); + boolean deleted = client.policy().podSecurityPolicies().withName("psp-delete").delete(); assertTrue(deleted); - DeleteEntity deleteEntity = new DeleteEntity<>(PodSecurityPolicy.class, client, "test-example", null); + DeleteEntity deleteEntity = new DeleteEntity<>(PodSecurityPolicy.class, client, "psp-delete", null); await().atMost(30, TimeUnit.SECONDS).until(deleteEntity); - PodSecurityPolicyList podSecurityPolicyList = client.policy().podSecurityPolicies().list(); - assertEquals(0,podSecurityPolicyList.getItems().size()); } - @After - public void cleanup() { - client.policy().podSecurityPolicies().withName("test-example").delete(); - DeleteEntity deleteEntity = new DeleteEntity<>(PodSecurityPolicy.class, client, "test-example", null); - await().atMost(30, TimeUnit.SECONDS).until(deleteEntity); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(PodSecurityPolicyIT.class.getResourceAsStream("/podsecuritypolicy-it.yml")); } + } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RawCustomResourceIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RawCustomResourceIT.java index c2a707105da..ca3584810e0 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RawCustomResourceIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RawCustomResourceIT.java @@ -51,8 +51,6 @@ public class RawCustomResourceIT { private CustomResourceDefinitionContext customResourceDefinitionContext; - private CustomResourceDefinitionContext customResourceDefinitionContextWithOpenAPIV3Schema; - @Before public void initCustomResourceDefinition() { currentNamespace = session.getNamespace(); @@ -68,18 +66,6 @@ public void initCustomResourceDefinition() { .withPlural("animals") .withScope("Namespaced") .build(); - - // Create a Custom Resource Definition with OpenAPIV3 validation schema - CustomResourceDefinition aComplexCrd = client.customResourceDefinitions().load(getClass().getResourceAsStream("/kafka-crd.yml")).get(); - client.customResourceDefinitions().create(aComplexCrd); - - customResourceDefinitionContextWithOpenAPIV3Schema = new CustomResourceDefinitionContext.Builder() - .withName("kafkas.kafka.strimzi.io") - .withGroup("kafka.strimzi.io") - .withPlural("kafkas") - .withScope("Namespaced") - .withVersion("v1beta1") - .build(); } @Test @@ -120,14 +106,8 @@ public void testCrud() throws IOException { object = 
client.customResource(customResourceDefinitionContext).edit(currentNamespace, "walrus", new ObjectMapper().writeValueAsString(object)); assertThat(((HashMap)object.get("spec")).get("image")).isEqualTo("my-updated-awesome-walrus-image"); - // Test creation with openAPIV3Schema - Map ret = client.customResource(customResourceDefinitionContextWithOpenAPIV3Schema).create(currentNamespace, getClass().getResourceAsStream("/kafka-cr.yml")); - assertThat(ret).isNotNull(); - assertThat(((Map)ret.get("metadata")).get("name")).isEqualTo("kafka-single"); - // Test Delete: client.customResource(customResourceDefinitionContext).delete(currentNamespace, "otter"); - client.customResource(customResourceDefinitionContextWithOpenAPIV3Schema).delete(currentNamespace, "kafka-single"); client.customResource(customResourceDefinitionContext).delete(currentNamespace); } @@ -135,6 +115,5 @@ public void testCrud() throws IOException { public void cleanup() { // Delete Custom Resource Definition Animals: client.customResourceDefinitions().withName(customResourceDefinitionContext.getName()).delete(); - client.customResourceDefinitions().withName(customResourceDefinitionContextWithOpenAPIV3Schema.getName()).delete(); } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ReplicaSetIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ReplicaSetIT.java index 967bcea1f56..caeb112d163 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ReplicaSetIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ReplicaSetIT.java @@ -16,27 +16,19 @@ package io.fabric8.kubernetes; -import io.fabric8.commons.DeleteEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.ReadyEntity; -import io.fabric8.kubernetes.api.model.EnvVar; -import io.fabric8.kubernetes.api.model.Quantity; import io.fabric8.kubernetes.api.model.apps.ReplicaSet; -import io.fabric8.kubernetes.api.model.apps.ReplicaSetBuilder; import io.fabric8.kubernetes.api.model.apps.ReplicaSetList; import io.fabric8.kubernetes.client.KubernetesClient; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; import java.util.concurrent.TimeUnit; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -57,57 +49,14 @@ public class ReplicaSetIT { private ReplicaSet replicaset1; - private String currentNamespace; - - @Before - public void init() { - currentNamespace = session.getNamespace(); - Map requests = new HashMap<>(); - requests.put("cpu", new Quantity("100m")); - requests.put("memory", new Quantity("100Mi")); - - List envVarList = new ArrayList<>(); - envVarList.add(new EnvVar("name", "GET_HOSTS_FROM", null)); - envVarList.add(new EnvVar("value", "dns", null)); - - replicaset1 = new ReplicaSetBuilder() - .withNewMetadata() - .withName("replicaset1") - .addToLabels("app", "guestbook") - .addToLabels("tier", "frontend") - .endMetadata() - .withNewSpec() - .withReplicas(1) - .withNewSelector() - .withMatchLabels(Collections.singletonMap("tier", "frontend")) - .endSelector() - .withNewTemplate() - .withNewMetadata() - .addToLabels("app", "guestbook") - .addToLabels("tier", 
"frontend") - .endMetadata() - .withNewSpec() - .addNewContainer() - .withName("busybox") - .withImage("busybox") - .withCommand("sleep","36000") - .withNewResources() - .withRequests(requests) - .endResources() - .withEnv(envVarList) - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - - client.apps().replicaSets().inNamespace(currentNamespace).createOrReplace(replicaset1); + @BeforeClass + public static void init() { + ClusterEntity.apply(ReplicaSetIT.class.getResourceAsStream("/replicaset-it.yml")); } @Test public void load() { - String currentNamespace = session.getNamespace(); - ReplicaSet replicaSet = client.apps().replicaSets().inNamespace(currentNamespace) + ReplicaSet replicaSet = client.apps().replicaSets().inNamespace(session.getNamespace()) .load(getClass().getResourceAsStream("/test-replicaset.yml")).get(); assertThat(replicaSet).isNotNull(); assertEquals("frontend", replicaSet.getMetadata().getName()); @@ -115,23 +64,23 @@ public void load() { @Test public void get() { - replicaset1 = client.apps().replicaSets().inNamespace(currentNamespace).withName("replicaset1").get(); + replicaset1 = client.apps().replicaSets().inNamespace(session.getNamespace()).withName("replicaset-get").get(); assertNotNull(replicaset1); } @Test public void list() { - ReplicaSetList replicaSetList = client.apps().replicaSets().inNamespace(currentNamespace).list(); + ReplicaSetList replicaSetList = client.apps().replicaSets().inNamespace(session.getNamespace()).list(); assertThat(replicaSetList).isNotNull(); assertTrue(replicaSetList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity replicaSetReady = new ReadyEntity<>(ReplicaSet.class, client, "replicaset1", currentNamespace); + ReadyEntity replicaSetReady = new ReadyEntity<>(ReplicaSet.class, client, "replicaset-update", session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(replicaSetReady); - replicaset1 = client.apps().replicaSets().inNamespace(currentNamespace).withName("replicaset1").edit() + replicaset1 = client.apps().replicaSets().inNamespace(session.getNamespace()).withName("replicaset-update").edit() .editSpec().withReplicas(2).endSpec().done(); assertThat(replicaset1).isNotNull(); assertEquals(2, replicaset1.getSpec().getReplicas().intValue()); @@ -139,19 +88,14 @@ public void update() { @Test public void delete() { - ReadyEntity replicaSetReady = new ReadyEntity<>(ReplicaSet.class, client, "replicaset1", currentNamespace); + ReadyEntity replicaSetReady = new ReadyEntity<>(ReplicaSet.class, client, "replicaset-delete", session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(replicaSetReady); - boolean bDeleted = client.apps().replicaSets().inNamespace(currentNamespace).withName("replicaset1").delete(); + boolean bDeleted = client.apps().replicaSets().inNamespace(session.getNamespace()).withName("replicaset-delete").delete(); assertTrue(bDeleted); } - @After - public void cleanup() throws InterruptedException { - if (client.apps().replicaSets().inNamespace(currentNamespace).list().getItems().size()!= 0) { - client.apps().replicaSets().inNamespace(currentNamespace).delete(); - } - // Wait for resources to get destroyed - DeleteEntity replicaSetDelete = new DeleteEntity<>(ReplicaSet.class, client, "replicaset1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(replicaSetDelete); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(ReplicaSetIT.class.getResourceAsStream("/replicaset-it.yml")); } } diff --git 
a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ReplicationControllerIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ReplicationControllerIT.java index 7acb76131ad..77c72263076 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ReplicationControllerIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ReplicationControllerIT.java @@ -16,24 +16,21 @@ package io.fabric8.kubernetes; -import io.fabric8.commons.DeleteEntity; -import io.fabric8.commons.ReadyEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.kubernetes.api.model.*; import io.fabric8.kubernetes.client.KubernetesClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; -import java.util.concurrent.TimeUnit; import static junit.framework.TestCase.assertNotNull; import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -48,70 +45,47 @@ public class ReplicationControllerIT { private ReplicationController rc1; - private String currentNamespace; - - @Before - public void init() { - currentNamespace = session.getNamespace(); - rc1 = new ReplicationControllerBuilder() - .withNewMetadata().withName("nginx-controller").addToLabels("server", "nginx").endMetadata() - .withNewSpec().withReplicas(3) - .withNewTemplate() - .withNewMetadata().addToLabels("server", "nginx").endMetadata() - .withNewSpec() - .addNewContainer().withName("nginx").withImage("nginx") - .addNewPort().withContainerPort(80).endPort() - .endContainer() - .endSpec() - .endTemplate() - .endSpec().build(); - - client.replicationControllers().inNamespace(currentNamespace).createOrReplace(rc1); + @BeforeClass + public static void init() { + ClusterEntity.apply(ReplicationControllerIT.class.getResourceAsStream("/replicationcontroller-it.yml")); } @Test public void load() { - ReplicationController aReplicationController = client.replicationControllers().inNamespace(currentNamespace) + ReplicationController aReplicationController = client.replicationControllers().inNamespace(session.getNamespace()) .load(getClass().getResourceAsStream("/test-replicationcontroller.yml")).get(); assertThat(aReplicationController).isNotNull(); assertEquals("nginx", aReplicationController.getMetadata().getName()); - assertEquals(3, aReplicationController.getSpec().getReplicas().intValue()); + assertEquals(3, (int) aReplicationController.getSpec().getReplicas()); } @Test public void get() { - rc1 = client.replicationControllers().inNamespace(currentNamespace).withName("nginx-controller").get(); + rc1 = client.replicationControllers().inNamespace(session.getNamespace()).withName("rc-get").get(); assertNotNull(rc1); } @Test public void list() { - ReplicationControllerList aRcList = client.replicationControllers().inNamespace(currentNamespace).list(); + ReplicationControllerList aRcList = client.replicationControllers().inNamespace(session.getNamespace()).list(); assertThat(aRcList).isNotNull(); - assertEquals(1, aRcList.getItems().size()); + assertTrue(aRcList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity 
replicationControllerReady = new ReadyEntity<>(ReplicationController.class, client, "nginx-controller", currentNamespace); - rc1 = client.replicationControllers().inNamespace(currentNamespace).withName("nginx-controller").scale(5); + rc1 = client.replicationControllers().inNamespace(session.getNamespace()).withName("rc-update").scale(5); assertEquals(5, rc1.getSpec().getReplicas().intValue()); } @Test public void delete() { - ReadyEntity replicationControllerReady = new ReadyEntity<>(ReplicationController.class, client, "nginx-controller", currentNamespace); - assertTrue(client.replicationControllers().inNamespace(currentNamespace).withName("nginx-controller").delete()); + assertTrue(client.replicationControllers().inNamespace(session.getNamespace()).withName("rc-delete").delete()); } - @After - public void cleanup() throws InterruptedException { - if (client.replicationControllers().inNamespace(currentNamespace).list().getItems().size()!= 0) { - client.replicationControllers().inNamespace(currentNamespace).delete(); - } - // Wait for resources to get destroyed - DeleteEntity replicationControllerDelete = new DeleteEntity<>(ReplicationController.class, client, "nginx-controller", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(replicationControllerDelete); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(ReplicationControllerIT.class.getResourceAsStream("/replicationcontroller-it.yml")); } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ResourceIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ResourceIT.java index 11dc92f09ce..e1fe35c68b3 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ResourceIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ResourceIT.java @@ -15,6 +15,7 @@ */ package io.fabric8.kubernetes; +import io.fabric8.commons.ClusterEntity; import io.fabric8.kubernetes.api.model.ConfigMap; import io.fabric8.kubernetes.api.model.ConfigMapBuilder; import io.fabric8.kubernetes.api.model.DeletionPropagation; @@ -30,27 +31,23 @@ import io.fabric8.kubernetes.api.model.apps.Deployment; import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder; import io.fabric8.kubernetes.client.KubernetesClient; -import io.fabric8.openshift.client.OpenShiftClient; -import org.apache.commons.lang.RandomStringUtils; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; -import java.sql.Time; import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import static junit.framework.TestCase.assertNotNull; import static org.awaitility.Awaitility.await; -import static org.awaitility.Awaitility.waitAtMost; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -63,10 +60,6 @@ public class ResourceIT { @ArquillianResource Session session; - private Pod pod1; - - private String currentNamespace; - private Deployment deployment = new DeploymentBuilder() .withNewMetadata().withName("deploy1").endMetadata() .withNewSpec() @@ -85,22 +78,14 @@ public class ResourceIT { .endSpec() .build(); - 
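update() above scales rc-update to five replicas and asserts on the returned spec straight away; if a test ever needs the new pods to actually exist before continuing, the scale(count, wait) overload blocks until the controller reports the desired count:

    ReplicationController scaled = client.replicationControllers()
        .inNamespace(session.getNamespace())
        .withName("rc-update")
        .scale(5, true);                 // true = wait for the scale-up to be observed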
@Before - public void init() { - currentNamespace = session.getNamespace(); - pod1 = new PodBuilder() - .withNewMetadata().withName("resource-pod-" + RandomStringUtils.randomAlphanumeric(6).toLowerCase(Locale.ROOT)).endMetadata() - .withNewSpec() - .addNewContainer().withName("nginx").withImage("nginx").endContainer() - .endSpec() - .build(); - - client.resource(pod1).inNamespace(currentNamespace).createOrReplace(); + @BeforeClass + public static void init() { + ClusterEntity.apply(ResourceIT.class.getResourceAsStream("/resource-it.yml")); } @Test public void get() { - assertNotNull(client.pods().inNamespace(currentNamespace).withName(pod1.getMetadata().getName()).get()); + assertNotNull(client.pods().inNamespace(session.getNamespace()).withName("resource-pod-get").get()); } @Test @@ -112,13 +97,13 @@ public void list() { .endSpec() .build(); client.resourceList(new PodListBuilder().withItems(listPod1).build()) - .inNamespace(currentNamespace) - .apply(); + .inNamespace(session.getNamespace()) + .createOrReplace(); - assertTrue(client.pods().inNamespace(currentNamespace).withName("pod3") != null); + Assert.assertNotNull(client.pods().inNamespace(session.getNamespace()).withName("pod3")); boolean bDeleted = client.resourceList(new PodListBuilder().withItems(listPod1).build()) - .inNamespace(currentNamespace) + .inNamespace(session.getNamespace()) .delete(); assertTrue(bDeleted); } @@ -142,17 +127,17 @@ public void createOrReplace() { KubernetesList list = new KubernetesListBuilder().withItems(deployment, service, configMap).build(); // Create them for the first time - client.resourceList(list).inNamespace(currentNamespace).createOrReplace(); + client.resourceList(list).inNamespace(session.getNamespace()).createOrReplace(); // Modify - service = client.services().inNamespace(currentNamespace).withName("my-service").get(); + service = client.services().inNamespace(session.getNamespace()).withName("my-service").get(); service.getSpec().getPorts().get(0).setTargetPort(new IntOrString(9998)); configMap.getData().put("test", "createOrReplace"); configMap.getData().put("io", "fabric8"); // Issue createOrReplace() list = new KubernetesListBuilder().withItems(deployment, service, configMap).build(); - List createdObjects = client.resourceList(list).inNamespace(currentNamespace).createOrReplace(); + List createdObjects = client.resourceList(list).inNamespace(session.getNamespace()).createOrReplace(); // Assert whether objects have been modified createdObjects.forEach((HasMetadata object) -> { @@ -165,69 +150,66 @@ public void createOrReplace() { }); // Cleanup - client.resourceList(list).inNamespace(currentNamespace).deletingExisting(); + client.resourceList(list).inNamespace(session.getNamespace()).deletingExisting(); } @Test public void delete() { + Pod pod1 = client.pods().inNamespace(session.getNamespace()).withName("resource-pod-delete").get(); await().atMost(30, TimeUnit.SECONDS).until(resourceIsReady(pod1)); - assertTrue(client.resource(pod1).inNamespace(currentNamespace).delete()); + assertTrue(client.resource(pod1).inNamespace(session.getNamespace()).delete()); } @Test - public void testDeletionWithOrphanDeletion() throws InterruptedException { + public void testDeletionWithOrphanDeletion() { // Create Deployment - client.resource(deployment).inNamespace(currentNamespace).createOrReplace(); + client.resource(deployment).inNamespace(session.getNamespace()).createOrReplace(); await().atMost(30, TimeUnit.SECONDS).until(resourceIsReady(deployment)); // Check whether child resources are also 
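list() above applies a PodList through resourceList() and only checks that the handle is non-null; when the created objects must be ready before further assertions, the same list handle can block for readiness, assuming the client version in use exposes waitUntilReady on resource-list handles:

    client.resourceList(new PodListBuilder().withItems(listPod1).build())
        .inNamespace(session.getNamespace())
        .waitUntilReady(30, TimeUnit.SECONDS);    // polls until every item in the list reports ready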
created - assertEquals(1, client.apps().replicaSets().inNamespace(currentNamespace).withLabel("run", "deploy1").list().getItems().size()); + assertEquals(1, client.apps().replicaSets().inNamespace(session.getNamespace()).withLabel("run", "deploy1").list().getItems().size()); // Delete deployment - Boolean deleted = client.resource(deployment).inNamespace(currentNamespace).withPropagationPolicy(DeletionPropagation.BACKGROUND).delete(); + Boolean deleted = client.resource(deployment).inNamespace(session.getNamespace()).withPropagationPolicy(DeletionPropagation.BACKGROUND).delete(); assertTrue(deleted); // Check whether child resources are also deleted await().atMost(30, TimeUnit.SECONDS) - .until(() -> client.apps().replicaSets().inNamespace(currentNamespace).withLabel("run", "deploy1").list().getItems().size() == 0); + .until(() -> client.apps().replicaSets().inNamespace(session.getNamespace()).withLabel("run", "deploy1").list().getItems().size() == 0); } @Test - public void testDeletionWithoutOrphanDeletion() throws InterruptedException { + public void testDeletionWithoutOrphanDeletion() { // Create Deployment - client.resource(deployment).inNamespace(currentNamespace).createOrReplace(); + client.resource(deployment).inNamespace(session.getNamespace()).createOrReplace(); await().atMost(30, TimeUnit.SECONDS).until(resourceIsReady(deployment)); // Check whether child resources are also created - assertEquals(1, client.apps().replicaSets().inNamespace(currentNamespace).withLabel("run", "deploy1").list().getItems().size()); + assertEquals(1, client.apps().replicaSets().inNamespace(session.getNamespace()).withLabel("run", "deploy1").list().getItems().size()); // Delete deployment - Boolean deleted = client.resource(deployment).inNamespace(currentNamespace).withPropagationPolicy(DeletionPropagation.ORPHAN).delete(); + Boolean deleted = client.resource(deployment).inNamespace(session.getNamespace()).withPropagationPolicy(DeletionPropagation.ORPHAN).delete(); assertTrue(deleted); // wait till deployment is deleted await().atMost(30, TimeUnit.SECONDS) - .until(() -> client.apps().deployments().inNamespace(currentNamespace).withLabel("run", "deploy1").list().getItems().size() == 0); + .until(() -> client.apps().deployments().inNamespace(session.getNamespace()).withLabel("run", "deploy1").list().getItems().size() == 0); // Check whether child resources are not deleted, they should be alive - assertEquals(1, client.apps().replicaSets().inNamespace(currentNamespace).withLabel("run", "deploy1").list().getItems().size()); + assertEquals(1, client.apps().replicaSets().inNamespace(session.getNamespace()).withLabel("run", "deploy1").list().getItems().size()); // cleanup resources which are not cleaned up during cascade deletion - client.apps().replicaSets().inNamespace(currentNamespace).withLabel("run", "deploy1").delete(); - } - - - @After - public void cleanup() throws InterruptedException { - client.pods().inNamespace(currentNamespace).delete(); - // Wait for resources to get destroyed - Thread.sleep(30000); + client.apps().replicaSets().inNamespace(session.getNamespace()).withLabel("run", "deploy1").delete(); } private Callable resourceIsReady(HasMetadata resource) { - return () -> client.resource(resource).inNamespace(currentNamespace).get()!= null; + return () -> client.resource(resource).inNamespace(session.getNamespace()).get()!= null; } private Callable resourceCleanedUp(HasMetadata resource) { - return () -> client.resource(resource).inNamespace(currentNamespace).get() == null; + return () -> 
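The two deletion tests above cover the BACKGROUND and ORPHAN propagation policies; the third standard mode, FOREGROUND, is requested the same way and tells the API server to remove the dependents first and keep the owner until they are gone:

    Boolean deleted = client.resource(deployment)
        .inNamespace(session.getNamespace())
        .withPropagationPolicy(DeletionPropagation.FOREGROUND)
        .delete();
    assertTrue(deleted);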
client.resource(resource).inNamespace(session.getNamespace()).get() == null; } + @AfterClass + public static void cleanup() { + ClusterEntity.remove(ResourceIT.class.getResourceAsStream("/resource-it.yml")); + } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RoleBindingIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RoleBindingIT.java index 2adae97f1e1..4eed0204ab6 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RoleBindingIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RoleBindingIT.java @@ -15,19 +15,17 @@ */ package io.fabric8.kubernetes; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.DeleteEntity; import io.fabric8.kubernetes.api.model.rbac.RoleBinding; import io.fabric8.kubernetes.api.model.rbac.RoleBindingList; -import io.fabric8.kubernetes.api.model.rbac.RoleBindingBuilder; -import io.fabric8.kubernetes.api.model.rbac.RoleRefBuilder; -import io.fabric8.kubernetes.api.model.rbac.SubjectBuilder; import io.fabric8.kubernetes.client.KubernetesClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -38,7 +36,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeFalse; @RunWith(ArquillianConditionalRunner.class) @RequiresKubernetes @@ -52,45 +49,20 @@ public class RoleBindingIT { private RoleBinding roleBinding; - private String currentNamespace; - - @Before - public void init() { - - currentNamespace = session.getNamespace(); - - roleBinding = new RoleBindingBuilder() - .withNewMetadata() - .withName("read-jobs") - .withLabels(Collections.singletonMap("type", "io.fabric8.roleBindingIT")) - .endMetadata() - .addToSubjects(0, new SubjectBuilder() - .withApiGroup("rbac.authorization.k8s.io") - .withKind("User") - .withName("jane") - .withNamespace("default") - .build() - ) - .withRoleRef(new RoleRefBuilder() - .withApiGroup("rbac.authorization.k8s.io") - .withKind("Role") - .withName("job-reader") - .build() - ) - .build(); - - client.rbac().roleBindings().inNamespace(currentNamespace).createOrReplace(roleBinding); + @BeforeClass + public static void init() { + ClusterEntity.apply(RoleBindingIT.class.getResourceAsStream("/rolebinding-it.yml")); } @Test public void get() { - roleBinding = client.rbac().roleBindings().inNamespace(currentNamespace).withName("read-jobs").get(); + roleBinding = client.rbac().roleBindings().inNamespace(session.getNamespace()).withName("rb-get").get(); assertNotNull(roleBinding); assertEquals("RoleBinding", roleBinding.getKind()); assertNotNull(roleBinding.getMetadata()); - assertEquals("read-jobs", roleBinding.getMetadata().getName()); + assertEquals("rb-get", roleBinding.getMetadata().getName()); assertNotNull(roleBinding.getSubjects()); assertEquals(1, roleBinding.getSubjects().size()); assertEquals("rbac.authorization.k8s.io", roleBinding.getSubjects().get(0).getApiGroup()); @@ -106,7 +78,7 @@ public void get() { @Test public void load() { - RoleBinding aRoleBinding = client.rbac().roleBindings().inNamespace(currentNamespace) + RoleBinding aRoleBinding = 
client.rbac().roleBindings().inNamespace(session.getNamespace()) .load(getClass().getResourceAsStream("/test-kubernetesrolebinding.yml")).get(); assertNotNull(aRoleBinding); assertEquals("RoleBinding", aRoleBinding.getKind()); @@ -127,7 +99,7 @@ public void load() { @Test public void list() { - RoleBindingList roleBindingList = client.rbac().roleBindings().inNamespace(currentNamespace).withLabels(Collections.singletonMap("type", "io.fabric8.roleBindingIT")).list(); + RoleBindingList roleBindingList = client.rbac().roleBindings().inNamespace(session.getNamespace()).withLabels(Collections.singletonMap("type", "io.fabric8.roleBindingIT")).list(); assertNotNull(roleBindingList); assertNotNull(roleBindingList.getItems()); @@ -135,7 +107,7 @@ public void list() { assertNotNull(roleBindingList.getItems().get(0)); assertEquals("RoleBinding", roleBindingList.getItems().get(0).getKind()); assertNotNull(roleBindingList.getItems().get(0).getMetadata()); - assertEquals("read-jobs", roleBindingList.getItems().get(0).getMetadata().getName()); + assertEquals("rb-list", roleBindingList.getItems().get(0).getMetadata().getName()); assertNotNull(roleBindingList.getItems().get(0).getSubjects()); assertEquals(1, roleBindingList.getItems().get(0).getSubjects().size()); assertEquals("rbac.authorization.k8s.io", roleBindingList.getItems().get(0).getSubjects().get(0).getApiGroup()); @@ -151,13 +123,13 @@ public void list() { @Test public void update() { - roleBinding = client.rbac().roleBindings().inNamespace(currentNamespace).withName("read-jobs").edit() + roleBinding = client.rbac().roleBindings().inNamespace(session.getNamespace()).withName("rb-update").edit() .editSubject(0).withName("jane-new").endSubject().done(); assertNotNull(roleBinding); assertEquals("RoleBinding", roleBinding.getKind()); assertNotNull(roleBinding.getMetadata()); - assertEquals("read-jobs", roleBinding.getMetadata().getName()); + assertEquals("rb-update", roleBinding.getMetadata().getName()); assertNotNull(roleBinding.getSubjects()); assertEquals(1, roleBinding.getSubjects().size()); assertEquals("rbac.authorization.k8s.io", roleBinding.getSubjects().get(0).getApiGroup()); @@ -173,21 +145,20 @@ public void update() { @Test public void delete() { - Integer initialCountBeforeDeletion = client.rbac().roleBindings().inNamespace(currentNamespace).list().getItems().size(); - boolean deleted = client.rbac().roleBindings().inNamespace(currentNamespace).withName("read-jobs").delete(); + int initialCountBeforeDeletion = client.rbac().roleBindings().inNamespace(session.getNamespace()).list().getItems().size(); + boolean deleted = client.rbac().roleBindings().inNamespace(session.getNamespace()).withName("rb-delete").delete(); assertTrue(deleted); - DeleteEntity deleteEntity = new DeleteEntity<>(RoleBinding.class, client, "read-jobs", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(deleteEntity); + DeleteEntity deleteEntity = new DeleteEntity<>(RoleBinding.class, client, "rb-delete", session.getNamespace()); + await().atMost(60, TimeUnit.SECONDS).until(deleteEntity); - RoleBindingList roleBindingList = client.rbac().roleBindings().inNamespace(currentNamespace).list(); + RoleBindingList roleBindingList = client.rbac().roleBindings().inNamespace(session.getNamespace()).list(); assertEquals(initialCountBeforeDeletion - 1,roleBindingList.getItems().size()); } - @After - public void cleanup() { - client.rbac().roleBindings().inNamespace(currentNamespace).delete(); + @AfterClass + public static void cleanup() { +
ClusterEntity.remove(RoleBindingIT.class.getResourceAsStream("/rolebinding-it.yml")); } - } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RoleIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RoleIT.java index 193d858a244..800204dd796 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RoleIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/RoleIT.java @@ -15,28 +15,27 @@ */ package io.fabric8.kubernetes; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.DeleteEntity; import io.fabric8.kubernetes.api.model.rbac.Role; import io.fabric8.kubernetes.api.model.rbac.RoleList; -import io.fabric8.kubernetes.api.model.rbac.RoleBuilder; -import io.fabric8.kubernetes.api.model.rbac.PolicyRuleBuilder; import io.fabric8.kubernetes.client.KubernetesClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.runner.RunWith; -import org.junit.Before; import org.junit.Test; -import org.junit.After; +import java.util.Optional; import java.util.concurrent.TimeUnit; import static org.awaitility.Awaitility.await; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeFalse; @RunWith(ArquillianConditionalRunner.class) @RequiresKubernetes @@ -52,42 +51,20 @@ public class RoleIT { private String currentNamespace; - @Before - public void init() { - - currentNamespace = session.getNamespace(); - - // Do not run tests on opeshift 3.6.0 and 3.6.1 - assumeFalse(client.getVersion().getMajor().equalsIgnoreCase("1") - && client.getVersion().getMinor().startsWith("6")); - - Role role = new RoleBuilder() - .withNewMetadata() - .withName("job-reader") - .endMetadata() - .addToRules(0, new PolicyRuleBuilder() - .addToApiGroups(0,"batch") - .addToResourceNames(0,"my-job") - .addToResources(0,"jobs") - .addToVerbs(0, "get") - .addToVerbs(1, "watch") - .addToVerbs(2, "list") - .build() - ) - .build(); - - client.rbac().roles().inNamespace(currentNamespace).createOrReplace(role); + @BeforeClass + public static void init() { + ClusterEntity.apply(RoleIT.class.getResourceAsStream("/role-it.yml")); } @Test public void get() { - role = client.rbac().roles().inNamespace(currentNamespace).withName("job-reader").get(); + role = client.rbac().roles().inNamespace(currentNamespace).withName("role-get").get(); assertNotNull(role); assertEquals("Role", role.getKind()); assertNotNull(role.getMetadata()); - assertEquals("job-reader", role.getMetadata().getName()); + assertEquals("role-get", role.getMetadata().getName()); assertNotNull(role.getRules()); assertEquals(1, role.getRules().size()); assertNotNull(role.getRules().get(0).getApiGroups()); @@ -144,40 +121,42 @@ public void list() { assertNotNull(roleList); assertNotNull(roleList.getItems()); - assertEquals(1, roleList.getItems().size()); - assertNotNull(roleList.getItems().get(0)); - assertEquals("Role", roleList.getItems().get(0).getKind()); - assertNotNull(roleList.getItems().get(0).getMetadata()); - assertEquals("job-reader", roleList.getItems().get(0).getMetadata().getName()); - assertNotNull(roleList.getItems().get(0).getRules()); - assertEquals(1, roleList.getItems().get(0).getRules().size()); - 
assertNotNull(roleList.getItems().get(0).getRules().get(0).getApiGroups()); - assertEquals(1, roleList.getItems().get(0).getRules().get(0).getApiGroups().size()); - assertEquals("batch", roleList.getItems().get(0).getRules().get(0).getApiGroups().get(0)); - assertNotNull(roleList.getItems().get(0).getRules().get(0).getResourceNames()); - assertEquals(1, roleList.getItems().get(0).getRules().get(0).getResourceNames().size()); - assertEquals("my-job", roleList.getItems().get(0).getRules().get(0).getResourceNames().get(0)); - assertNotNull(roleList.getItems().get(0).getRules().get(0).getResources()); - assertEquals(1, roleList.getItems().get(0).getRules().get(0).getResources().size()); - assertEquals("jobs", roleList.getItems().get(0).getRules().get(0).getResources().get(0)); - assertNotNull(roleList.getItems().get(0).getRules().get(0).getVerbs()); - assertEquals(3, roleList.getItems().get(0).getRules().get(0).getVerbs().size()); - assertEquals("get", roleList.getItems().get(0).getRules().get(0).getVerbs().get(0)); - assertEquals("watch", roleList.getItems().get(0).getRules().get(0).getVerbs().get(1)); - assertEquals("list", roleList.getItems().get(0).getRules().get(0).getVerbs().get(2)); + assertTrue(roleList.getItems().size() >= 1); + Optional role = roleList.getItems().stream().filter(r -> r.getMetadata().getName().equals("role-list")).findFirst(); + assertTrue(role.isPresent()); + assertNotNull(role.get()); + assertEquals("Role", role.get().getKind()); + assertNotNull(role.get().getMetadata()); + assertEquals("role-list", role.get().getMetadata().getName()); + assertNotNull(role.get().getRules()); + assertEquals(1, role.get().getRules().size()); + assertNotNull(role.get().getRules().get(0).getApiGroups()); + assertEquals(1, role.get().getRules().get(0).getApiGroups().size()); + assertEquals("batch", role.get().getRules().get(0).getApiGroups().get(0)); + assertNotNull(role.get().getRules().get(0).getResourceNames()); + assertEquals(1, role.get().getRules().get(0).getResourceNames().size()); + assertEquals("my-job", role.get().getRules().get(0).getResourceNames().get(0)); + assertNotNull(role.get().getRules().get(0).getResources()); + assertEquals(1, role.get().getRules().get(0).getResources().size()); + assertEquals("jobs", role.get().getRules().get(0).getResources().get(0)); + assertNotNull(role.get().getRules().get(0).getVerbs()); + assertEquals(3, role.get().getRules().get(0).getVerbs().size()); + assertEquals("get", role.get().getRules().get(0).getVerbs().get(0)); + assertEquals("watch", role.get().getRules().get(0).getVerbs().get(1)); + assertEquals("list", role.get().getRules().get(0).getVerbs().get(2)); } @Test public void update() { - role = client.rbac().roles().inNamespace(currentNamespace).withName("job-reader").edit() + role = client.rbac().roles().inNamespace(currentNamespace).withName("role-update").edit() .editRule(0).addToApiGroups(1, "extensions").endRule().done(); assertNotNull(role); assertEquals("Role", role.getKind()); assertNotNull(role.getMetadata()); - assertEquals("job-reader", role.getMetadata().getName()); + assertEquals("role-update", role.getMetadata().getName()); assertNotNull(role.getRules()); assertEquals(1, role.getRules().size()); assertNotNull(role.getRules().get(0).getApiGroups()); @@ -200,21 +179,20 @@ public void update() { @Test public void delete() { - Integer countBeforeDeletion = client.rbac().roles().inNamespace(currentNamespace).list().getItems().size(); - boolean deleted = client.rbac().roles().inNamespace(currentNamespace).delete(); + int 
countBeforeDeletion = client.rbac().roles().inNamespace(currentNamespace).list().getItems().size(); + boolean deleted = client.rbac().roles().inNamespace(currentNamespace).withName("role-delete").delete(); assertTrue(deleted); - DeleteEntity deleteEntity = new DeleteEntity<>(Role.class, client, "job-reader", currentNamespace); + DeleteEntity deleteEntity = new DeleteEntity<>(Role.class, client, "role-delete", currentNamespace); await().atMost(30, TimeUnit.SECONDS).until(deleteEntity); RoleList roleList = client.rbac().roles().inNamespace(currentNamespace).list(); assertEquals(countBeforeDeletion - 1,roleList.getItems().size()); } - @After - public void cleanup() { - client.rbac().roles().inNamespace(currentNamespace).delete(); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(RoleIT.class.getResourceAsStream("/role-it.yml")); } - } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/SecretIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/SecretIT.java index 1bd8cf12b38..29b9d3f2fbd 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/SecretIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/SecretIT.java @@ -16,16 +16,23 @@ package io.fabric8.kubernetes; -import io.fabric8.commons.DeleteEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.ReadyEntity; -import io.fabric8.kubernetes.api.model.*; +import io.fabric8.kubernetes.api.model.Pod; +import io.fabric8.kubernetes.api.model.PodBuilder; +import io.fabric8.kubernetes.api.model.Secret; +import io.fabric8.kubernetes.api.model.SecretBuilder; +import io.fabric8.kubernetes.api.model.SecretList; +import io.fabric8.kubernetes.api.model.SecretVolumeSource; +import io.fabric8.kubernetes.api.model.SecretVolumeSourceBuilder; +import io.fabric8.kubernetes.api.model.Volume; import io.fabric8.kubernetes.client.KubernetesClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -50,16 +57,9 @@ public class SecretIT { private String currentNamespace; - @Before - public void init() { - currentNamespace = session.getNamespace(); - secret1 = new SecretBuilder() - .withNewMetadata().withName("secret1").endMetadata() - .addToData("username", "guccifer") - .addToData("password", "shadowgovernment") - .build(); - - client.secrets().inNamespace(currentNamespace).createOrReplace(secret1); + @BeforeClass + public static void init() { + ClusterEntity.apply(SecretIT.class.getResourceAsStream("/secret-it.yml")); } @Test @@ -71,7 +71,7 @@ public void load() { @Test public void get() { - secret1 = client.secrets().inNamespace(currentNamespace).withName("secret1").get(); + secret1 = client.secrets().inNamespace(currentNamespace).withName("secret-get").get(); assertNotNull(secret1); } @@ -79,36 +79,25 @@ public void get() { public void list() { SecretList aSecretList = client.secrets().inNamespace(currentNamespace).list(); assertNotNull(aSecretList); - assertTrue(aSecretList.getItems().size() > 1); + assertTrue(aSecretList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity secretReady = new ReadyEntity<>(Secret.class, client, "secret1", currentNamespace); - secret1 = 
client.secrets().inNamespace(currentNamespace).withName("secret1").edit() - .withType("Opaque") + ReadyEntity secretReady = new ReadyEntity<>(Secret.class, client, "secret-update", currentNamespace); + secret1 = client.secrets().inNamespace(currentNamespace).withName("secret-update").edit() + .editOrNewMetadata().addToLabels("foo", "bar").endMetadata() .done(); await().atMost(30, TimeUnit.SECONDS).until(secretReady); assertThat(secret1).isNotNull(); - assertEquals("Opaque", secret1.getType()); + assertEquals("bar", secret1.getMetadata().getLabels().get("foo")); } @Test - public void delete() - { - ReadyEntity secretReady = new ReadyEntity<>(Secret.class, client, "secret1", currentNamespace); + public void delete() { + ReadyEntity secretReady = new ReadyEntity<>(Secret.class, client, "secret-delete", currentNamespace); await().atMost(30, TimeUnit.SECONDS).until(secretReady); - assertTrue(client.secrets().inNamespace(currentNamespace).withName("secret1").delete()); - } - - @After - public void cleanup() throws InterruptedException { - if (client.secrets().inNamespace(currentNamespace).list().getItems().size()!= 0) { - client.secrets().inNamespace(currentNamespace).withName("secret1").delete(); - } - // Wait for resources to get destroyed - DeleteEntity secretDelete = new DeleteEntity<>(Secret.class, client, "secret1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(secretDelete); + assertTrue(client.secrets().inNamespace(currentNamespace).withName("secret-delete").delete()); } @Test @@ -126,7 +115,7 @@ public void testLoadInPod() { .build(); Pod pod1 = client.pods().inNamespace(currentNamespace).create(new PodBuilder() - .withNewMetadata().withName("pod1").endMetadata() + .withNewMetadata().withName("pod-secret1").endMetadata() .withNewSpec() .addNewContainer().withName("mysql").withImage("openshift/mysql-55-centos7").endContainer() .addNewVolume().withName("foo").withSecret(secretVolumeSource).endVolume() @@ -140,4 +129,9 @@ public void testLoadInPod() { .withName(aVolume.getSecret().getSecretName()).get(); assertThat(fetchedSecret).isNotNull(); } + + @AfterClass + public static void cleanup() { + ClusterEntity.remove(SecretIT.class.getResourceAsStream("/secret-it.yml")); + } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ServiceAccountIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ServiceAccountIT.java index ed9a49f96f0..4b92a04dca6 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ServiceAccountIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ServiceAccountIT.java @@ -16,7 +16,7 @@ package io.fabric8.kubernetes; -import io.fabric8.commons.DeleteEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.ReadyEntity; import io.fabric8.kubernetes.api.model.*; import io.fabric8.kubernetes.client.KubernetesClient; @@ -24,8 +24,8 @@ import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -43,31 +43,18 @@ public class ServiceAccountIT { KubernetesClient client; @ArquillianResource - Session session; - private ServiceAccount serviceAccount1, serviceAccount2; - - private String currentNamespace; + private ServiceAccount serviceAccount1; - @Before - public void init() { - 
currentNamespace = session.getNamespace(); - serviceAccount1 = new ServiceAccountBuilder() - .withNewMetadata().withName("serviceaccount1").endMetadata() - .build(); - serviceAccount2 = new ServiceAccountBuilder() - .withNewMetadata().withName("serviceaccount2").endMetadata() - .withAutomountServiceAccountToken(false) - .build(); - - client.serviceAccounts().inNamespace(currentNamespace).create(serviceAccount1); - client.serviceAccounts().inNamespace(currentNamespace).create(serviceAccount2); + @BeforeClass + public static void init() { + ClusterEntity.apply(ServiceAccountIT.class.getResourceAsStream("/serviceaccount-it.yml")); } @Test public void load() { - ServiceAccount svcAccount = client.serviceAccounts().inNamespace(currentNamespace) + ServiceAccount svcAccount = client.serviceAccounts().inNamespace(session.getNamespace()) .load(getClass().getResourceAsStream("/test-serviceaccount.yml")).get(); assertThat(svcAccount).isNotNull(); assertThat(svcAccount.getMetadata().getName()).isNotNull(); @@ -75,24 +62,22 @@ public void load() { @Test public void get() { - serviceAccount1 = client.serviceAccounts().inNamespace(currentNamespace).withName("serviceaccount1").get(); + serviceAccount1 = client.serviceAccounts().inNamespace(session.getNamespace()).withName("sa-get").get(); assertNotNull(serviceAccount1); - serviceAccount2 = client.serviceAccounts().inNamespace(currentNamespace).withName("serviceaccount2").get(); - assertNotNull(serviceAccount2); } @Test public void list() { - ServiceAccountList svcAccountList = client.serviceAccounts().inNamespace(currentNamespace).list(); + ServiceAccountList svcAccountList = client.serviceAccounts().inNamespace(session.getNamespace()).list(); assertThat(svcAccountList).isNotNull(); // Every namespace has a default service account resource. 
- assertTrue(svcAccountList.getItems().size() >= 2); + assertTrue(svcAccountList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity serviceAccountReady = new ReadyEntity<>(ServiceAccount.class, client, "serviceaccount1", currentNamespace); - serviceAccount1 = client.serviceAccounts().inNamespace(currentNamespace).withName("serviceaccount1").edit() + ReadyEntity serviceAccountReady = new ReadyEntity<>(ServiceAccount.class, client, "sa-update", session.getNamespace()); + serviceAccount1 = client.serviceAccounts().inNamespace(session.getNamespace()).withName("sa-update").edit() .addNewSecret().withName("default-token-uudp").endSecret() .addNewImagePullSecret().withName("myregistrykey").endImagePullSecret() .done(); @@ -102,20 +87,14 @@ public void update() { @Test public void delete() { - ReadyEntity serviceAccountReady = new ReadyEntity<>(ServiceAccount.class, client, "serviceaccount1", currentNamespace); + ReadyEntity serviceAccountReady = new ReadyEntity<>(ServiceAccount.class, client, "sa-delete", session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(serviceAccountReady); - boolean bDeleted = client.serviceAccounts().inNamespace(currentNamespace).withName("serviceaccount1").delete(); + boolean bDeleted = client.serviceAccounts().inNamespace(session.getNamespace()).withName("sa-delete").delete(); assertTrue(bDeleted); } - @After - public void cleanup() throws InterruptedException { - client.serviceAccounts().inNamespace(currentNamespace).delete(); - // Wait for resources to get destroyed - DeleteEntity serviceAccount1Delete = new DeleteEntity<>(ServiceAccount.class, client, "serviceaccount1", currentNamespace); - DeleteEntity serviceAccount2Delete = new DeleteEntity<>(ServiceAccount.class, client, "serviceaccount2", currentNamespace); - - await().atMost(30, TimeUnit.SECONDS).until(serviceAccount1Delete); - await().atMost(30, TimeUnit.SECONDS).until(serviceAccount2Delete); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(ServiceAccountIT.class.getResourceAsStream("/serviceaccount-it.yml")); } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ServiceIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ServiceIT.java index 70f98fe4199..dfa35ff27ac 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ServiceIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/ServiceIT.java @@ -16,23 +16,21 @@ package io.fabric8.kubernetes; -import io.fabric8.commons.DeleteEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.ReadyEntity; -import io.fabric8.kubernetes.api.model.*; -import io.fabric8.kubernetes.api.model.extensions.Ingress; +import io.fabric8.kubernetes.api.model.IntOrString; +import io.fabric8.kubernetes.api.model.Service; +import io.fabric8.kubernetes.api.model.ServiceList; import io.fabric8.kubernetes.client.KubernetesClient; -import io.fabric8.openshift.api.model.RouteBuilder; -import io.fabric8.openshift.client.OpenShiftClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; -import java.util.Collections; import java.util.concurrent.TimeUnit; import static 
junit.framework.TestCase.assertNotNull; @@ -50,80 +48,37 @@ public class ServiceIT { @ArquillianResource Session session; - private Service svc1, svc2; + private Service svc1; - private String currentNamespace; - - @Before - public void init() { - currentNamespace = session.getNamespace(); - svc1 = new ServiceBuilder() - .withNewMetadata() - .withName("svc1") - .endMetadata() - .withNewSpec() - .withSelector(Collections.singletonMap("app", "MyApp")) - .addNewPort() - .withName("http") - .withProtocol("TCP") - .withPort(80) - .withTargetPort(new IntOrString(9376)) - .endPort() - .withType("LoadBalancer") - .endSpec() - .withNewStatus() - .withNewLoadBalancer() - .addNewIngress() - .withIp("146.148.47.155") - .endIngress() - .endLoadBalancer() - .endStatus() - .build(); - svc2 = new ServiceBuilder() - .withNewMetadata().withName("svc2").endMetadata() - .withNewSpec().withType("ExternalName").withExternalName("my.database.example.com") - .addNewPort().withName("80").withProtocol("TCP").withPort(80).endPort() - .endSpec() - .withNewStatus() - .withNewLoadBalancer() - .addNewIngress() - .withIp("146.148.47.155") - .endIngress() - .endLoadBalancer() - .endStatus() - .build(); - - client.services().inNamespace(currentNamespace).createOrReplace(svc1); - client.services().inNamespace(currentNamespace).createOrReplace(svc2); + @BeforeClass + public static void init() { + ClusterEntity.apply(ServiceIT.class.getResourceAsStream("/service-it.yml")); } @Test public void load() { - String currentNamespace = session.getNamespace(); - Service aService = client.services().inNamespace(currentNamespace).load(getClass().getResourceAsStream("/test-service.yml")).get(); + Service aService = client.services().inNamespace(session.getNamespace()).load(getClass().getResourceAsStream("/test-service.yml")).get(); assertThat(aService).isNotNull(); assertEquals("my-service", aService.getMetadata().getName()); } @Test public void get() { - svc1 = client.services().inNamespace(currentNamespace).withName("svc1").get(); + svc1 = client.services().inNamespace(session.getNamespace()).withName("service-get").get(); assertNotNull(svc1); - svc2 = client.services().inNamespace(currentNamespace).withName("svc2").get(); - assertNotNull(svc2); } @Test public void list() { - ServiceList aServiceList = client.services().inNamespace(currentNamespace).list(); + ServiceList aServiceList = client.services().inNamespace(session.getNamespace()).list(); assertThat(aServiceList).isNotNull(); - assertEquals(2, aServiceList.getItems().size()); + assertTrue(aServiceList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity serviceReady = new ReadyEntity<>(Service.class, client, "svc1", currentNamespace); - svc1 = client.services().inNamespace(currentNamespace).withName("svc1").edit() + ReadyEntity serviceReady = new ReadyEntity<>(Service.class, client, "service-update", session.getNamespace()); + svc1 = client.services().inNamespace(session.getNamespace()).withName("service-update").edit() .editSpec().addNewPort().withName("https").withProtocol("TCP").withPort(443).withTargetPort(new IntOrString(9377)).endPort().endSpec() .done(); await().atMost(30, TimeUnit.SECONDS).until(serviceReady); @@ -133,57 +88,15 @@ public void update() { @Test public void delete() { - ReadyEntity serviceReady = new ReadyEntity<>(Service.class, client, "svc2", currentNamespace); + ReadyEntity serviceReady = new ReadyEntity<>(Service.class, client, "service-delete", session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(serviceReady); - 
boolean bDeleted = client.services().inNamespace(currentNamespace).withName("svc2").delete(); + boolean bDeleted = client.services().inNamespace(session.getNamespace()).withName("service-delete").delete(); assertTrue(bDeleted); } - @Test - public void getURL() { - // Testing NodePort Impl - String url = client.services().inNamespace(currentNamespace).withName("svc1").getURL("http"); - assertNotNull(url); - - // Testing Ingress Impl - Ingress ingress = client.extensions().ingresses().load(getClass().getResourceAsStream("/test-ingress.yml")).get(); - client.extensions().ingresses().inNamespace(currentNamespace).create(ingress); - - url = client.services().inNamespace(currentNamespace).withName("svc2").getURL("80"); - assertNotNull(url); - - // Testing OpenShift Route Impl - Service svc3 = client.services().inNamespace(currentNamespace).create(new ServiceBuilder() - .withNewMetadata().withName("svc3").endMetadata() - .withNewSpec() - .addNewPort().withName("80").withProtocol("TCP").withPort(80).endPort() - .endSpec() - .build()); - - OpenShiftClient openshiftClient = client.adapt(OpenShiftClient.class); - openshiftClient.routes().inNamespace(currentNamespace).create(new RouteBuilder() - .withNewMetadata().withName(svc3.getMetadata().getName()).endMetadata() - .withNewSpec() - .withHost("www.example.com") - .withNewTo().withName(svc3.getMetadata().getName()).withKind("Service").endTo() - .endSpec() - .build()); - - url = client.services().inNamespace(currentNamespace).withName("svc3").getURL("80"); - assertNotNull(url); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(ServiceIT.class.getResourceAsStream("/service-it.yml")); } - @After - public void cleanup() throws InterruptedException { - if (client.services().inNamespace(currentNamespace).list().getItems().size()!= 0) { - client.services().inNamespace(currentNamespace).delete(); - } - - // Wait for resources to get destroyed - DeleteEntity service1Delete = new DeleteEntity<>(Service.class, client, "svc1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(service1Delete); - - DeleteEntity service2Delete = new DeleteEntity<>(Service.class, client, "svc2", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(service2Delete); - } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/StatefulSetIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/StatefulSetIT.java index edd3f793ae3..bff41a3e57a 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/StatefulSetIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/StatefulSetIT.java @@ -16,23 +16,20 @@ package io.fabric8.kubernetes; -import io.fabric8.commons.DeleteEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.ReadyEntity; -import io.fabric8.kubernetes.api.model.Quantity; import io.fabric8.kubernetes.api.model.apps.StatefulSet; -import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder; import io.fabric8.kubernetes.api.model.apps.StatefulSetList; import io.fabric8.kubernetes.client.KubernetesClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; -import java.util.Collections; import 
java.util.concurrent.TimeUnit; import static junit.framework.TestCase.assertNotNull; @@ -52,56 +49,14 @@ public class StatefulSetIT { private StatefulSet ss1; - private String currentNamespace; - - @Before - public void init() { - currentNamespace = session.getNamespace(); - ss1 = new StatefulSetBuilder() - .withNewMetadata().withName("ss1").endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewSelector().withMatchLabels(Collections.singletonMap("app", "nginx")).endSelector() - .withNewTemplate() - .withNewMetadata() - .addToLabels("app", "nginx") - .endMetadata() - .withNewSpec() - .addNewContainer() - .withName("nginx") - .withImage("nginx") - .addNewPort() - .withContainerPort(80) - .withName("web") - .endPort() - .addNewVolumeMount() - .withName("www") - .withMountPath("/usr/share/nginx/html") - .endVolumeMount() - .endContainer() - .endSpec() - .endTemplate() - .addNewVolumeClaimTemplate() - .withNewMetadata() - .withName("www") - .endMetadata() - .withNewSpec() - .addToAccessModes("ReadWriteOnce") - .withNewResources() - .withRequests(Collections.singletonMap("storage", new Quantity("1Gi"))) - .endResources() - .endSpec() - .endVolumeClaimTemplate() - .endSpec() - .build(); - - client.apps().statefulSets().inNamespace(currentNamespace).create(ss1); + @BeforeClass + public static void init() { + ClusterEntity.apply(StatefulSetIT.class.getResourceAsStream("/statefulset-it.yml")); } @Test public void load() { - String currentNamespace = session.getNamespace(); - StatefulSet aStatefulSet = client.apps().statefulSets().inNamespace(currentNamespace) + StatefulSet aStatefulSet = client.apps().statefulSets().inNamespace(session.getNamespace()) .load(getClass().getResourceAsStream("/test-statefulset.yml")).get(); assertThat(aStatefulSet).isNotNull(); assertEquals("web", aStatefulSet.getMetadata().getName()); @@ -109,40 +64,36 @@ public void load() { @Test public void get() { - ss1 = client.apps().statefulSets().inNamespace(currentNamespace).withName("ss1").get(); + ss1 = client.apps().statefulSets().inNamespace(session.getNamespace()).withName("ss-get").get(); assertNotNull(ss1); } @Test public void list() { - StatefulSetList statefulSetList = client.apps().statefulSets().inNamespace(currentNamespace).list(); + StatefulSetList statefulSetList = client.apps().statefulSets().inNamespace(session.getNamespace()).list(); assertThat(statefulSetList).isNotNull(); - assertEquals(1, statefulSetList.getItems().size()); + assertTrue(statefulSetList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity statefulSetReady = new ReadyEntity<>(StatefulSet.class, client, "ss1", currentNamespace); - ss1 = client.apps().statefulSets().inNamespace(currentNamespace).withName("ss1").scale(5); + ReadyEntity statefulSetReady = new ReadyEntity<>(StatefulSet.class, client, "ss-update", session.getNamespace()); + ss1 = client.apps().statefulSets().inNamespace(session.getNamespace()).withName("ss-update").scale(5); await().atMost(30, TimeUnit.SECONDS).until(statefulSetReady); assertEquals(5, ss1.getSpec().getReplicas().intValue()); } @Test public void delete() { - ReadyEntity statefulSetReady = new ReadyEntity<>(StatefulSet.class, client, "ss1", currentNamespace); + ReadyEntity statefulSetReady = new ReadyEntity<>(StatefulSet.class, client, "ss-delete", session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(statefulSetReady); - boolean bDeleted = client.apps().statefulSets().inNamespace(currentNamespace).withName("ss1").delete(); + boolean bDeleted = 
client.apps().statefulSets().inNamespace(session.getNamespace()).withName("ss-delete").delete(); + assertTrue(bDeleted); } - @After - public void cleanup() throws InterruptedException { - if (client.apps().statefulSets().inNamespace(currentNamespace).list().getItems().size()!= 0) { - client.apps().statefulSets().inNamespace(currentNamespace).delete(); - } - // Wait for resources to get destroyed - DeleteEntity statefulSetDelete = new DeleteEntity<>(StatefulSet.class, client, "ss1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(statefulSetDelete); + @AfterClass + public static void cleanup() { + ClusterEntity.remove(StatefulSetIT.class.getResourceAsStream("/statefulset-it.yml")); } + } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/StorageClassIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/StorageClassIT.java index 89b429d8ae7..d82be88bb65 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/StorageClassIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/StorageClassIT.java @@ -15,9 +15,8 @@ */ package io.fabric8.kubernetes; -import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.commons.ClusterEntity; import io.fabric8.kubernetes.api.model.storage.StorageClass; -import io.fabric8.kubernetes.api.model.storage.StorageClassBuilder; import io.fabric8.kubernetes.api.model.storage.StorageClassList; import io.fabric8.kubernetes.client.KubernetesClient; import static junit.framework.TestCase.assertNotNull; @@ -27,14 +26,12 @@ import org.jboss.arquillian.test.api.ArquillianResource; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import org.junit.Before; + +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; -import java.util.HashMap; -import java.util.Map; -import java.util.UUID; - @RunWith(ArquillianConditionalRunner.class) @RequiresKubernetes public class StorageClassIT { @@ -46,25 +43,10 @@ public class StorageClassIT { private StorageClass storageClass; - private String name = UUID.randomUUID().toString(); - - - @Before - public void init() { - ObjectMeta metadata = new ObjectMeta(); - metadata.setName(name); - Map parameters = new HashMap<>(); - parameters.put("key", "value1"); - - storageClass = new StorageClassBuilder().withApiVersion("storage.k8s.io/v1") - .withKind("StorageClass") - .withMetadata(metadata) - .withParameters(parameters) - .withProvisioner("k8s.io/minikube-hostpath") - .build(); - - client.storage().storageClasses().createOrReplace(storageClass); + @BeforeClass + public static void init() { + ClusterEntity.apply(StorageClassIT.class.getResourceAsStream("/storageclass-it.yml")); } @Test @@ -76,7 +58,7 @@ public void load() { @Test public void get() { - storageClass = client.storage().storageClasses().withName(name).get(); + storageClass = client.storage().storageClasses().withName("storageclass-get").get(); assertNotNull(storageClass); } @@ -84,19 +66,24 @@ public void get() { public void list() { StorageClassList storageClassList = client.storage().storageClasses().list(); assertNotNull(storageClassList); - assertTrue(storageClassList.getItems().size() > 1); + assertTrue(storageClassList.getItems().size() >= 1); } @Test public void update() { - storageClass = 
client.storage().storageClasses().withName("storageclass-update").edit().editMetadata().addToLabels("testLabel", "testLabelValue").endMetadata().done(); assertNotNull(storageClass); assertEquals("testLabelValue", storageClass.getMetadata().getLabels().get("testLabel")); } @Test public void delete() { - assertTrue(client.storage().storageClasses().delete(storageClass)); + assertTrue(client.storage().storageClasses().withName("storageclass-delete").delete()); + } + + @AfterClass + public static void cleanup() { + ClusterEntity.remove(StorageClassIT.class.getResourceAsStream("/storageclass-it.yml")); } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/WaitUntilReadyIT.java b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/WaitUntilReadyIT.java index 6e163648e95..2d6841a2da5 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/kubernetes/WaitUntilReadyIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/kubernetes/WaitUntilReadyIT.java @@ -25,11 +25,12 @@ import org.arquillian.cube.kubernetes.impl.requirement.RequiresKubernetes; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; +import org.junit.FixMethodOrder; import org.junit.Test; import org.junit.runner.RunWith; +import org.junit.runners.MethodSorters; import java.util.Collections; -import java.util.Date; import java.util.concurrent.TimeUnit; @RunWith(ArquillianConditionalRunner.class) diff --git a/kubernetes-itests/src/test/java/io/fabric8/openshift/BuildConfigIT.java b/kubernetes-itests/src/test/java/io/fabric8/openshift/BuildConfigIT.java index 1ab6396d9bb..14f7576f0d8 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/openshift/BuildConfigIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/openshift/BuildConfigIT.java @@ -16,18 +16,17 @@ package io.fabric8.openshift; -import io.fabric8.commons.DeleteEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.ReadyEntity; import io.fabric8.openshift.api.model.BuildConfig; -import io.fabric8.openshift.api.model.BuildConfigBuilder; import io.fabric8.openshift.api.model.BuildConfigList; import io.fabric8.openshift.client.OpenShiftClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.openshift.impl.requirement.RequiresOpenshift; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -48,65 +47,14 @@ public class BuildConfigIT { @ArquillianResource Session session; - private BuildConfig buildConfig1, buildConfig2; - - private String currentNamespace; - - @Before - public void init() { - currentNamespace = session.getNamespace(); - buildConfig1 = new BuildConfigBuilder() - .withNewMetadata().withName("bc1").endMetadata() - .withNewSpec() - .addNewTrigger() - .withType("GitHub") - .withNewGithub() - .withSecret("secret101") - .endGithub() - .endTrigger() - .addNewTrigger() - .withType("Generic") - .withNewGeneric() - .withSecret("secret101") - .endGeneric() - .endTrigger() - .addNewTrigger() - .withType("ImageChange") - .endTrigger() - .withNewSource() - .withType("Git") - .withNewGit() - .withUri("https://github.com/openshift/ruby-hello-world") - .endGit() - .withDockerfile("FROM openshift/ruby-22-centos7\\nUSER example") - .endSource() - .withNewStrategy() - .withType("Source") - 
.withNewSourceStrategy() - .withNewFrom() - .withKind("ImageStreamTag") - .withName("origin-ruby-sample:latest") - .endFrom() - .endSourceStrategy() - .endStrategy() - .withNewOutput() - .withNewTo() - .withKind("ImageStreamTag") - .withName("origin-ruby-sample:latest") - .endTo() - .endOutput() - .withNewPostCommit() - .withScript("bundle exec rake test") - .endPostCommit() - .endSpec() - .build(); - - client.buildConfigs().inNamespace(currentNamespace).createOrReplace(buildConfig1); + @BeforeClass + public static void init() { + ClusterEntity.apply(BuildConfigIT.class.getResourceAsStream("/buildconfig-it.yml")); } @Test public void load() { - BuildConfig aBuildConfig = client.buildConfigs().inNamespace(currentNamespace) + BuildConfig aBuildConfig = client.buildConfigs().inNamespace(session.getNamespace()) .load(getClass().getResourceAsStream("/test-buildconfig.yml")).get(); assertThat(aBuildConfig).isNotNull(); assertEquals("ruby-sample-build", aBuildConfig.getMetadata().getName()); @@ -114,20 +62,20 @@ public void load() { @Test public void get() { - assertNotNull(client.buildConfigs().inNamespace(currentNamespace).withName("bc1").get()); + assertNotNull(client.buildConfigs().inNamespace(session.getNamespace()).withName("bc-get").get()); } @Test public void list() { - BuildConfigList bcList = client.buildConfigs().inNamespace(currentNamespace).list(); + BuildConfigList bcList = client.buildConfigs().inNamespace(session.getNamespace()).list(); assertThat(bcList).isNotNull(); - assertEquals(1, bcList.getItems().size()); + assertTrue(bcList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity buildConfigReady = new ReadyEntity<>(BuildConfig.class, client, "bc1", currentNamespace); - buildConfig1 = client.buildConfigs().inNamespace(currentNamespace).withName("bc1").edit() + ReadyEntity buildConfigReady = new ReadyEntity<>(BuildConfig.class, client, "bc-update", session.getNamespace()); + BuildConfig buildConfig1 = client.buildConfigs().inNamespace(session.getNamespace()).withName("bc-update").edit() .editSpec().withFailedBuildsHistoryLimit(5).endSpec().done(); await().atMost(30, TimeUnit.SECONDS).until(buildConfigReady); assertEquals(5, buildConfig1.getSpec().getFailedBuildsHistoryLimit().intValue()); @@ -135,19 +83,9 @@ public void update() { @Test public void delete() { - ReadyEntity buildConfigReady = new ReadyEntity<>(BuildConfig.class, client, "bc1", currentNamespace); + ReadyEntity buildConfigReady = new ReadyEntity<>(BuildConfig.class, client, "bc-delete", session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(buildConfigReady); - boolean bDeleted = client.buildConfigs().inNamespace(currentNamespace).withName("bc1").delete(); + boolean bDeleted = client.buildConfigs().inNamespace(session.getNamespace()).withName("bc-delete").delete(); assertTrue(bDeleted); } - - @After - public void cleanup() throws InterruptedException { - if (client.buildConfigs().list().getItems().size() != 0) { - client.buildConfigs().inNamespace(currentNamespace).delete(); - } - - DeleteEntity buildConfigDelete = new DeleteEntity<>(BuildConfig.class, client, "bc1", currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(buildConfigDelete); - } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/openshift/DeploymentConfigIT.java b/kubernetes-itests/src/test/java/io/fabric8/openshift/DeploymentConfigIT.java index 32ea4e84cea..ea258865141 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/openshift/DeploymentConfigIT.java +++ 
b/kubernetes-itests/src/test/java/io/fabric8/openshift/DeploymentConfigIT.java @@ -16,18 +16,16 @@ package io.fabric8.openshift; -import io.fabric8.commons.DeleteEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.ReadyEntity; import io.fabric8.openshift.api.model.DeploymentConfig; -import io.fabric8.openshift.api.model.DeploymentConfigBuilder; import io.fabric8.openshift.api.model.DeploymentConfigList; import io.fabric8.openshift.client.OpenShiftClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.openshift.impl.requirement.RequiresOpenshift; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -48,37 +46,14 @@ public class DeploymentConfigIT { @ArquillianResource Session session; - private DeploymentConfig deploymentConfig1; - - private String currentNamespace; - - @Before - public void init() { - currentNamespace = session.getNamespace(); - deploymentConfig1 = new DeploymentConfigBuilder() - .withNewMetadata().withName("deploymentconfig1").endMetadata() - .withNewSpec() - .withReplicas(2) - .withNewTemplate() - .withNewMetadata() - .addToLabels("app", "database") - .endMetadata() - .withNewSpec() - .addNewContainer() - .withName("mysql") - .withImage("openshift/mysql-55-centos7") - .endContainer() - .endSpec() - .endTemplate() - .endSpec() - .build(); - - client.deploymentConfigs().inNamespace(currentNamespace).create(deploymentConfig1); + @BeforeClass + public static void init() { + ClusterEntity.apply(DeploymentConfigIT.class.getResourceAsStream("/deploymentconfig-it.yml")); } @Test public void load() { - DeploymentConfig deploymentConfig = client.deploymentConfigs().inNamespace(currentNamespace) + DeploymentConfig deploymentConfig = client.deploymentConfigs().inNamespace(session.getNamespace()) .load(getClass().getResourceAsStream("/test-deploymentconfig.yml")).get(); assertThat(deploymentConfig).isNotNull(); assertEquals("frontend", deploymentConfig.getMetadata().getName()); @@ -86,20 +61,20 @@ public void load() { @Test public void get() { - assertNotNull(client.deploymentConfigs().inNamespace(currentNamespace).withName("deploymentconfig1").get()); + assertNotNull(client.deploymentConfigs().inNamespace(session.getNamespace()).withName("dc-get").get()); } @Test public void list() { - DeploymentConfigList aDeploymentConfigList = client.deploymentConfigs().inNamespace(currentNamespace).list(); + DeploymentConfigList aDeploymentConfigList = client.deploymentConfigs().inNamespace(session.getNamespace()).list(); assertThat(aDeploymentConfigList).isNotNull(); - assertEquals(1, aDeploymentConfigList.getItems().size()); + assertTrue(aDeploymentConfigList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity deploymentConfigReady = new ReadyEntity<>(DeploymentConfig.class, client, "deploymentconfig1", currentNamespace); - deploymentConfig1 = client.deploymentConfigs().inNamespace(currentNamespace).withName("deploymentconfig1").edit() + ReadyEntity deploymentConfigReady = new ReadyEntity<>(DeploymentConfig.class, client, "dc-update", session.getNamespace()); + DeploymentConfig deploymentConfig1 = client.deploymentConfigs().inNamespace(session.getNamespace()).withName("dc-update").edit() .editSpec().withReplicas(3).endSpec().done(); await().atMost(60, TimeUnit.SECONDS).until(deploymentConfigReady); 
assertThat(deploymentConfig1).isNotNull(); @@ -107,18 +82,11 @@ public void update() { } @Test - public void delete() throws InterruptedException { - ReadyEntity deploymentConfigReady = new ReadyEntity<>(DeploymentConfig.class, client, "deploymentconfig1", currentNamespace); + public void delete() { + ReadyEntity deploymentConfigReady = new ReadyEntity<>(DeploymentConfig.class, client, "dc-delete", session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(deploymentConfigReady); - boolean bDeleted = client.deploymentConfigs().inNamespace(currentNamespace).withName("deploymentconfig1").delete(); + boolean bDeleted = client.deploymentConfigs().inNamespace(session.getNamespace()).withName("dc-delete").delete(); assertTrue(bDeleted); } - @After - public void cleanup() throws InterruptedException { - client.deploymentConfigs().inNamespace(currentNamespace).delete(); - - DeleteEntity deploymentConfigDelete = new DeleteEntity<>(DeploymentConfig.class, client, "deploymentconfig1", currentNamespace); - await().atMost(90, TimeUnit.SECONDS).until(deploymentConfigDelete); - } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/openshift/ImageStreamIT.java b/kubernetes-itests/src/test/java/io/fabric8/openshift/ImageStreamIT.java index aa99b7d7335..bd5336802d6 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/openshift/ImageStreamIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/openshift/ImageStreamIT.java @@ -16,18 +16,16 @@ package io.fabric8.openshift; -import io.fabric8.commons.DeleteEntity; +import io.fabric8.commons.ClusterEntity; import io.fabric8.commons.ReadyEntity; import io.fabric8.openshift.api.model.ImageStream; -import io.fabric8.openshift.api.model.ImageStreamBuilder; import io.fabric8.openshift.api.model.ImageStreamList; import io.fabric8.openshift.client.OpenShiftClient; import org.arquillian.cube.kubernetes.api.Session; import org.arquillian.cube.openshift.impl.requirement.RequiresOpenshift; import org.arquillian.cube.requirement.ArquillianConditionalRunner; import org.jboss.arquillian.test.api.ArquillianResource; -import org.junit.After; -import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; @@ -48,46 +46,14 @@ public class ImageStreamIT { @ArquillianResource Session session; - private ImageStream imageStream1, imageStream2; - - private String currentNamespace; - - @Before - public void init() { - currentNamespace = session.getNamespace(); - imageStream1 = new ImageStreamBuilder() - .withNewMetadata() - .withName("example-camel-cdi") - .endMetadata() - .withNewSpec() - .addNewTag() - .withName("latest") - .endTag() - .withDockerImageRepository("fabric8/example-camel-cdi") - .endSpec() - .withNewStatus().withDockerImageRepository("").endStatus() - .build(); - - imageStream2 = new ImageStreamBuilder() - .withNewMetadata() - .withName("java-sti") - .endMetadata() - .withNewSpec() - .addNewTag() - .withName("latest") - .endTag() - .withDockerImageRepository("fabric8/java-sti") - .endSpec() - .withNewStatus().withDockerImageRepository("").endStatus() - .build(); - - client.imageStreams().inNamespace(currentNamespace).createOrReplace(imageStream1); - client.imageStreams().inNamespace(currentNamespace).createOrReplace(imageStream2); + @BeforeClass + public static void init() { + ClusterEntity.apply(ImageStreamIT.class.getResourceAsStream("/imagestream-it.yml")); } @Test public void load() { - ImageStream aImageStream = client.imageStreams().inNamespace(currentNamespace) + ImageStream aImageStream = 
client.imageStreams().inNamespace(session.getNamespace()) .load(getClass().getResourceAsStream("/test-imagestream.yml")).get(); assertThat(aImageStream).isNotNull(); assertEquals("my-ruby", aImageStream.getMetadata().getName()); @@ -95,22 +61,21 @@ public void load() { @Test public void get() { - assertNotNull(client.imageStreams().inNamespace(currentNamespace).withName("example-camel-cdi").get()); - assertNotNull(client.imageStreams().inNamespace(currentNamespace).withName("java-sti").get()); + assertNotNull(client.imageStreams().inNamespace(session.getNamespace()).withName("is-get").get()); } @Test public void list() { - ImageStreamList aImageStreamList = client.imageStreams().inNamespace(currentNamespace).list(); + ImageStreamList aImageStreamList = client.imageStreams().inNamespace(session.getNamespace()).list(); assertThat(aImageStreamList).isNotNull(); - assertEquals(2, aImageStreamList.getItems().size()); + assertTrue(aImageStreamList.getItems().size() >= 1); } @Test public void update() { - ReadyEntity imageStreamReady = new ReadyEntity<>(ImageStream.class, client, "java-sti", this.currentNamespace); + ReadyEntity imageStreamReady = new ReadyEntity<>(ImageStream.class, client, "is-update", this.session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(imageStreamReady); - imageStream1 = client.imageStreams().inNamespace(currentNamespace).withName("java-sti").edit() + ImageStream imageStream1 = client.imageStreams().inNamespace(session.getNamespace()).withName("is-update").edit() .editSpec().withDockerImageRepository("fabric8/s2i-java").endSpec() .done(); assertThat(imageStream1).isNotNull(); @@ -119,21 +84,10 @@ public void update() { @Test public void delete() { - ReadyEntity imageStreamReady = new ReadyEntity<>(ImageStream.class, client, "example-camel-cdi", this.currentNamespace); + ReadyEntity imageStreamReady = new ReadyEntity<>(ImageStream.class, client, "is-delete", this.session.getNamespace()); await().atMost(30, TimeUnit.SECONDS).until(imageStreamReady); - boolean bDeleted = client.imageStreams().inNamespace(currentNamespace).withName("example-camel-cdi").delete(); + boolean bDeleted = client.imageStreams().inNamespace(session.getNamespace()).withName("is-delete").delete(); assertTrue(bDeleted); } - @After - public void cleanup() throws InterruptedException { - if (client.imageStreams().inNamespace(currentNamespace).list().getItems().size()!= 0) { - client.imageStreams().inNamespace(currentNamespace).delete(); - } - - DeleteEntity imageStream1Delete = new DeleteEntity<>(ImageStream.class, client, "java-sti", this.currentNamespace); - DeleteEntity imageStream2Delete = new DeleteEntity<>(ImageStream.class, client, "example-camel-cdi", this.currentNamespace); - await().atMost(30, TimeUnit.SECONDS).until(imageStream1Delete); - await().atMost(30, TimeUnit.SECONDS).until(imageStream2Delete); - } } diff --git a/kubernetes-itests/src/test/java/io/fabric8/openshift/ImageStreamTagIT.java b/kubernetes-itests/src/test/java/io/fabric8/openshift/ImageStreamTagIT.java index 88627fa46e5..d4f5fca8a41 100644 --- a/kubernetes-itests/src/test/java/io/fabric8/openshift/ImageStreamTagIT.java +++ b/kubernetes-itests/src/test/java/io/fabric8/openshift/ImageStreamTagIT.java @@ -15,16 +15,24 @@ */ package io.fabric8.openshift; +import io.fabric8.commons.ClusterEntity; +import io.fabric8.commons.DeleteEntity; +import io.fabric8.commons.ReadyEntity; import io.fabric8.openshift.api.model.*; +import io.fabric8.openshift.api.model.ImageStreamTagBuilder; import 
io.fabric8.openshift.client.OpenShiftClient;
+import org.arquillian.cube.kubernetes.api.Session;
 import org.arquillian.cube.openshift.impl.requirement.RequiresOpenshift;
 import org.arquillian.cube.requirement.ArquillianConditionalRunner;
 import org.jboss.arquillian.test.api.ArquillianResource;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+
+import static org.awaitility.Awaitility.await;
 import static org.junit.Assert.*;
 
 @RunWith(ArquillianConditionalRunner.class)
@@ -34,27 +42,16 @@ public class ImageStreamTagIT {
   @ArquillianResource
   OpenShiftClient client;
 
-  @Before
-  public void init() throws InterruptedException {
-
-    ImageStreamTag istag = new ImageStreamTagBuilder().withNewMetadata().withName("bar1:1.0.12").endMetadata()
-      .withNewTag()
-      .withNewFrom()
-      .withKind("DockerImage")
-      .withName("openshift/wildfly-81-centos7:latest")
-      .endFrom()
-      .endTag()
-      .build();
-
-    client.imageStreamTags().create(istag);
-
-    Thread.sleep(20000);
+  @ArquillianResource
+  Session session;
+  @BeforeClass
+  public static void init() {
+    ClusterEntity.apply(ImageStreamTagIT.class.getResourceAsStream("/imagestreamtag-it.yml"));
   }
 
   @Test
   public void load() {
-
     ImageStreamTag loadedIST = client.imageStreamTags()
       .load(getClass().getResourceAsStream("/test-ist.yml")).get();
 
@@ -67,61 +64,62 @@ public void load() {
 
   @Test
   public void get() {
-    ImageStreamTag getIST = client.imageStreamTags()
-      .withName("bar1:1.0.12").get();
+    ReadyEntity<ImageStreamTag> imageStreamTagReadyEntity = new ReadyEntity<>(ImageStreamTag.class, client, "get:1.0.12", session.getNamespace());
+    await().atMost(30, TimeUnit.SECONDS).until(imageStreamTagReadyEntity);
+    ImageStreamTag getIST = client.imageStreamTags().inNamespace(session.getNamespace())
+      .withName("get:1.0.12").get();
 
     assertNotNull(getIST);
-    assertEquals("bar1:1.0.12", getIST.getMetadata().getName());
+    assertEquals("get:1.0.12", getIST.getMetadata().getName());
     assertEquals("DockerImage", getIST.getTag().getFrom().getKind());
-    assertEquals("openshift/wildfly-81-centos7:latest", getIST.getTag().getFrom().getName());
+    assertEquals("busybox:latest", getIST.getTag().getFrom().getName());
   }
 
   @Test
   public void list() {
-    ImageStreamTagList istagList = client.imageStreamTags().list();
+    ImageStreamTagList istagList = client.imageStreamTags().inNamespace(session.getNamespace()).list();
 
     assertNotNull(istagList);
-    assertEquals(1,istagList.getItems().size());
-    assertEquals("bar1:1.0.12", istagList.getItems().get(0).getMetadata().getName());
-    assertEquals("DockerImage", istagList.getItems().get(0).getTag().getFrom().getKind());
-    assertEquals("openshift/wildfly-81-centos7:latest", istagList.getItems().get(0).getTag().getFrom().getName());
+    assertTrue(istagList.getItems().size() >= 1);
+    Optional<ImageStreamTag> imageStreamTag = istagList.getItems().stream().filter(i -> i.getMetadata().getName().equalsIgnoreCase("list:1.0.12")).findFirst();
+    assertTrue(imageStreamTag.isPresent());
+    assertEquals("list:1.0.12", imageStreamTag.get().getMetadata().getName());
+    assertEquals("DockerImage", imageStreamTag.get().getTag().getFrom().getKind());
+    assertEquals("busybox:latest", imageStreamTag.get().getTag().getFrom().getName());
   }
 
   @Test
-  public void update(){
+  public void update() {
 
-    ImageStreamTag istag2 = new ImageStreamTagBuilder().withNewMetadata().withName("bar1:1.0.12").endMetadata()
+    ReadyEntity<ImageStreamTag> imageStreamTagReadyEntity = new ReadyEntity<>(ImageStreamTag.class, client, "update:1.0.12", session.getNamespace());
+    await().atMost(30, TimeUnit.SECONDS).until(imageStreamTagReadyEntity);
+    ImageStreamTag istag2 = new ImageStreamTagBuilder().withNewMetadata().withName("update:1.0.12").endMetadata()
       .withNewTag()
       .withNewFrom()
      .withKind("DockerImage")
-      .withName("openshift/wildfly-81-centos:latest")
+      .withName("busybox:latest")
       .endFrom()
       .endTag()
       .build();
 
-    ImageStreamTag istag = client.imageStreamTags().withName("bar1:1.0.12").patch(istag2);
+    ImageStreamTag istag = client.imageStreamTags().inNamespace(session.getNamespace()).withName("update:1.0.12").patch(istag2);
 
     assertNotNull(istag);
-    assertEquals("bar1:1.0.12", istag.getMetadata().getName());
+    assertEquals("update:1.0.12", istag.getMetadata().getName());
     assertEquals("DockerImage", istag.getTag().getFrom().getKind());
-    assertEquals("openshift/wildfly-81-centos:latest", istag.getTag().getFrom().getName());
+    assertEquals("busybox:latest", istag.getTag().getFrom().getName());
   }
 
   @Test
-  public void delete(){
-
-    boolean deleted = client.imageStreamTags().withName("bar1:1.0.12").delete();
+  public void delete() {
+    ReadyEntity<ImageStreamTag> imageStreamTagReadyEntity = new ReadyEntity<>(ImageStreamTag.class, client, "delete:1.0.12", session.getNamespace());
+    await().atMost(30, TimeUnit.SECONDS).until(imageStreamTagReadyEntity);
+    ImageStreamTagList imageStreamTagListOld = client.imageStreamTags().inNamespace(session.getNamespace()).list();
+    boolean deleted = client.imageStreamTags().inNamespace(session.getNamespace()).withName("delete:1.0.12").delete();
 
     assertTrue(deleted);
-    ImageStreamTagList istagList = client.imageStreamTags().list();
-    assertEquals(0, istagList.getItems().size());
-  }
-
-
-  @After
-  public void cleanup() {
-
-    client.imageStreamTags().withName("bar1:1.0.12").delete();
+    DeleteEntity<ImageStreamTag> deleteEntity = new DeleteEntity<>(ImageStreamTag.class, client, "delete:1.0.12", session.getNamespace());
+    await().atMost(30, TimeUnit.SECONDS).until(deleteEntity);
   }
 }
diff --git a/kubernetes-itests/src/test/java/io/fabric8/openshift/RouteIT.java b/kubernetes-itests/src/test/java/io/fabric8/openshift/RouteIT.java
index 786c80690c9..b04d01d6b57 100644
--- a/kubernetes-itests/src/test/java/io/fabric8/openshift/RouteIT.java
+++ b/kubernetes-itests/src/test/java/io/fabric8/openshift/RouteIT.java
@@ -16,18 +16,17 @@
 package io.fabric8.openshift;
 
-import io.fabric8.commons.DeleteEntity;
+import io.fabric8.commons.ClusterEntity;
 import io.fabric8.commons.ReadyEntity;
 import io.fabric8.openshift.api.model.Route;
-import io.fabric8.openshift.api.model.RouteBuilder;
 import io.fabric8.openshift.api.model.RouteList;
 import io.fabric8.openshift.client.OpenShiftClient;
 import org.arquillian.cube.kubernetes.api.Session;
 import org.arquillian.cube.openshift.impl.requirement.RequiresOpenshift;
 import org.arquillian.cube.requirement.ArquillianConditionalRunner;
 import org.jboss.arquillian.test.api.ArquillianResource;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
@@ -48,24 +47,13 @@ public class RouteIT {
   @ArquillianResource
   Session session;
 
-  private Route route1, route2;
+  private Route route1;
 
   private String currentNamespace;
 
-  @Before
-  public void init() {
-    currentNamespace = session.getNamespace();
-    route1 = new RouteBuilder()
-      .withNewMetadata().withName("route1").endMetadata()
-      .withNewSpec().withHost("www.example.com").withNewTo().withKind("Service").withName("service-name1").endTo().endSpec()
-      .build();
-    route2 = new RouteBuilder()
-      .withNewMetadata().withName("route2").endMetadata()
-      .withNewSpec().withNewTo().withKind("Service").withName("service-name2").endTo().endSpec()
-      .build();
-
-    client.routes().inNamespace(currentNamespace).create(route1);
-    client.routes().inNamespace(currentNamespace).create(route2);
+  @BeforeClass
+  public static void init() {
+    ClusterEntity.apply(RouteIT.class.getResourceAsStream("/route-it.yml"));
   }
 
   @Test
@@ -77,23 +65,21 @@ public void load() {
 
   @Test
   public void get() {
-    route1 = client.routes().inNamespace(currentNamespace).withName("route1").get();
+    route1 = client.routes().inNamespace(currentNamespace).withName("route-get").get();
     assertNotNull(route1);
-    route2 = client.routes().inNamespace(currentNamespace).withName("route2").get();
-    assertNotNull(route2);
   }
 
   @Test
   public void list() {
     RouteList aRouteList = client.routes().inNamespace(currentNamespace).list();
     assertThat(aRouteList).isNotNull();
-    assertEquals(2, aRouteList.getItems().size());
+    assertTrue(aRouteList.getItems().size() >= 1);
   }
 
   @Test
   public void update() {
-    ReadyEntity<Route> route1Ready = new ReadyEntity<>(Route.class, client, "route1", this.currentNamespace);
-    route1 = client.routes().inNamespace(currentNamespace).withName("route1").edit()
+    ReadyEntity<Route> route1Ready = new ReadyEntity<>(Route.class, client, "route-update", this.currentNamespace);
+    route1 = client.routes().inNamespace(currentNamespace).withName("route-update").edit()
       .editSpec().withPath("/test").endSpec().done();
     await().atMost(30, TimeUnit.SECONDS).until(route1Ready);
     assertThat(route1).isNotNull();
@@ -102,23 +88,10 @@ public void update() {
 
   @Test
   public void delete() {
 
-    ReadyEntity<Route> route1Ready = new ReadyEntity<>(Route.class, client, "route1", this.currentNamespace);
+    ReadyEntity<Route> route1Ready = new ReadyEntity<>(Route.class, client, "route-delete", this.currentNamespace);
     await().atMost(30, TimeUnit.SECONDS).until(route1Ready);
-    boolean bDeleted = client.routes().inNamespace(currentNamespace).withName("route1").delete();
+    boolean bDeleted = client.routes().inNamespace(currentNamespace).withName("route-delete").delete();
     assertTrue(bDeleted);
-  }
-  @After
-  public void cleanup() throws InterruptedException {
-    if(client.routes().inNamespace(currentNamespace).list().getItems().size() != 0) {
-      client.routes().inNamespace(currentNamespace).delete();
-    }
-
-    DeleteEntity<Route> route1Delete = new DeleteEntity<>(Route.class, client, "route1", currentNamespace);
-    DeleteEntity<Route> route2Delete = new DeleteEntity<>(Route.class, client, "route2", currentNamespace);
-
-    await().atMost(30, TimeUnit.SECONDS).until(route1Delete);
-    await().atMost(30, TimeUnit.SECONDS).until(route2Delete);
-
   }
 }
diff --git a/kubernetes-itests/src/test/java/io/fabric8/openshift/SecurityContextConstraintsIT.java b/kubernetes-itests/src/test/java/io/fabric8/openshift/SecurityContextConstraintsIT.java
index bd6ca22ede5..adf67cc5442 100644
--- a/kubernetes-itests/src/test/java/io/fabric8/openshift/SecurityContextConstraintsIT.java
+++ b/kubernetes-itests/src/test/java/io/fabric8/openshift/SecurityContextConstraintsIT.java
@@ -16,23 +16,19 @@
 package io.fabric8.openshift;
 
-import io.fabric8.commons.DeleteEntity;
+import io.fabric8.commons.ClusterEntity;
 import io.fabric8.openshift.api.model.SecurityContextConstraints;
-import io.fabric8.openshift.api.model.SecurityContextConstraintsBuilder;
 import io.fabric8.openshift.api.model.SecurityContextConstraintsList;
 import io.fabric8.openshift.client.OpenShiftClient;
 import org.arquillian.cube.openshift.impl.requirement.RequiresOpenshift;
 import org.arquillian.cube.requirement.ArquillianConditionalRunner;
 import org.jboss.arquillian.test.api.ArquillianResource;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
 import java.util.Collections;
-import java.util.concurrent.TimeUnit;
-
-import static org.awaitility.Awaitility.await;
 import static org.junit.Assert.*;
 
 @RunWith(ArquillianConditionalRunner.class)
@@ -44,31 +40,9 @@ public class SecurityContextConstraintsIT {
 
   private SecurityContextConstraints scc;
 
-  @Before
-  public void init(){
-
-    scc = new SecurityContextConstraintsBuilder()
-      .withNewMetadata().withName("test-scc")
-      .addToLabels("foo","bar")
-      .endMetadata()
-      .withAllowPrivilegedContainer(true)
-      .withNewRunAsUser()
-      .withType("RunAsAny")
-      .endRunAsUser()
-      .withNewSeLinuxContext()
-      .withType("RunAsAny")
-      .endSeLinuxContext()
-      .withNewFsGroup()
-      .withType("RunAsAny")
-      .endFsGroup()
-      .withNewSupplementalGroups()
-      .withType("RunAsAny")
-      .endSupplementalGroups()
-      .addToUsers("admin")
-      .addToGroups("admin-group")
-      .build();
-
-    scc = client.securityContextConstraints().create(scc);
+  @BeforeClass
+  public static void init() {
+    ClusterEntity.apply(SecurityContextConstraintsIT.class.getResourceAsStream("/securitycontextconstraints-it.yml"));
   }
 
   @Test
@@ -92,9 +66,9 @@ public void load() {
   public void get() {
 
     SecurityContextConstraints getSCC = client.securityContextConstraints()
-      .withName("test-scc").get();
+      .withName("scc-get").get();
     assertNotNull(getSCC);
-    assertEquals("test-scc",getSCC.getMetadata().getName());
+    assertEquals("scc-get",getSCC.getMetadata().getName());
     assertTrue(getSCC.getAllowPrivilegedContainer());
     assertEquals("RunAsAny",getSCC.getRunAsUser().getType());
     assertEquals("RunAsAny",getSCC.getFsGroup().getType());
@@ -114,7 +88,7 @@ public void list() {
 
     assertNotNull(sccList);
     assertEquals(1,sccList.getItems().size());
-    assertEquals("test-scc",sccList.getItems().get(0).getMetadata().getName());
+    assertEquals("scc-list",sccList.getItems().get(0).getMetadata().getName());
     assertTrue(sccList.getItems().get(0).getAllowPrivilegedContainer());
     assertEquals("RunAsAny",sccList.getItems().get(0).getRunAsUser().getType());
     assertEquals("RunAsAny",sccList.getItems().get(0).getFsGroup().getType());
@@ -129,12 +103,12 @@ public void list() {
 
   @Test
   public void update(){
-    scc = client.securityContextConstraints().withName("test-scc").edit()
+    scc = client.securityContextConstraints().withName("scc-update").edit()
       .withAllowPrivilegedContainer(false)
       .done();
 
     assertNotNull(scc);
-    assertEquals("test-scc",scc.getMetadata().getName());
+    assertEquals("scc-update",scc.getMetadata().getName());
     assertFalse(scc.getAllowPrivilegedContainer());
     assertEquals("RunAsAny",scc.getRunAsUser().getType());
     assertEquals("RunAsAny",scc.getFsGroup().getType());
@@ -147,22 +121,12 @@ public void update(){
   }
 
   @Test
-  public void delete(){
+  public void delete() {
+    scc = client.securityContextConstraints().withName("scc-delete").get();
 
     boolean deleted = client.securityContextConstraints().delete(scc);
     assertTrue(deleted);
     SecurityContextConstraintsList sccList = client.securityContextConstraints().list();
     assertFalse(sccList.getItems().contains(scc));
   }
-
-  @After
-  public void cleanup() {
-    if (client.securityContextConstraints().list().getItems().size()!= 0) {
-      client.securityContextConstraints().withName("test-scc").delete();
-    }
-
-    DeleteEntity<SecurityContextConstraints> sccDelete = new DeleteEntity<>(SecurityContextConstraints.class, client, "test-scc", null);
-    await().atMost(30, TimeUnit.SECONDS).until(sccDelete);
-  }
-
 }
diff --git a/kubernetes-itests/src/test/java/io/fabric8/openshift/ServiceToURLIT.java b/kubernetes-itests/src/test/java/io/fabric8/openshift/ServiceToURLIT.java
new file mode 100644
index 00000000000..ab6cd8cd675
--- /dev/null
+++ b/kubernetes-itests/src/test/java/io/fabric8/openshift/ServiceToURLIT.java
@@ -0,0 +1,123 @@
+/**
+ * Copyright (C) 2015 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *         http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.openshift;
+
+import io.fabric8.kubernetes.api.model.IntOrString;
+import io.fabric8.kubernetes.api.model.Service;
+import io.fabric8.kubernetes.api.model.ServiceBuilder;
+import io.fabric8.kubernetes.api.model.extensions.Ingress;
+import io.fabric8.openshift.api.model.RouteBuilder;
+import io.fabric8.openshift.client.OpenShiftClient;
+import org.arquillian.cube.kubernetes.api.Session;
+import org.arquillian.cube.openshift.impl.requirement.RequiresOpenshift;
+import org.arquillian.cube.requirement.ArquillianConditionalRunner;
+import org.jboss.arquillian.test.api.ArquillianResource;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.util.Collections;
+
+import static junit.framework.TestCase.assertNotNull;
+
+@RunWith(ArquillianConditionalRunner.class)
+@RequiresOpenshift
+public class ServiceToURLIT {
+  @ArquillianResource
+  OpenShiftClient client;
+
+  @ArquillianResource
+  Session session;
+
+  private String currentNamespace;
+
+  @Before
+  public void init() {
+    currentNamespace = session.getNamespace();
+    Service svc1 = new ServiceBuilder()
+      .withNewMetadata()
+      .withName("svc1")
+      .endMetadata()
+      .withNewSpec()
+      .withSelector(Collections.singletonMap("app", "MyApp"))
+      .addNewPort()
+      .withName("http")
+      .withProtocol("TCP")
+      .withPort(80)
+      .withTargetPort(new IntOrString(9376))
+      .endPort()
+      .withType("LoadBalancer")
+      .endSpec()
+      .withNewStatus()
+      .withNewLoadBalancer()
+      .addNewIngress()
+      .withIp("146.148.47.155")
+      .endIngress()
+      .endLoadBalancer()
+      .endStatus()
+      .build();
+    Service svc2 = new ServiceBuilder()
+      .withNewMetadata().withName("svc2").endMetadata()
+      .withNewSpec().withType("ExternalName").withExternalName("my.database.example.com")
+      .addNewPort().withName("80").withProtocol("TCP").withPort(80).endPort()
+      .endSpec()
+      .withNewStatus()
+      .withNewLoadBalancer()
+      .addNewIngress()
+      .withIp("146.148.47.155")
+      .endIngress()
+      .endLoadBalancer()
+      .endStatus()
+      .build();
+
+    client.services().inNamespace(currentNamespace).createOrReplace(svc1);
+    client.services().inNamespace(currentNamespace).createOrReplace(svc2);
+  }
+
+  @Test
+  public void getURL() {
+    // Testing NodePort Impl
+    String url = client.services().inNamespace(currentNamespace).withName("svc1").getURL("http");
+    assertNotNull(url);
+
+    // Testing Ingress Impl
+    Ingress ingress = client.extensions().ingresses().load(getClass().getResourceAsStream("/test-ingress.yml")).get();
+    client.extensions().ingresses().inNamespace(currentNamespace).create(ingress);
+
+    url = client.services().inNamespace(currentNamespace).withName("svc2").getURL("80");
+    assertNotNull(url);
+
+    // Testing OpenShift Route Impl
+    Service svc3 = client.services().inNamespace(currentNamespace).create(new ServiceBuilder()
+      .withNewMetadata().withName("svc3").endMetadata()
+      .withNewSpec()
+      .addNewPort().withName("80").withProtocol("TCP").withPort(80).endPort()
+      .endSpec()
+      .build());
+
+    OpenShiftClient openshiftClient = client.adapt(OpenShiftClient.class);
+    openshiftClient.routes().inNamespace(currentNamespace).create(new RouteBuilder()
+      .withNewMetadata().withName(svc3.getMetadata().getName()).endMetadata()
+      .withNewSpec()
+      .withHost("www.example.com")
+      .withNewTo().withName(svc3.getMetadata().getName()).withKind("Service").endTo()
+      .endSpec()
+      .build());
+
+    url = client.services().inNamespace(currentNamespace).withName("svc3").getURL("80");
+    assertNotNull(url);
+  }
+}
diff --git a/kubernetes-itests/src/test/java/io/fabric8/openshift/TemplateIT.java b/kubernetes-itests/src/test/java/io/fabric8/openshift/TemplateIT.java
index 69070854706..f5c60632b7b 100644
--- a/kubernetes-itests/src/test/java/io/fabric8/openshift/TemplateIT.java
+++ b/kubernetes-itests/src/test/java/io/fabric8/openshift/TemplateIT.java
@@ -16,28 +16,22 @@
 package io.fabric8.openshift;
 
-import io.fabric8.commons.DeleteEntity;
+import io.fabric8.commons.ClusterEntity;
 import io.fabric8.commons.ReadyEntity;
-import io.fabric8.kubernetes.api.model.Service;
-import io.fabric8.kubernetes.api.model.ServiceBuilder;
 import io.fabric8.openshift.api.model.Template;
-import io.fabric8.openshift.api.model.TemplateBuilder;
 import io.fabric8.openshift.api.model.TemplateList;
 import io.fabric8.openshift.client.OpenShiftClient;
 import org.arquillian.cube.kubernetes.api.Session;
 import org.arquillian.cube.openshift.impl.requirement.RequiresOpenshift;
 import org.arquillian.cube.requirement.ArquillianConditionalRunner;
 import org.jboss.arquillian.test.api.ArquillianResource;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 
 import java.util.Collections;
 import java.util.concurrent.TimeUnit;
-
-import static io.fabric8.kubernetes.client.utils.ReplaceValueStream.replaceValues;
 import static junit.framework.TestCase.assertEquals;
 import static junit.framework.TestCase.assertNotNull;
 import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
@@ -55,35 +49,16 @@ public class TemplateIT {
 
   private Template template1;
 
-  private String currentNamespace;
-
-  @Before
-  public void init() {
-    currentNamespace = session.getNamespace();
-    Service aService = new ServiceBuilder()
-      .withNewMetadata().withName("bar").endMetadata()
-      .withNewSpec()
-      .addNewPort()
-      .withPort(80).endPort()
-      .addToSelector("cheese", "edam")
-      .withType("ExternalName")
-      .endSpec()
-      .build();
-
-    template1 = new TemplateBuilder()
-      .withApiVersion("template.openshift.io/v1")
-      .withNewMetadata().withName("foo").endMetadata()
-      .addToObjects(aService)
-      .build();
-
-    client.templates().inNamespace(currentNamespace).create(template1);
+  @BeforeClass
+  public static void init() {
+    ClusterEntity.apply(TemplateIT.class.getResourceAsStream("/template-it.yml"));
   }
 
   @Test
   public void load() {
     Template template = client.templates()
       .withParameters(Collections.singletonMap("REDIS_PASSWORD", "secret"))
-      .inNamespace(currentNamespace)
+      .inNamespace(session.getNamespace())
       .load(getClass().getResourceAsStream("/test-template.yml")).get();
     assertThat(template).isNotNull();
     assertEquals(1, template.getObjects().size());
@@ -91,32 +66,22 @@ public void load() {
 
   @Test
   public void get() {
-    template1 = client.templates().inNamespace(currentNamespace).withName("foo").get();
+    template1 = client.templates().inNamespace(session.getNamespace()).withName("template-get").get();
     assertNotNull(template1);
   }
 
   @Test
   public void list() {
-    TemplateList aList = client.templates().inNamespace(currentNamespace).list();
+    TemplateList aList = client.templates().inNamespace(session.getNamespace()).list();
     assertThat(aList).isNotNull();
-    assertEquals(1, aList.getItems().size());
+    assertTrue(aList.getItems().size() >= 1);
   }
 
   @Test
   public void delete() {
-    ReadyEntity