Remove root project testing jar (#1439)
When executing integration test code, most integrations expect a jar containing all of the test code to be provisioned so that the processing frameworks can load it directly. Historically, to support this, we packaged all integration test code into one big itest jar in the root project.

This PR removes that shared central jar. Each project now builds and uses its own itest jar for its integration tests. These jars may repackage code from other projects' itest jars, but each one is insulated within the project that uses it.
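
In practice, the new per-project pattern looks roughly like the sketch below. It is condensed from the BuildPlugin.groovy and subproject build.gradle changes in this commit and is not a drop-in snippet: each subproject registers its own itestJar task, wires it into its integrationTest task, and, where needed, repackages the mr project's classes into that jar.

// Sketch only: condensed from the BuildPlugin.groovy changes in this commit.
// Every subproject (never the root project) bundles its own classes into a 'testing' jar.
if (project != project.rootProject) {
    TaskProvider<Task> itestJar = project.tasks.register('itestJar', Jar) { Jar jar ->
        jar.dependsOn(project.tasks.getByName('jar'))
        jar.getArchiveClassifier().set('testing')
        jar.from(project.sourceSets.main.output)
        jar.from(project.sourceSets.test.output)
        jar.from(project.sourceSets.itest.output)
    }

    // Integration tests now depend on the project-local jar instead of a root-level uber jar.
    Test integrationTest = project.tasks.create('integrationTest', RestTestRunnerTask.class)
    integrationTest.dependsOn(itestJar)
}

// Sketch only: in a consumer project's build.gradle (hive, pig, storm, spark), the local
// itest jar repackages the mr artifact so the jar remains self-contained.
itestJar {
    from(zipTree(project(":elasticsearch-hadoop-mr").jar.archivePath)) {
        include "org/elasticsearch/hadoop/**"
        include "esh-build.properties"
        include "META-INF/services/*"
    }
}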
jbaiera committed May 12, 2020
1 parent 124ff97 commit 73d261e
Showing 10 changed files with 109 additions and 80 deletions.
@@ -38,7 +38,6 @@ import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.JavaCompile
import org.gradle.api.tasks.javadoc.Javadoc
import org.gradle.api.tasks.testing.Test
import org.gradle.api.tasks.testing.TestReport
import org.gradle.external.javadoc.JavadocOutputLevel
import org.gradle.external.javadoc.MinimalJavadocOptions
import org.gradle.internal.jvm.Jvm
@@ -66,7 +65,6 @@ class BuildPlugin implements Plugin<Project> {
configureEclipse(project)
configureMaven(project)
configureIntegrationTestTask(project)
configureTestReports(project)
configurePrecommit(project)
configureDependenciesInfo(project)
}
@@ -321,12 +319,6 @@ class BuildPlugin implements Plugin<Project> {
testImplementation("org.locationtech.spatial4j:spatial4j:0.6")
testImplementation("com.vividsolutions:jts:1.13")

// TODO: Remove when we merge ITests to test dirs
itestCompile("org.apache.hadoop:hadoop-minikdc:${project.ext.minikdcVersion}") {
// For some reason, the dependencies that are pulled in with MiniKDC have multiple resource files
// that cause issues when they are loaded. We exclude the ldap schema data jar to get around this.
exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
}
itestImplementation(project.sourceSets.main.output)
itestImplementation(project.configurations.testImplementation)
itestImplementation(project.configurations.provided)
@@ -593,72 +585,52 @@ class BuildPlugin implements Plugin<Project> {
* @param project to be configured
*/
private static void configureIntegrationTestTask(Project project) {
Jar hadoopTestingJar = project.rootProject.tasks.findByName('hadoopTestingJar') as Jar
if (hadoopTestingJar == null) {
// jar used for testing Hadoop remotely (es-hadoop + tests)
hadoopTestingJar = project.rootProject.tasks.create('hadoopTestingJar', Jar)
hadoopTestingJar.dependsOn(project.rootProject.tasks.getByName('jar'))
hadoopTestingJar.classifier = 'testing'
project.logger.info("Created Remote Testing Jar")
}
if (project != project.rootProject) {
TaskProvider<Task> itestJar = project.tasks.register('itestJar', Jar) { Jar itestJar ->
itestJar.dependsOn(project.tasks.getByName('jar'))
itestJar.getArchiveClassifier().set('testing')

// Add this project's classes to the testing uber-jar
itestJar.from(project.sourceSets.main.output)
itestJar.from(project.sourceSets.test.output)
itestJar.from(project.sourceSets.itest.output)
}

// Add this project's classes to the testing uber-jar
hadoopTestingJar.from(project.sourceSets.test.output)
hadoopTestingJar.from(project.sourceSets.main.output)
hadoopTestingJar.from(project.sourceSets.itest.output)

Test integrationTest = project.tasks.create('integrationTest', RestTestRunnerTask.class)
integrationTest.dependsOn(hadoopTestingJar)

integrationTest.testClassesDirs = project.sourceSets.itest.output.classesDirs
integrationTest.classpath = project.sourceSets.itest.runtimeClasspath
integrationTest.excludes = ["**/Abstract*.class"]

integrationTest.ignoreFailures = false

integrationTest.executable = "${project.ext.get('runtimeJavaHome')}/bin/java"
integrationTest.minHeapSize = "256m"
integrationTest.maxHeapSize = "2g"

integrationTest.testLogging {
displayGranularity 0
events "started", "failed" //, "standardOut", "standardError"
exceptionFormat "full"
showCauses true
showExceptions true
showStackTraces true
stackTraceFilters "groovy"
minGranularity 2
maxGranularity 2
}
Test integrationTest = project.tasks.create('integrationTest', RestTestRunnerTask.class)
integrationTest.dependsOn(itestJar)

integrationTest.testClassesDirs = project.sourceSets.itest.output.classesDirs
integrationTest.classpath = project.sourceSets.itest.runtimeClasspath
integrationTest.excludes = ["**/Abstract*.class"]

integrationTest.ignoreFailures = false

integrationTest.executable = "${project.ext.get('runtimeJavaHome')}/bin/java"
integrationTest.minHeapSize = "256m"
integrationTest.maxHeapSize = "2g"

integrationTest.testLogging {
displayGranularity 0
events "started", "failed" //, "standardOut", "standardError"
exceptionFormat "full"
showCauses true
showExceptions true
showStackTraces true
stackTraceFilters "groovy"
minGranularity 2
maxGranularity 2
}

integrationTest.reports.html.enabled = false
integrationTest.reports.html.enabled = false

// Only add cluster settings if it's not the root project
if (project != project.rootProject) {
// Only add cluster settings if it's not the root project
project.logger.info "Configuring ${project.name} integrationTest task to use ES Fixture"
// Create the cluster fixture around the integration test.
// There's probably a more elegant way to do this in Gradle
project.plugins.apply("es.hadoop.cluster")
}
}

/**
* Configure the root testReport task with the test tasks in this project to report on, creating the report task
* on root if it is not created yet.
* @param project to configure
*/
private static void configureTestReports(Project project) {
TestReport testReport = project.rootProject.getTasks().findByName('testReport') as TestReport
if (testReport == null) {
// Create the task on root if it is not created yet.
testReport = project.rootProject.getTasks().create('testReport', TestReport.class)
testReport.setDestinationDir(project.rootProject.file("${project.rootProject.getBuildDir()}/reports/allTests"))
}
testReport.reportOn(project.getTasks().getByName('test'))
testReport.reportOn(project.getTasks().getByName('integrationTest'))
}

/**
* @param project that belongs to a git repo
* @return the file containing the hash for the current branch
8 changes: 8 additions & 0 deletions hive/build.gradle
@@ -21,6 +21,14 @@ jar {
}
}

itestJar {
from(zipTree(project(":elasticsearch-hadoop-mr").jar.archivePath)) {
include "org/elasticsearch/hadoop/**"
include "esh-build.properties"
include "META-INF/services/*"
}
}

javadoc {
source += project(":elasticsearch-hadoop-mr").sourceSets.main.allJava
classpath += files(project(":elasticsearch-hadoop-mr").sourceSets.main.compileClasspath)
5 changes: 5 additions & 0 deletions mr/build.gradle
@@ -20,6 +20,11 @@ dependencies {
testImplementation(project.ext.hadoopClient)
testImplementation("io.netty:netty-all:4.0.29.Final")
testImplementation("org.elasticsearch:securemock:1.2")
itestImplementation("org.apache.hadoop:hadoop-minikdc:${project.ext.minikdcVersion}") {
// For some reason, the dependencies that are pulled in with MiniKDC have multiple resource files
// that cause issues when they are loaded. We exclude the ldap schema data jar to get around this.
exclude group: "org.apache.directory.api", module: "api-ldap-schema-data"
}
}

String generatedResources = "$buildDir/generated-resources/main"
@@ -42,7 +42,7 @@ public abstract class Provisioner {
// init ES-Hadoop JAR
// expect the jar under build\libs
try {
File folder = new File(".." + File.separator + "build" + File.separator + "libs" + File.separator).getCanonicalFile();
File folder = new File("build" + File.separator + "libs" + File.separator).getCanonicalFile();
// find proper jar
File[] files = folder.listFiles(new FileFilter() {

8 changes: 8 additions & 0 deletions pig/build.gradle
@@ -21,6 +21,14 @@ jar {
}
}

itestJar {
from(zipTree(project(":elasticsearch-hadoop-mr").jar.archivePath)) {
include "org/elasticsearch/hadoop/**"
include "esh-build.properties"
include "META-INF/services/*"
}
}

javadoc {
source += project(":elasticsearch-hadoop-mr").sourceSets.main.allJava
classpath += files(project(":elasticsearch-hadoop-mr").sourceSets.main.compileClasspath)
40 changes: 26 additions & 14 deletions qa/kerberos/build.gradle
@@ -98,8 +98,8 @@ if (disableTests) {

// Build uber storm jar for testing Storm remotely (es-hadoop + es-storm + qa tests)
Jar qaKerberosStormJar = project.tasks.create('kerberosStormJar', Jar)
qaKerberosStormJar.dependsOn(project.rootProject.tasks.getByName('jar'))
qaKerberosStormJar.dependsOn(project.rootProject.tasks.getByName('hadoopTestingJar'))
qaKerberosStormJar.dependsOn(project(':elasticsearch-storm').tasks.getByName('jar'))
qaKerberosStormJar.dependsOn(project(':elasticsearch-storm').tasks.getByName('itestJar'))
qaKerberosStormJar.classifier = 'storm-testing'

// Add projects to the storm testing uber-jar
@@ -233,8 +233,14 @@ if (disableTests) {

// Fixtures will be depending on the jar and test jar artifacts
def jar = project.tasks.getByName('jar') as org.gradle.jvm.tasks.Jar
def testingJar = project.rootProject.tasks.findByName('hadoopTestingJar') as Jar

def kerberosItestJar = project.tasks.findByName('itestJar') as Jar
def mrJar = project(':elasticsearch-hadoop-mr').tasks.getByName('jar') as Jar
def mrItestJar = project(':elasticsearch-hadoop-mr').tasks.getByName('itestJar') as Jar
def hiveItestJar = project(':elasticsearch-hadoop-hive').tasks.getByName('itestJar') as Jar
def pigItestJar = project(':elasticsearch-hadoop-pig').tasks.getByName('itestJar') as Jar
def sparkItestJar = project(':elasticsearch-spark-20').tasks.getByName('itestJar') as Jar
def stormItestJar = project(':elasticsearch-storm').tasks.getByName('itestJar') as Jar

// Need these for SSL items, test data, and scripts
File resourceDir = project.sourceSets.main.resources.getSrcDirs().head()
File mrItestResourceDir = project(":elasticsearch-hadoop-mr").sourceSets.itest.resources.getSrcDirs().head()
@@ -330,7 +336,7 @@ if (disableTests) {
// Add the ES-Hadoop jar to the resource manager classpath so that it can load the token renewer implementation
// for ES tokens. Otherwise, tokens may not be cancelled at the end of the job.
s.role('resourcemanager') { RoleConfiguration r ->
r.addEnvironmentVariable('YARN_USER_CLASSPATH', testingJar.archivePath.toString())
r.addEnvironmentVariable('YARN_USER_CLASSPATH', mrJar.archivePath.toString())
r.settingsFile('yarn-site.xml') { SettingsContainer.FileSettings f ->
// Add settings specifically for ES Node to allow for cancelling the tokens
f.addSetting('es.nodes', esAddress)
@@ -359,7 +365,13 @@ if (disableTests) {
s.addSetting('es.nodes', esAddress)
}
config.addDependency(jar)
config.addDependency(testingJar)
config.addDependency(kerberosItestJar)
config.addDependency(mrJar)
config.addDependency(mrItestJar)
config.addDependency(hiveItestJar)
config.addDependency(pigItestJar)
config.addDependency(sparkItestJar)
config.addDependency(stormItestJar)

// We need to create a tmp directory in hadoop before history server does, because history server will set permissions
// wrong.
@@ -435,7 +447,7 @@ if (disableTests) {
useCluster(testClusters.integTest)
dependsOn(copyData, setupUsers)
jobJar = jar.archivePath
libJars(testingJar.archivePath)
libJars(kerberosItestJar.archivePath, mrItestJar.archivePath)
jobClass = 'org.elasticsearch.hadoop.qa.kerberos.mr.LoadToES'
jobSettings([
'es.resource': 'qa_kerberos_mr_data',
@@ -462,7 +474,7 @@ if (disableTests) {
useCluster(testClusters.integTest)
dependsOn(mrLoadData)
jobJar = jar.archivePath
libJars(testingJar.archivePath)
libJars(kerberosItestJar.archivePath, mrItestJar.archivePath)
jobClass = 'org.elasticsearch.hadoop.qa.kerberos.mr.ReadFromES'
jobSettings([
'es.resource': 'qa_kerberos_mr_data',
@@ -495,7 +507,7 @@ if (disableTests) {
// principal = clientPrincipal + realm
// keytab = clientKeytab.toString()
jobJar = jar.archivePath
libJars(testingJar.archivePath)
libJars(kerberosItestJar.archivePath, sparkItestJar.archivePath)
jobClass = 'org.elasticsearch.hadoop.qa.kerberos.spark.LoadToES'
jobSettings([
'spark.es.resource': 'qa_kerberos_spark_data',
@@ -523,7 +535,7 @@ if (disableTests) {
// principal = clientPrincipal + realm
// keytab = clientKeytab.toString()
jobJar = jar.archivePath
libJars(testingJar.archivePath)
libJars(kerberosItestJar.archivePath, sparkItestJar.archivePath)
jobClass = 'org.elasticsearch.hadoop.qa.kerberos.spark.ReadFromES'
jobSettings([
'spark.es.resource': 'qa_kerberos_spark_data',
@@ -565,7 +577,7 @@ if (disableTests) {
dependsOn(jar, setupUsers, copyData, patchBeeline)
hivePrincipal = hivePrincipalName + realm
script = new File(resourceDir, 'hive/load_to_es.sql')
libJars(testingJar.archivePath)
libJars(kerberosItestJar.archivePath, hiveItestJar.archivePath)
environmentVariables.putAll([
'HADOOP_CLIENT_OPTS':
"-Djava.security.krb5.conf=${krb5Conf.toString()} " +
@@ -582,7 +594,7 @@ if (disableTests) {
dependsOn(hiveLoadData)
hivePrincipal = hivePrincipalName + realm
script = new File(resourceDir, 'hive/read_from_es.sql')
libJars(testingJar.archivePath)
libJars(kerberosItestJar.archivePath, hiveItestJar.archivePath)
environmentVariables.putAll([
'HADOOP_CLIENT_OPTS':
"-Djava.security.krb5.conf=${krb5Conf.toString()} " +
@@ -602,7 +614,7 @@ if (disableTests) {
useCluster(testClusters.integTest)
dependsOn(jar, setupUsers, copyData)
script = new File(resourceDir, 'pig/load_to_es.pig')
libJars(testingJar.archivePath)
libJars(kerberosItestJar.archivePath, pigItestJar.archivePath)
environmentVariables.putAll([
'PIG_OPTS': "-Djava.security.krb5.conf=${krb5Conf.toString()}"
])
@@ -614,7 +626,7 @@ if (disableTests) {
useCluster(testClusters.integTest)
dependsOn(pigLoadData)
script = new File(resourceDir, 'pig/read_from_es.pig')
libJars(testingJar.archivePath)
libJars(kerberosItestJar.archivePath, pigItestJar.archivePath)
environmentVariables.putAll([
'PIG_OPTS': "-Djava.security.krb5.conf=${krb5Conf.toString()}"
])
@@ -38,7 +38,7 @@ public abstract class SparkUtils {
// init ES-Hadoop JAR
// expect the jar under build\libs
try {
File folder = new File(".." + File.separator + ".." + File.separator + "build" + File.separator + "libs" + File.separator).getCanonicalFile();
File folder = new File("build" + File.separator + "libs" + File.separator).getCanonicalFile();
System.out.println(folder.getAbsolutePath());
// find proper jar
File[] files = folder.listFiles(new FileFilter() {
8 changes: 8 additions & 0 deletions spark/sql-13/build.gradle
@@ -147,6 +147,14 @@ jar {
}
}

itestJar {
from(zipTree(project(":elasticsearch-hadoop-mr").jar.archivePath)) {
include "org/elasticsearch/hadoop/**"
include "esh-build.properties"
include "META-INF/services/*"
}
}

javadoc {
if (project.ext.scalaMajorVersion != '2.10') {
dependsOn compileScala
8 changes: 8 additions & 0 deletions spark/sql-20/build.gradle
@@ -161,6 +161,14 @@ jar {
}
}

itestJar {
from(zipTree(project(":elasticsearch-hadoop-mr").jar.archivePath)) {
include "org/elasticsearch/hadoop/**"
include "esh-build.properties"
include "META-INF/services/*"
}
}

javadoc {
if (project.ext.scalaMajorVersion != '2.10') {
dependsOn compileScala
8 changes: 8 additions & 0 deletions storm/build.gradle
@@ -20,6 +20,14 @@ jar {
}
}

itestJar {
from(zipTree(project(":elasticsearch-hadoop-mr").jar.archivePath)) {
include "org/elasticsearch/hadoop/**"
include "esh-build.properties"
include "META-INF/services/*"
}
}

javadoc {
source += project(":elasticsearch-hadoop-mr").sourceSets.main.allJava
classpath += files(project(":elasticsearch-hadoop-mr").sourceSets.main.compileClasspath)
