diff --git a/.sdkmanrc b/.sdkmanrc index 59f306d13ad..0262a261019 100644 --- a/.sdkmanrc +++ b/.sdkmanrc @@ -1,3 +1,3 @@ # Enable auto-env through the sdkman_auto_env config # Add key=value pairs of SDKs to use below -java=11.0.10.hs-adpt +java=11.0.11.hs-adpt diff --git a/.travis.yml b/.travis.yml index 3c098c36611..de4c5e9920d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -40,21 +40,18 @@ env: - >- BUILD_TYPE=centaurHoricromtalPapiV2beta BUILD_MYSQL=5.7 - - >- - BUILD_TYPE=centaurHoricromtalPapiV2beta - BUILD_MARIADB=10.3 - >- BUILD_TYPE=centaurHoricromtalEngineUpgradePapiV2alpha1 BUILD_MYSQL=5.7 - - >- - BUILD_TYPE=centaurHoricromtalEngineUpgradePapiV2alpha1 - BUILD_MARIADB=10.3 - >- BUILD_TYPE=centaurPapiUpgradePapiV2alpha1 BUILD_MYSQL=5.7 - >- BUILD_TYPE=centaurPapiUpgradeNewWorkflowsPapiV2alpha1 BUILD_MYSQL=5.7 + - >- + BUILD_TYPE=centaurLocal + BUILD_MARIADB=10.3 - >- BUILD_TYPE=centaurLocal BUILD_MYSQL=5.7 @@ -78,7 +75,6 @@ env: BUILD_MYSQL=5.7 - >- BUILD_TYPE=checkPublish - BUILD_MYSQL=5.7 - >- BUILD_TYPE=conformanceLocal BUILD_MYSQL=5.7 diff --git a/CromIAM/src/main/resources/sentry.properties b/CromIAM/src/main/resources/sentry.properties new file mode 100644 index 00000000000..ebc5aa32687 --- /dev/null +++ b/CromIAM/src/main/resources/sentry.properties @@ -0,0 +1,2 @@ +# Quiet warnings about missing sentry DSNs by providing an empty string +dsn= diff --git a/centaur/test_cromwell.sh b/centaur/test_cromwell.sh index 44f9d22447b..0e3ab8c240b 100755 --- a/centaur/test_cromwell.sh +++ b/centaur/test_cromwell.sh @@ -124,11 +124,11 @@ cd "${RUN_DIR}" TEST_STATUS="failed" if [[ "${CENTAUR_SBT_COVERAGE}" == "true" ]]; then - sbt -Dsbt.supershell=false --warn coverage centaur/it:compile - CP=$(sbt -no-colors --error coverage "export centaur/it:dependencyClasspath") + sbt -Dsbt.supershell=false --warn coverage centaur/IntegrationTest/compile + CP=$(sbt -no-colors --error coverage "export centaur/IntegrationTest/dependencyClasspath") else - sbt -Dsbt.supershell=false --warn centaur/it:compile - CP=$(sbt -no-colors --error "export centaur/it:dependencyClasspath") + sbt -Dsbt.supershell=false --warn centaur/IntegrationTest/compile + CP=$(sbt -no-colors --error "export centaur/IntegrationTest/dependencyClasspath") fi # Add the it-classes folder to the classpath to ensure logback configuration files are picked up. diff --git a/codegen_java/build.sbt b/codegen_java/build.sbt index ab435f749ef..d9af475ae89 100644 --- a/codegen_java/build.sbt +++ b/codegen_java/build.sbt @@ -6,7 +6,7 @@ lazy val root = (project in file(".")). 
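The SlickDatabase change above tracks the PostgreSQL driver bump later in this diff (postgresV 42.2.5 to 42.2.23): per the removed comment in Dependencies.scala, drivers 42.2.6 and later no longer have the private field m_mesgParts, which was apparently renamed to mesgParts. A minimal sketch of the reflective trick, with a version-tolerant fallback that is illustrative hardening rather than part of this change (the helper name is hypothetical):

import org.postgresql.util.{PSQLException, ServerErrorMessage}
import scala.util.Try

// Strip the 'D' (DETAIL) entry from a PSQLException's server error message via
// reflection. Tries the post-42.2.5 field name first and falls back to the old
// one; both are private, hence setAccessible(true).
def stripErrorDetail(pSQLException: PSQLException): Unit = {
  val message = pSQLException.getServerErrorMessage
  val field = Try(classOf[ServerErrorMessage].getDeclaredField("mesgParts"))
    .getOrElse(classOf[ServerErrorMessage].getDeclaredField("m_mesgParts"))
  field.setAccessible(true)
  field.get(message).asInstanceOf[java.util.Map[Character, String]].remove('D')
  ()
}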
Seq(organization := "org.broadinstitute.cromwell", name := "cromwell-client", version := createVersion("0.1"), - scalaVersion := "2.12.12", // scala-steward:off (CROM-6777) - 2.12.13 blocked by duplicate import of nowarn + scalaVersion := "2.12.14", scalacOptions ++= Seq("-feature"), compile / javacOptions ++= Seq("-Xlint:deprecation"), Compile / packageDoc / publishArtifact := false, diff --git a/codegen_java/project/build.properties b/codegen_java/project/build.properties index dbae93bcfd5..10fd9eee04a 100644 --- a/codegen_java/project/build.properties +++ b/codegen_java/project/build.properties @@ -1 +1 @@ -sbt.version=1.4.9 +sbt.version=1.5.5 diff --git a/database/sql/src/main/scala/cromwell/database/slick/SlickDatabase.scala b/database/sql/src/main/scala/cromwell/database/slick/SlickDatabase.scala index 037fd0fe540..e54974f6618 100644 --- a/database/sql/src/main/scala/cromwell/database/slick/SlickDatabase.scala +++ b/database/sql/src/main/scala/cromwell/database/slick/SlickDatabase.scala @@ -228,7 +228,7 @@ abstract class SlickDatabase(override val originalDatabaseConfig: Config) extend Instead resorting to reflection. */ val message = pSQLException.getServerErrorMessage - val field = classOf[ServerErrorMessage].getDeclaredField("m_mesgParts") + val field = classOf[ServerErrorMessage].getDeclaredField("mesgParts") field.setAccessible(true) val parts = field.get(message).asInstanceOf[java.util.Map[Character, String]] parts.remove('D') diff --git a/database/sql/src/main/scala/cromwell/database/sql/SqlConverters.scala b/database/sql/src/main/scala/cromwell/database/sql/SqlConverters.scala index fe1bf5f7dba..6d734bdea94 100644 --- a/database/sql/src/main/scala/cromwell/database/sql/SqlConverters.scala +++ b/database/sql/src/main/scala/cromwell/database/sql/SqlConverters.scala @@ -14,11 +14,12 @@ object SqlConverters { // https://github.com/slick/slick/issues/1026 implicit class TimestampToSystemOffsetDateTime(val timestamp: Timestamp) extends AnyVal { - def toSystemOffsetDateTime = timestamp.toLocalDateTime.atZone(ZoneId.systemDefault).toOffsetDateTime + def toSystemOffsetDateTime: OffsetDateTime = timestamp.toLocalDateTime.atZone(ZoneId.systemDefault).toOffsetDateTime } implicit class OffsetDateTimeToSystemTimestamp(val offsetDateTime: OffsetDateTime) extends AnyVal { - def toSystemTimestamp = Timestamp.valueOf(offsetDateTime.atZoneSameInstant(ZoneId.systemDefault).toLocalDateTime) + def toSystemTimestamp: Timestamp = + Timestamp.valueOf(offsetDateTime.atZoneSameInstant(ZoneId.systemDefault).toLocalDateTime) } implicit class ClobOptionToRawString(val clobOption: Option[Clob]) extends AnyVal { @@ -56,10 +57,11 @@ object SqlConverters { import eu.timepit.refined.api.Refined import eu.timepit.refined.collection.NonEmpty - def toClobOption: Option[SerialClob] = if (str.isEmpty) None else Option(new SerialClob(str.toCharArray)) + def toClobOption: Option[SerialClob] = + if (str == null || str.isEmpty) None else Option(new SerialClob(str.toCharArray)) def toClob(default: String Refined NonEmpty): SerialClob = { - val nonEmpty = if (str.isEmpty) default.value else str + val nonEmpty = if (str == null || str.isEmpty) default.value else str new SerialClob(nonEmpty.toCharArray) } } @@ -91,7 +93,7 @@ object SqlConverters { } implicit class BytesToBlobOption(val bytes: Array[Byte]) extends AnyVal { - def toBlobOption: Option[SerialBlob] = if (bytes.isEmpty) None else Option(new SerialBlob(bytes)) + def toBlobOption: Option[SerialBlob] = if (bytes == null || bytes.isEmpty) None else Option(new 
SerialBlob(bytes)) } implicit class EnhancedFiniteDuration(val duration: FiniteDuration) extends AnyVal { diff --git a/engine/src/test/scala/cromwell/engine/workflow/WorkflowDockerLookupActorSpec.scala b/engine/src/test/scala/cromwell/engine/workflow/WorkflowDockerLookupActorSpec.scala index ca87f0355c1..e238336b37d 100644 --- a/engine/src/test/scala/cromwell/engine/workflow/WorkflowDockerLookupActorSpec.scala +++ b/engine/src/test/scala/cromwell/engine/workflow/WorkflowDockerLookupActorSpec.scala @@ -36,13 +36,22 @@ class WorkflowDockerLookupActorSpec with Mockito { var workflowId: WorkflowId = _ + var dockerSendingActor: TestProbe = _ var dockerHashingActor: TestProbe = _ var numReads: Int = _ var numWrites: Int = _ before { workflowId = WorkflowId.randomId() - dockerHashingActor = TestProbe(s"test-probe-$workflowId") + /* + Instead of TestKit.self, use a custom global sender that we reset before each test. + + Otherwise, a latent failure/timeout from one test may be sent to the shared TestKit.self during a different test. + In that case a call to expectMsg() will suddenly receive an unexpected result. This especially happens in slow + running CI where the entire suite takes a few minutes to run. + */ + dockerSendingActor = TestProbe(s"test-sending-probe-$workflowId") + dockerHashingActor = TestProbe(s"test-hashing-probe-$workflowId") numReads = 0 numWrites = 0 } @@ -50,8 +59,11 @@ class WorkflowDockerLookupActorSpec it should "wait and resubmit the docker request when it gets a backpressure message" in { val backoff = SimpleExponentialBackoff(2.seconds, 10.minutes, 2D) - val lookupActor = TestActorRef(Props(new TestWorkflowDockerLookupActor(workflowId, dockerHashingActor.ref, Submitted, backoff)), self) - lookupActor ! LatestRequest + val lookupActor = TestActorRef( + Props(new TestWorkflowDockerLookupActor(workflowId, dockerHashingActor.ref, Submitted, backoff)), + dockerSendingActor.ref, + ) + lookupActor.tell(LatestRequest, dockerSendingActor.ref) dockerHashingActor.expectMsg(LatestRequest) dockerHashingActor.reply(BackPressure(LatestRequest)) @@ -66,28 +78,28 @@ } val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = false, db)) - lookupActor ! LatestRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) // The WorkflowDockerLookupActor should not have the hash for this tag yet and will need to query the dockerHashingActor. dockerHashingActor.expectMsg(LatestRequest) dockerHashingActor.reply(LatestSuccessResponse) // The WorkflowDockerLookupActor should forward the success message to this actor. - expectMsg(LatestSuccessResponse) + dockerSendingActor.expectMsg(LatestSuccessResponse) numWrites should equal(1) // Now the WorkflowDockerLookupActor should now have this hash in its mappings and should not query the dockerHashingActor again. - lookupActor ! LatestRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) dockerHashingActor.expectNoMessage() // The WorkflowDockerLookupActor should forward the success message to this actor. - expectMsg(LatestSuccessResponse) + dockerSendingActor.expectMsg(LatestSuccessResponse) numWrites should equal(1) } it should "soldier on after docker hashing actor timeouts" in { val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = false)) - lookupActor ! LatestRequest - lookupActor ! 
OlderRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) + lookupActor.tell(OlderRequest, dockerSendingActor.ref) val timeout = DockerHashActorTimeout(LatestRequest) @@ -100,7 +112,7 @@ class WorkflowDockerLookupActorSpec // DockerHashActor for this hash again. dockerHashingActor.reply(OlderSuccessResponse) - val results = receiveN(2, 2 seconds).toSet + val results = dockerSendingActor.receiveN(2, 2 seconds).toSet val failedRequests = results collect { case f: WorkflowDockerLookupFailure if f.request == LatestRequest => f.request } @@ -108,12 +120,12 @@ class WorkflowDockerLookupActorSpec failedRequests should equal(Set(LatestRequest)) // Try again. The hashing actor should receive the latest message and this time won't time out. - lookupActor ! LatestRequest - lookupActor ! OlderRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) + lookupActor.tell(OlderRequest, dockerSendingActor.ref) dockerHashingActor.expectMsg(LatestRequest) dockerHashingActor.reply(LatestSuccessResponse) - val responses = receiveN(2, 2 seconds).toSet + val responses = dockerSendingActor.receiveN(2, 2 seconds).toSet val hashResponses = responses collect { case msg: DockerInfoSuccessResponse => msg } // Success after transient timeout failures: hashResponses should equal(Set(LatestSuccessResponse, OlderSuccessResponse)) @@ -123,7 +135,7 @@ class WorkflowDockerLookupActorSpec it should "not fail and enter terminal state when response for certain image id from DockerHashingActor arrived after the self-imposed timeout" in { val lookupActor = TestFSMRef(new WorkflowDockerLookupActor(workflowId, dockerHashingActor.ref, isRestart = false, EngineServicesStore.engineDatabaseInterface)) - lookupActor ! LatestRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) val timeout = DockerHashActorTimeout(LatestRequest) @@ -132,15 +144,15 @@ class WorkflowDockerLookupActorSpec // WorkflowDockerLookupActor actually sends DockerHashActorTimeout to itself lookupActor.tell(timeout, lookupActor) - val failedRequest: WorkflowDockerLookupFailure = receiveOne(2 seconds).asInstanceOf[WorkflowDockerLookupFailure] + val failedRequest: WorkflowDockerLookupFailure = dockerSendingActor.receiveOne(2 seconds).asInstanceOf[WorkflowDockerLookupFailure] failedRequest.request shouldBe LatestRequest - lookupActor ! LatestRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) dockerHashingActor.expectMsg(LatestRequest) dockerHashingActor.reply(LatestSuccessResponse) // responding for previously timeouted request dockerHashingActor.reply(LatestSuccessResponse) // responding for current request - val hashResponse = receiveOne(2 seconds) + val hashResponse = dockerSendingActor.receiveOne(2 seconds) hashResponse shouldBe LatestSuccessResponse // Give WorkflowDockerLookupActor a chance to finish its unfinished business @@ -151,8 +163,8 @@ class WorkflowDockerLookupActorSpec it should "respond appropriately to docker hash lookup failures" in { val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = false)) - lookupActor ! LatestRequest - lookupActor ! OlderRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) + lookupActor.tell(OlderRequest, dockerSendingActor.ref) // The WorkflowDockerLookupActor should not have the hash for this tag yet and will need to query the dockerHashingActor. 
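The new dockerSendingActor probe implements the rationale in the comment near the top of this spec: messages sent with `!` from a TestKit use the suite-wide testActor as the implicit sender, so a late reply from one test can satisfy a later test's expectMsg. A self-contained sketch of the explicit-sender pattern, assuming akka-testkit (the echo actor is hypothetical, not from this suite):

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestProbe

object ExplicitSenderExample extends App {
  implicit val system: ActorSystem = ActorSystem("explicit-sender-example")
  // A throwaway actor that replies to whoever the sender of the message was.
  val echo = system.actorOf(Props(new Actor {
    override def receive: Receive = { case msg => sender() ! msg }
  }))
  val sendingProbe = TestProbe("sending-probe")
  // tell() with an explicit sender routes the reply to this probe rather than
  // the shared testActor, so leftovers from other tests cannot be mistaken for it.
  echo.tell("ping", sendingProbe.ref)
  sendingProbe.expectMsg("ping")
  system.terminate()
  ()
}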
dockerHashingActor.expectMsg(LatestRequest) @@ -162,7 +174,7 @@ class WorkflowDockerLookupActorSpec dockerHashingActor.reply(LatestSuccessResponse) dockerHashingActor.reply(olderFailedResponse) - val results = receiveN(2, 2 seconds).toSet + val results = dockerSendingActor.receiveN(2, 2 seconds).toSet val mixedResponses = results collect { case msg: DockerInfoSuccessResponse => msg // Scoop out the request here since we can't match the exception on the whole message. @@ -172,10 +184,10 @@ class WorkflowDockerLookupActorSpec Set(LatestSuccessResponse, OlderRequest) should equal(mixedResponses) // Try again, I have a good feeling about this. - lookupActor ! OlderRequest + lookupActor.tell(OlderRequest, dockerSendingActor.ref) dockerHashingActor.expectMsg(OlderRequest) dockerHashingActor.reply(OlderSuccessResponse) - expectMsg(OlderSuccessResponse) + dockerSendingActor.expectMsg(OlderSuccessResponse) } it should "reuse previously looked up hashes following a restart" in { @@ -186,12 +198,12 @@ class WorkflowDockerLookupActorSpec val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = true, db)) - lookupActor ! LatestRequest - lookupActor ! OlderRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) + lookupActor.tell(OlderRequest, dockerSendingActor.ref) dockerHashingActor.expectNoMessage() - val results = receiveN(2, 2 seconds).toSet + val results = dockerSendingActor.receiveN(2, 2 seconds).toSet val successes = results collect { case result: DockerInfoSuccessResponse => result } successes should equal(Set(LatestSuccessResponse, OlderSuccessResponse)) @@ -201,15 +213,15 @@ class WorkflowDockerLookupActorSpec val db = dbWithWrite(Future.successful(())) val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = false, db)) - lookupActor ! LatestRequest - lookupActor ! OlderRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) + lookupActor.tell(OlderRequest, dockerSendingActor.ref) dockerHashingActor.expectMsg(LatestRequest) dockerHashingActor.expectMsg(OlderRequest) dockerHashingActor.reply(LatestSuccessResponse) dockerHashingActor.reply(OlderSuccessResponse) - val results = receiveN(2, 2 seconds).toSet + val results = dockerSendingActor.receiveN(2, 2 seconds).toSet val successes = results collect { case result: DockerInfoSuccessResponse => result } successes should equal(Set(LatestSuccessResponse, OlderSuccessResponse)) @@ -222,21 +234,21 @@ class WorkflowDockerLookupActorSpec } val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = false, db)) - lookupActor ! LatestRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) // The WorkflowDockerLookupActor should not have the hash for this tag yet and will need to query the dockerHashingActor. dockerHashingActor.expectMsg(LatestRequest) dockerHashingActor.reply(LatestSuccessResponse) // The WorkflowDockerLookupActor is going to fail when it tries to write to that broken DB. - expectMsgClass(classOf[WorkflowDockerLookupFailure]) + dockerSendingActor.expectMsgClass(classOf[WorkflowDockerLookupFailure]) numWrites should equal(1) - lookupActor ! LatestRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) // The WorkflowDockerLookupActor will query the dockerHashingActor again. 
dockerHashingActor.expectMsg(LatestRequest) dockerHashingActor.reply(LatestSuccessResponse) // The WorkflowDockerLookupActor should forward the success message to this actor. - expectMsg(LatestSuccessResponse) + dockerSendingActor.expectMsg(LatestSuccessResponse) numWrites should equal(2) } @@ -247,10 +259,10 @@ class WorkflowDockerLookupActorSpec } val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = true, db)) - lookupActor ! LatestRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) dockerHashingActor.expectNoMessage() - expectMsgClass(classOf[WorkflowDockerTerminalFailure]) + dockerSendingActor.expectMsgClass(classOf[WorkflowDockerTerminalFailure]) numReads should equal(1) } @@ -265,10 +277,10 @@ class WorkflowDockerLookupActorSpec } val lookupActor = TestActorRef(WorkflowDockerLookupActor.props(workflowId, dockerHashingActor.ref, isRestart = true, db)) - lookupActor ! LatestRequest + lookupActor.tell(LatestRequest, dockerSendingActor.ref) dockerHashingActor.expectNoMessage() - expectMsgClass(classOf[WorkflowDockerTerminalFailure]) + dockerSendingActor.expectMsgClass(classOf[WorkflowDockerTerminalFailure]) numReads should equal(1) } diff --git a/filesystems/oss/src/main/scala/cromwell/filesystems/oss/batch/OssBatchIoCommand.scala b/filesystems/oss/src/main/scala/cromwell/filesystems/oss/batch/OssBatchIoCommand.scala index 9c7eb1b0b11..625a068a126 100644 --- a/filesystems/oss/src/main/scala/cromwell/filesystems/oss/batch/OssBatchIoCommand.scala +++ b/filesystems/oss/src/main/scala/cromwell/filesystems/oss/batch/OssBatchIoCommand.scala @@ -57,7 +57,10 @@ case class OssBatchDeleteCommand( override val file: OssPath, override val swallowIOExceptions: Boolean ) extends IoDeleteCommand(file, swallowIOExceptions) with OssBatchIoCommand[Unit, Void] { - def operation: Unit = file.ossClient.deleteObject(file.bucket, file.key) + def operation: Unit = { + file.ossClient.deleteObject(file.bucket, file.key) + () + } override protected def mapOssResponse(response: Void): Unit = () override def commandDescription: String = s"OssBatchDeleteCommand file '$file' swallowIOExceptions '$swallowIOExceptions'" } diff --git a/filesystems/oss/src/test/scala/cromwell/filesystems/oss/nio/OssNioUtilSpec.scala b/filesystems/oss/src/test/scala/cromwell/filesystems/oss/nio/OssNioUtilSpec.scala index b1ec9f08042..b01772e8170 100644 --- a/filesystems/oss/src/test/scala/cromwell/filesystems/oss/nio/OssNioUtilSpec.scala +++ b/filesystems/oss/src/test/scala/cromwell/filesystems/oss/nio/OssNioUtilSpec.scala @@ -98,6 +98,7 @@ trait OssNioUtilSpec extends AnyFlatSpecLike with CromwellTimeoutSpec with Mocki OssStorageRetry.from( () => ossClient.deleteObject(path.bucket, path.key) ) + () } def writeObject(path: OssStoragePath): Unit = { diff --git a/filesystems/s3/src/main/java/org/lerch/s3fs/S3FileSystemProvider.java b/filesystems/s3/src/main/java/org/lerch/s3fs/S3FileSystemProvider.java index 0095e372a43..1da53532d2b 100644 --- a/filesystems/s3/src/main/java/org/lerch/s3fs/S3FileSystemProvider.java +++ b/filesystems/s3/src/main/java/org/lerch/s3fs/S3FileSystemProvider.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.io.InputStream; +import java.lang.reflect.InvocationTargetException; import java.net.URI; import java.nio.channels.FileChannel; import java.nio.channels.SeekableByteChannel; @@ -440,9 +441,10 @@ public void copy(Path source, Path target, CopyOption... 
options) throws IOExcep s3Source.getFileSystem() .getClient() .copyObject(CopyObjectRequest.builder() - .copySource(bucketNameOrigin + "/" + keySource) - .bucket(bucketNameTarget) - .key(keyTarget) + .sourceBucket(bucketNameOrigin) + .sourceKey(keySource) + .destinationBucket(bucketNameTarget) + .destinationKey(keyTarget) .build()); } } @@ -497,10 +499,11 @@ private void multiPartCopy(S3Path source, long objectSize, S3Path target, CopyOp CompletableFuture uploadPartCopyResponseFuture = CompletableFuture.supplyAsync(() -> { final UploadPartCopyRequest uploadPartCopyRequest = UploadPartCopyRequest.builder() .uploadId(uploadId) - .copySource(source.getFileStore().name() + "/" + source.getKey()) + .sourceBucket(source.getFileStore().name()) + .sourceKey(source.getKey()) .copySourceRange("bytes=" + finalBytePosition + "-" + lastByte) - .bucket(target.getFileStore().name()) - .key(target.getKey()) + .destinationBucket(target.getFileStore().name()) + .destinationKey(target.getKey()) .partNumber(finalPartNum) .build(); UploadPartCopyResponse uploadPartCopyResponse = s3Client.uploadPartCopy(uploadPartCopyRequest); @@ -765,8 +768,13 @@ protected AmazonS3Factory getAmazonS3Factory(Properties props) { if (props.containsKey(AMAZON_S3_FACTORY_CLASS)) { String amazonS3FactoryClass = props.getProperty(AMAZON_S3_FACTORY_CLASS); try { - return (AmazonS3Factory) Class.forName(amazonS3FactoryClass).newInstance(); - } catch (InstantiationException | IllegalAccessException | ClassNotFoundException | ClassCastException e) { + return (AmazonS3Factory) Class.forName(amazonS3FactoryClass).getDeclaredConstructor().newInstance(); + } catch (InstantiationException + | IllegalAccessException + | ClassNotFoundException + | ClassCastException + | NoSuchMethodException + | InvocationTargetException e) { throw new S3FileSystemConfigurationException("Configuration problem, couldn't instantiate AmazonS3Factory (" + amazonS3FactoryClass + "): ", e); } } diff --git a/filesystems/s3/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider b/filesystems/s3/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider index 3a259796ab2..66974158b24 100644 --- a/filesystems/s3/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider +++ b/filesystems/s3/src/main/resources/META-INF/services/java.nio.file.spi.FileSystemProvider @@ -1,2 +1,2 @@ # if not present, FileSystems.newFileSystem throw NotProviderFoundException -com.upplication.s3fs.S3FileSystemProvider +org.lerch.s3fs.S3FileSystemProvider diff --git a/project/Dependencies.scala b/project/Dependencies.scala index c3127269699..8a2dd1b870c 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -1,120 +1,117 @@ import sbt._ object Dependencies { - private val akkaHttpCirceIntegrationV = "1.35.3" - private val akkaHttpV = "10.1.12" // scala-steward:off (CROM-6619) - private val akkaV = "2.5.31" // scala-steward:off (CROM-6637) + private val akkaHttpCirceIntegrationV = "1.37.0" + private val akkaHttpV = "10.1.14" // scala-steward:off (CROM-6619) + private val akkaV = "2.5.32" // scala-steward:off (CROM-6637) private val aliyunBcsV = "6.2.4" - private val aliyunCoreV = "4.5.20" - private val aliyunCrV = "4.1.1" - private val aliyunOssV = "3.11.3" - private val ammoniteOpsV = "2.3.8" - private val apacheCommonNetV = "3.8.0" + private val aliyunCoreV = "4.5.25" + private val aliyunCrV = "4.1.2" + private val aliyunOssV = "3.13.1" + private val ammoniteOpsV = "2.4.0" private val apacheHttpClientV = "4.5.13" - private 
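The copyObject and uploadPartCopy edits above go hand in hand with the awsSdkV bump just below (2.15.41 to 2.17.29): newer AWS SDK for Java v2 builders accept the source bucket and key as separate fields instead of the URL-encoded "bucket/key" copySource string. A hedged sketch of the new shape (bucket and key names are placeholders):

import software.amazon.awssdk.services.s3.S3Client
import software.amazon.awssdk.services.s3.model.CopyObjectRequest

object S3CopySketch {
  def copyExample(): Unit = {
    val s3Client: S3Client = S3Client.create()
    // Source and destination are given as four separate components; no manual
    // "bucket/key" concatenation or URL encoding of the key is required.
    val copyRequest = CopyObjectRequest.builder()
      .sourceBucket("source-bucket")
      .sourceKey("path/to/source-object")
      .destinationBucket("destination-bucket")
      .destinationKey("path/to/destination-object")
      .build()
    s3Client.copyObject(copyRequest)
    ()
  }
}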
val awsSdkV = "2.15.41" // scala-steward:off (CROM-6776) + private val awsSdkV = "2.17.29" private val betterFilesV = "3.9.1" - private val catsEffectV = "2.3.3" - private val catsV = "2.3.1" + /* + cats-effect, fs2, http4s, and sttp (also to v3) should all be upgraded at the same time to use cats-effect 3.x. + */ + private val catsEffectV = "2.5.3" // scala-steward:off (CROM-6564) + private val catsV = "2.6.1" private val circeConfigV = "0.8.0" - private val circeGenericExtrasV = "0.13.0" - private val circeOpticsV = "0.13.0" - private val circeV = "0.13.0" - private val circeYamlV = "0.13.1" - private val commonsCodecV = "1.15" - private val commonsIoV = "2.8.0" - private val commonsLang3V = "3.11" + private val circeGenericExtrasV = "0.14.1" + private val circeOpticsV = "0.14.1" + private val circeV = "0.14.1" + private val circeYamlV = "0.14.1" + private val commonsCodecV = "1.15" // via: https://commons.apache.org/proper/commons-codec/ + private val commonsCsvV = "1.9.0" + private val commonsIoV = "2.11.0" // via: https://commons.apache.org/proper/commons-io/ + private val commonsLang3V = "3.12.0" private val commonsMathV = "3.6.1" + private val commonNetV = "3.8.0" // via: https://commons.apache.org/proper/commons-net/ private val commonsTextV = "1.9" - private val configsV = "0.6.0" + private val configsV = "0.6.1" private val delightRhinoSandboxV = "0.0.15" + private val diffsonSprayJsonV = "4.1.1" private val ficusV = "1.5.0" // The "com.vladsch.flexmark" % "flexmark-profile-pegdown" % flexmarkV dependency is an implicit, version-specific // runtime dependency of ScalaTest. At the time of this writing this is the newest version known to work. private val flexmarkV = "0.36.8" // scala-steward:off - private val fs2V = "2.5.4" + private val fs2V = "2.5.9" // scala-steward:off (CROM-6564) // Scala Steward opened PR #5775 titled "Update fs2-io from 2.0.1 to 2.4.3" to upgrade the following dependency. // However that PR was actually attempting an upgrade from 1.0.5 to 2.4.3 which is a much more significant // undertaking, resulting in some thoroughly broken statsd proxy code. As this component lacks tests and is // probably not the most important corner of the Cromwell repo, going to punt on this for now. + // NOTE: fs2VStatsDProxy usage within allProjectDependencies breaks sbt whatDependsOn. If you want to use + // whatDependsOn temporarily set fs2VStatsDProxy to fs2V and revert back before trying to compile. 
private val fs2VStatsDProxy = "1.0.5" // scala-steward:off - private val googleApiClientV = "1.31.3" - private val googleCloudBigQueryV = "1.127.11" - private val googleCloudKmsV = "v1-rev20210312-1.31.0" - private val googleCloudMonitoringV = "2.0.14" - private val googleCloudNioV = "0.61.0-alpha" // scala-steward:off - private val googleCloudStorageV = "1.113.14" - private val googleGaxGrpcV = "1.62.0" - private val googleGenomicsServicesV2Alpha1ApiV = "v2alpha1-rev20210322-1.31.0" + private val googleApiClientV = "1.32.1" + private val googleCloudBigQueryV = "2.1.4" + // latest date via: https://mvnrepository.com/artifact/com.google.apis/google-api-services-cloudkms + private val googleCloudKmsV = "v1-rev20210812-1.32.1" + private val googleCloudMonitoringV = "3.0.2" + private val googleCloudNioV = "0.123.8" + private val googleCloudStorageV = "2.1.0" + private val googleGaxGrpcV = "2.4.0" + // latest date via: https://mvnrepository.com/artifact/com.google.apis/google-api-services-genomics + private val googleGenomicsServicesV2Alpha1ApiV = "v2alpha1-rev20210811-1.32.1" private val googleHttpClientApacheV = "2.1.2" private val googleHttpClientV = "1.38.0" - private val googleLifeSciencesServicesV2BetaApiV = "v2beta-rev20210319-1.31.0" - private val googleOauth2V = "0.22.2" - private val googleOauthClientV = "1.31.4" - private val googleCloudResourceManagerV = "0.87.0-alpha" - private val grpcV = "1.36.1" + // latest date via: https://mvnrepository.com/artifact/com.google.apis/google-api-services-lifesciences + private val googleLifeSciencesServicesV2BetaApiV = "v2beta-rev20210813-1.32.1" + private val googleOauth2V = "1.1.0" + private val googleOauthClientV = "1.32.1" + private val googleCloudResourceManagerV = "1.0.4" + private val grpcV = "1.40.1" private val guavaV = "30.1.1-jre" private val heterodonV = "1.0.0-beta3" - private val hsqldbV = "2.5.1" - private val http4sVersion = "0.21.7" // scala-steward:off (CROM-6678) - private val jacksonV = "2.12.2" - private val jacksonJqV = "1.0.0-preview.20201123" - private val janinoV = "3.1.3" - private val javaxActivationV = "1.2.0" - // jaxb-impl 2.3.3 depends on com.sun.activation:jakarta.activation and jakarta.xml.bind:jakarta.xml.bind-api, - // which jaxb-impl 2.3.2 did not. jakarta.activation corresponds to the "updated" Maven coordinates for the Java - // Activation Framework (https://wiki.eclipse.org/Jakarta_EE_Maven_Coordinates), but Cromwell has many transitive - // dependencies on the "old" javax.activation coordinates as well. At assembly time the classes from these two - // different jars collide and produce merge conflicts. - // It's possible that after updating all other dependencies in Cromwell we might purge the transitive dependencies on - // javax.activation and then be able to upgrade to jaxb-impl 2.3.3 or beyond, but some of those other dependencies - // such as googleCloudNioV have already been pinned for Scala Steward so this might not be a trivial undertaking. 
- private val jaxbV = "2.3.2" // scala-steward:off - private val kindProjectorV = "0.9.9" - private val kittensV = "2.2.1" + private val hsqldbV = "2.6.0" + private val http4sV = "0.21.7" // scala-steward:off (CROM-6678) + private val jacksonV = "2.12.5" + private val janinoV = "3.1.6" + private val jsr305V = "3.0.2" + private val kindProjectorV = "0.9.10" + private val kittensV = "2.3.2" private val liquibaseSlf4jV = "4.0.0" - // Scala Steward wanted to upgrade liquibase-core to 3.10.2 but that version does not find some uniqueness - // constraints and models datatypes in ways that are incompatible with our test expectations. - // liquibase-core 4.0.0 did not have either of those problems but produced tons of strange warnings at runtime - // similar in form to this: https://github.com/liquibase/liquibase/issues/1294 - // Pinning Liquibase version for the time being. - private val liquibaseV = "3.6.3" // scala-steward:off - private val logbackV = "1.2.3" - private val lz4JavaV = "1.7.1" - private val mariadbV = "2.7.2" - private val metrics3ScalaV = "4.0.0" // https://github.com/erikvanoosten/metrics-scala/tree/f733e26#download-4x + private val liquibaseV = "4.4.0" // 4.4.1+ needs https://github.com/liquibase/liquibase/pull/2001 + private val logbackV = "1.2.5" + private val lz4JavaV = "1.8.0" + private val mariadbV = "2.7.4" + /* + The StatsD reporter for Dropwizard's (Coda Hale's) Metrics 3.x still works with Metrics 4.x. + Still would be great to move to Prometheus / OpenCensus + */ + private val metrics4ScalaV = "4.1.19" private val metrics3StatsdV = "4.2.0" private val mockFtpServerV = "2.8.0" - private val mockserverNettyV = "5.5.4" // scala-steward:off (CROM-6582) - private val mouseV = "1.0.0" - private val mysqlV = "8.0.22" // scala-steward:off (CROM-6772: DateTime/String conversion and connection string updates required) - private val nettyV = "4.1.46.Final" - private val owlApiV = "5.1.16" // scala-steward:off (CROM-6677) + private val mockserverNettyV = "5.11.2" + private val mouseV = "1.0.4" + private val mysqlV = "8.0.26" + private val nettyV = "4.1.66.Final" + private val owlApiV = "5.1.19" private val paradiseV = "2.1.1" private val pegdownV = "1.6.0" - // For org.postgresql:postgresql 42.2.6 - 42.2.14: - // java.lang.NoSuchFieldException: m_mesgParts in KeyValueSpec "fail if one of the inserts fails" - private val postgresV = "42.2.5" // scala-steward:off - private val rdf4jV = "2.4.2" - private val refinedV = "0.9.22" + private val postgresV = "42.2.23" + private val pprintV = "0.6.6" + private val rdf4jV = "3.7.1" + private val refinedV = "0.9.27" private val rhinoV = "1.7.13" + private val scalaCollectionCompatV = "2.5.0" private val scalaGraphV = "1.13.1" - private val scalaLoggingV = "3.9.3" + private val scalaLoggingV = "3.9.4" private val scalaPoolV = "0.4.3" - private val scalacheckV = "1.15.3" - private val scalacticV = "3.2.6" + private val scalacheckV = "1.15.4" + private val scalacticV = "3.2.9" private val scalameterV = "0.19" private val scalamockV = "5.1.0" - private val scalatestV = "3.2.6" + private val scalatestV = "3.2.9" private val scalatestPlusMockitoV = "1.0.0-M2" - private val scalazV = "7.3.3" + private val scalazV = "7.3.5" private val scoptV = "4.0.1" - private val sentryLogbackV = "1.7.30" // scala-steward:off (CROM-6640) - private val shapelessV = "2.3.3" + private val sentryLogbackV = "5.1.2" + private val shapelessV = "2.3.7" private val simulacrumV = "1.0.1" - private val slf4jV = "1.7.30" + private val slf4jV = "1.7.32" private val 
slickCatsV = "0.10.4" - private val testContainersScalaV = "0.38.9" - /* If you're about to update our Slick version: * Consider checking whether the new Slick version passes tests with upserts enabled (eg KeyValueDatabaseSpec) * @@ -124,17 +121,18 @@ object Dependencies { * Related Slick PR: https://github.com/slick/slick/pull/2101 */ private val slickV = "3.3.2-2076hotfix" // scala-steward:off (CROM-6620) - private val snakeyamlV = "1.28" - private val specs2MockV = "4.10.6" + private val snakeyamlV = "1.29" + private val specs2MockV = "4.12.3" private val sprayJsonV = "1.3.6" private val sttpV = "1.5.19" // scala-steward:off (CROM-6564) - private val swaggerParserV = "1.0.54" + private val swaggerParserV = "1.0.55" private val swaggerUiV = "3.23.11" // scala-steward:off (CROM-6621) - private val tikaV = "1.25" // scala-steward:off (CROM-6771) - javax vs jaxb + private val testContainersScalaV = "0.39.6" + private val tikaV = "2.1.0" private val typesafeConfigV = "1.4.1" - private val workbenchGoogleV = "0.15-2fc79a3" - private val workbenchModelV = "0.14-27810079-SNAP" - private val workbenchUtilV = "0.6-27810079-SNAP" + private val workbenchGoogleV = "0.21-5c9c4f6" // via: https://github.com/broadinstitute/workbench-libs/blob/develop/google/CHANGELOG.md + private val workbenchModelV = "0.14-89d0d9e" // via: https://github.com/broadinstitute/workbench-libs/blob/develop/model/CHANGELOG.md + private val workbenchUtilV = "0.6-d7ed6bf" // via: https://github.com/broadinstitute/workbench-libs/blob/develop/util/CHANGELOG.md private val slf4jFacadeDependencies = List( "org.slf4j" % "slf4j-api" % slf4jV, @@ -162,9 +160,9 @@ object Dependencies { ) private val http4sDependencies = List( - "org.http4s" %% "http4s-dsl" % http4sVersion, - "org.http4s" %% "http4s-blaze-client" % http4sVersion, - "org.http4s" %% "http4s-circe" % http4sVersion + "org.http4s" %% "http4s-dsl" % http4sV, + "org.http4s" %% "http4s-blaze-client" % http4sV, + "org.http4s" %% "http4s-circe" % http4sV, ) private val googleApiClientDependencies = List( @@ -180,7 +178,7 @@ object Dependencies { "com.google.cloud" % "google-cloud-resourcemanager" % googleCloudResourceManagerV, ) - val spiDependencies = List( + val spiDependencies: List[ModuleID] = List( "com.iheart" %% "ficus" % ficusV, ) ++ googleApiClientDependencies ++ slf4jFacadeDependencies @@ -190,14 +188,14 @@ object Dependencies { ) val implFtpDependencies = List( - "commons-net" % "commons-net" % apacheCommonNetV, + "commons-net" % "commons-net" % commonNetV, "io.github.andrebeat" %% "scala-pool" % scalaPoolV, "com.google.guava" % "guava" % guavaV, "org.scalamock" %% "scalamock" % scalamockV % Test, "org.mockftpserver" % "MockFtpServer" % mockFtpServerV % Test ) - val implDrsDependencies = List( + val implDrsDependencies: List[ModuleID] = List( "org.apache.commons" % "commons-lang3" % commonsLang3V, "com.google.cloud" % "google-cloud-storage" % googleCloudStorageV, "com.google.oauth-client" % "google-oauth-client" % googleOauthClientV @@ -244,9 +242,6 @@ object Dependencies { private val liquibaseDependencies = List( "org.liquibase" % "liquibase-core" % liquibaseV, - // The exclusion below will be needed if / when liquibase-core is upgraded to 3.10+ - // Avert collision with jakarta.xml.bind-api - // exclude("javax.xml.bind", "jaxb-api"), // This is to stop liquibase from being so noisy by default // See: http://stackoverflow.com/questions/20880783/how-to-get-liquibase-to-log-using-slf4j "com.mattbertolini" % "liquibase-slf4j" % liquibaseSlf4jV @@ -297,7 +292,7 @@ 
object Dependencies { Instead the code has been re-forked into this repo, just like many of the other FileSystemProvider extensions. */ private val s3fsDependencies = List( - "com.google.code.findbugs" % "jsr305" % "3.0.2", + "com.google.code.findbugs" % "jsr305" % jsr305V, "com.google.guava" % "guava" % guavaV, "org.apache.tika" % "tika-core" % tikaV, "software.amazon.awssdk" % "s3" % awsSdkV, @@ -330,48 +325,16 @@ object Dependencies { private val aliyunOssDependencies = List( "com.aliyun.oss" % "aliyun-sdk-oss" % aliyunOssV - // stax is included twice by oss 3.1.0 and cause assembly merge conflicts via stax vs. javax.xml.stream - exclude("stax", "stax-api") - // Exclude jersey-json until aliyun-sdk-oss >3.4.0 is published - // https://github.com/aliyun/aliyun-oss-java-sdk/pull/149 - exclude("com.sun.jersey", "jersey-json") - // jaxb-api and jaxb-core and included in jaxb-impl as of 2.3.1 - // https://github.com/eclipse-ee4j/jaxb-ri/issues/1168 - exclude("javax.xml.bind", "jaxb-api") - exclude("com.sun.xml.bind", "jaxb-core") - // javax.activation:activation has been replaced. https://stackoverflow.com/a/46493809 - // The old version was causing an assembly merge conflict. - exclude("javax.activation", "activation"), - "com.sun.activation" % "javax.activation" % javaxActivationV, - "com.sun.xml.bind" % "jaxb-impl" % jaxbV, - "org.glassfish.jaxb" % "jaxb-runtime" % jaxbV - // already included in com.sun.activation - exclude("jakarta.activation", "jakarta.activation-api"), ) private val aliyunBatchComputeDependencies = List( "com.aliyun" % "aliyun-java-sdk-batchcompute" % aliyunBcsV, "com.aliyun" % "aliyun-java-sdk-core" % aliyunCoreV - // jaxb-api and jaxb-core and included in jaxb-impl as of 2.3.1 - // https://github.com/eclipse-ee4j/jaxb-ri/issues/1168 - exclude("javax.xml.bind", "jaxb-api") - exclude("com.sun.xml.bind", "jaxb-core") - // javax.activation:activation has been replaced. https://stackoverflow.com/a/46493809 - // The old version was causing an assembly merge conflict. - exclude("javax.activation", "activation"), - "com.sun.activation" % "javax.activation" % javaxActivationV, - "com.sun.xml.bind" % "jaxb-impl" % jaxbV, - "org.glassfish.jaxb" % "jaxb-runtime" % jaxbV - // already included in com.sun.activation - exclude("jakarta.activation", "jakarta.activation-api"), ) private val aliyunCrDependencies = List( "com.aliyun" % "aliyun-java-sdk-cr" % aliyunCrV, - "com.aliyun" % "aliyun-java-sdk-core" % aliyunCoreV - exclude("javax.xml.bind", "jaxb-api") - exclude("com.sun.xml.bind", "jaxb-core") - exclude("javax.activation", "activation"), + "com.aliyun" % "aliyun-java-sdk-core" % aliyunCoreV, "com.typesafe.akka" %% "akka-http-spray-json" % akkaHttpV ) @@ -388,21 +351,21 @@ object Dependencies { // Sub-project dependencies, added in addition to any dependencies inherited from .dependsOn(). 
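The exclude chains deleted above were only there to keep duplicate javax/jakarta classes out of the assembly; they are dropped here along with the jaxb/activation pins, presumably because the bumped Aliyun artifacts (aliyunOssV 3.13.1, aliyunCoreV 4.5.25) no longer drag in the colliding coordinates. For reference, a build.sbt-style sketch of the two sbt mechanisms this file leans on, a per-artifact exclusion versus a resolution-wide pin (the coordinates in the first stanza are illustrative):

// Per-artifact exclusion: drop one transitive dependency of one library.
libraryDependencies += ("com.example" % "some-sdk" % "1.2.3")
  .exclude("javax.xml.bind", "jaxb-api")

// Resolution-wide pin: whatever pulls in these netty modules gets this version,
// mirroring the nettyDependencyOverrides pattern further down in this file.
dependencyOverrides ++= List("buffer", "codec", "handler")
  .map(m => "io.netty" % s"netty-$m" % "4.1.66.Final")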
- val commonDependencies = List( + val commonDependencies: List[ModuleID] = List( "org.typelevel" %% "cats-effect" % catsEffectV, "org.apache.commons" % "commons-lang3" % commonsLang3V, "org.apache.commons" % "commons-text" % commonsTextV, - "com.lihaoyi" %% "pprint" % "0.6.3", + "com.lihaoyi" %% "pprint" % pprintV, ) ++ catsDependencies ++ configDependencies ++ slf4jFacadeDependencies ++ refinedTypeDependenciesList - val cloudSupportDependencies = googleApiClientDependencies ++ googleCloudDependencies ++ betterFilesDependencies ++ awsCloudDependencies + val cloudSupportDependencies: List[ModuleID] = googleApiClientDependencies ++ googleCloudDependencies ++ betterFilesDependencies ++ awsCloudDependencies - val databaseSqlDependencies = List( + val databaseSqlDependencies: List[ModuleID] = List( "commons-io" % "commons-io" % commonsIoV, ) ++ configDependencies ++ catsDependencies ++ slickDependencies ++ dbmsDependencies ++ refinedTypeDependenciesList val statsDDependencies = List( - "nl.grons" %% "metrics-scala" % metrics3ScalaV, + "nl.grons" %% "metrics4-scala" % metrics4ScalaV, "com.readytalk" % "metrics3-statsd" % metrics3StatsdV ) @@ -410,29 +373,29 @@ object Dependencies { "com.google.cloud" % "google-cloud-monitoring" % googleCloudMonitoringV ) - val gcsFileSystemDependencies = akkaHttpDependencies + val gcsFileSystemDependencies: List[ModuleID] = akkaHttpDependencies - val httpFileSystemDependencies = akkaHttpDependencies + val httpFileSystemDependencies: List[ModuleID] = akkaHttpDependencies - val ossFileSystemDependencies = googleCloudDependencies ++ aliyunOssDependencies ++ List( + val ossFileSystemDependencies: List[ModuleID] = googleCloudDependencies ++ aliyunOssDependencies ++ List( "com.github.pathikrit" %% "better-files" % betterFilesV ) - val statsDProxyDependencies = List( + val statsDProxyDependencies: List[ModuleID] = List( "co.fs2" %% "fs2-io" % fs2VStatsDProxy, "com.iheart" %% "ficus" % ficusV, "com.google.cloud" % "google-cloud-nio" % googleCloudNioV ) ++ commonDependencies - val womDependencies = List( + val womDependencies: List[ModuleID] = List( "com.typesafe.scala-logging" %% "scala-logging" % scalaLoggingV, "io.spray" %% "spray-json" % sprayJsonV, "org.scalacheck" %% "scalacheck" % scalacheckV % Test, "org.typelevel" %% "simulacrum" % simulacrumV, - "commons-codec" % "commons-codec" % commonsCodecV, + "commons-codec" % "commons-codec" % commonsCodecV ) ++ circeDependencies ++ refinedTypeDependenciesList - val wdlDependencies = List( + val wdlDependencies: List[ModuleID] = List( "commons-io" % "commons-io" % commonsIoV, "org.scala-graph" %% "graph-core" % scalaGraphV, "com.chuusai" %% "shapeless" % shapelessV @@ -470,7 +433,7 @@ object Dependencies { "org.apache.httpcomponents" % "httpclient" % apacheHttpClientV ) - val cwlDependencies = List( + val cwlDependencies: List[ModuleID] = List( "com.lihaoyi" %% "ammonite-ops" % ammoniteOpsV, "org.broadinstitute" % "heterodon" % heterodonV classifier "single", "org.scalactic" %% "scalactic" % scalacticV, @@ -482,42 +445,42 @@ object Dependencies { "commons-io" % "commons-io" % commonsIoV % Test ) ++ betterFilesDependencies ++ owlApiDependencies - val womtoolDependencies = catsDependencies ++ slf4jBindingDependencies + val womtoolDependencies: List[ModuleID] = catsDependencies ++ slf4jBindingDependencies - val centaurCwlRunnerDependencies = List( + val centaurCwlRunnerDependencies: List[ModuleID] = List( "com.github.scopt" %% "scopt" % scoptV, "io.circe" %% "circe-optics" % circeOpticsV ) ++ slf4jBindingDependencies - val 
coreDependencies = List( + val coreDependencies: List[ModuleID] = List( "com.google.auth" % "google-auth-library-oauth2-http" % googleOauth2V, "com.chuusai" %% "shapeless" % shapelessV, "com.storm-enroute" %% "scalameter" % scalameterV % Test, "com.github.scopt" %% "scopt" % scoptV, - "org.scalamock" %% "scalamock" % scalamockV % Test, + "org.scalamock" %% "scalamock" % scalamockV % Test ) ++ akkaStreamDependencies ++ configDependencies ++ catsDependencies ++ circeDependencies ++ googleApiClientDependencies ++ statsDDependencies ++ betterFilesDependencies ++ // TODO: We're not using the "F" in slf4j. Core only supports logback, specifically the WorkflowLogger. slf4jBindingDependencies ++ stackdriverDependencies - val databaseMigrationDependencies = liquibaseDependencies ++ dbmsDependencies + val databaseMigrationDependencies: List[ModuleID] = liquibaseDependencies ++ dbmsDependencies - val dockerHashingDependencies = http4sDependencies ++ circeDependencies ++ aliyunCrDependencies + val dockerHashingDependencies: List[ModuleID] = http4sDependencies ++ circeDependencies ++ aliyunCrDependencies - val cromwellApiClientDependencies = List( + val cromwellApiClientDependencies: List[ModuleID] = List( "org.scalaz" %% "scalaz-core" % scalazV, "org.typelevel" %% "cats-effect" % catsEffectV, "co.fs2" %% "fs2-io" % fs2V % Test, ) ++ akkaHttpDependencies ++ betterFilesDependencies ++ catsDependencies - val centaurDependencies = List( + val centaurDependencies: List[ModuleID] = List( "org.apache.commons" % "commons-math3" % commonsMathV, "com.github.kxbmap" %% "configs" % configsV, "com.google.cloud" % "google-cloud-bigquery" % googleCloudBigQueryV % IntegrationTest, - "org.gnieh" %% "diffson-spray-json" % "4.0.3" + "org.gnieh" %% "diffson-spray-json" % diffsonSprayJsonV ) ++ circeDependencies ++ slf4jBindingDependencies ++ cloudSupportDependencies ++ http4sDependencies - val engineDependencies = List( + val engineDependencies: List[ModuleID] = List( "commons-codec" % "commons-codec" % commonsCodecV, "commons-io" % "commons-io" % commonsIoV, "com.storm-enroute" %% "scalameter" % scalameterV @@ -530,12 +493,12 @@ object Dependencies { val servicesDependencies = List( "com.google.api" % "gax-grpc" % googleGaxGrpcV, - "org.apache.commons" % "commons-csv" % "1.8", + "org.apache.commons" % "commons-csv" % commonsCsvV, ) - val serverDependencies = slf4jBindingDependencies + val serverDependencies: List[ModuleID] = slf4jBindingDependencies - val cromiamDependencies = List( + val cromiamDependencies: List[ModuleID] = List( "com.softwaremill.sttp" %% "core" % sttpV, "com.softwaremill.sttp" %% "async-http-client-backend-future" % sttpV, "com.typesafe.scala-logging" %% "scala-logging" % scalaLoggingV, @@ -543,20 +506,22 @@ object Dependencies { "org.broadinstitute.dsde.workbench" %% "workbench-util" % workbenchUtilV ) ++ akkaHttpDependencies ++ swaggerUiDependencies ++ slf4jBindingDependencies - val wes2cromwellDependencies = coreDependencies ++ akkaHttpDependencies + val wes2cromwellDependencies: List[ModuleID] = coreDependencies ++ akkaHttpDependencies val backendDependencies = List( "org.scalacheck" %% "scalacheck" % scalacheckV % Test, "co.fs2" %% "fs2-io" % fs2V ) - val bcsBackendDependencies = commonDependencies ++ refinedTypeDependenciesList ++ aliyunBatchComputeDependencies - val tesBackendDependencies = akkaHttpDependencies + val bcsBackendDependencies: List[ModuleID] = commonDependencies ++ refinedTypeDependenciesList ++ aliyunBatchComputeDependencies + + val tesBackendDependencies: List[ModuleID] = 
akkaHttpDependencies + val sfsBackendDependencies = List ( "org.lz4" % "lz4-java" % lz4JavaV ) - val testDependencies = List( + val testDependencies: List[ModuleID] = List( "org.scalatest" %% "scalatest" % scalatestV, "org.scalatestplus" %% "scalatestplus-mockito" % scalatestPlusMockitoV, "com.vladsch.flexmark" % "flexmark-profile-pegdown" % flexmarkV, @@ -572,12 +537,12 @@ object Dependencies { val paradisePlugin = "org.scalamacros" % "paradise" % paradiseV cross CrossVersion.full // Version of the swagger UI to write into config files - val swaggerUiVersion = swaggerUiV + val swaggerUiVersion: String = swaggerUiV - val perfDependencies = circeDependencies ++ betterFilesDependencies ++ commonDependencies ++ + val perfDependencies: List[ModuleID] = circeDependencies ++ betterFilesDependencies ++ commonDependencies ++ googleApiClientDependencies ++ googleCloudDependencies - val drsLocalizerDependencies = List( + val drsLocalizerDependencies: List[ModuleID] = List( "com.google.auth" % "google-auth-library-oauth2-http" % googleOauth2V, "com.google.cloud" % "google-cloud-storage" % googleCloudStorageV, "org.typelevel" %% "cats-effect" % catsEffectV, @@ -585,7 +550,7 @@ object Dependencies { "com.softwaremill.sttp" %% "circe" % sttpV ) ++ circeDependencies ++ catsDependencies ++ slf4jBindingDependencies ++ languageFactoryDependencies - val allProjectDependencies = + val allProjectDependencies: List[ModuleID] = backendDependencies ++ bcsBackendDependencies ++ centaurCwlRunnerDependencies ++ @@ -644,7 +609,7 @@ object Dependencies { "com.google.http-client" % "google-http-client" % googleHttpClientV, ) - val nettyDependencyOverrides = List( + val nettyDependencyOverrides: List[ModuleID] = List( "buffer", "codec", "codec-dns", @@ -660,7 +625,7 @@ object Dependencies { "transport-native-unix-common", ).map(m => "io.netty" % s"netty-$m" % nettyV) - val rdf4jDependencyOverrides = List( + val rdf4jDependencyOverrides: List[ModuleID] = List( /* Yes. All of these are required to lock in the rdf4j version. @@ -689,7 +654,7 @@ object Dependencies { ).map(m => "org.eclipse.rdf4j" % s"rdf4j-$m" % rdf4jV) // Some libraries are importing older version of these dependencies, causing conflicts. Hence the need to override them. - val grpcDependencyOverrides = List( + val grpcDependencyOverrides: List[ModuleID] = List( "alts", "auth", "context", @@ -701,14 +666,23 @@ object Dependencies { "stub", ).map(m => "io.grpc" % s"grpc-$m" % grpcV) + /* + Ensure we're using the latest to avoid a shading bug in earlier versions of scala-collection-compat. 
+ https://github.com/scala/scala-collection-compat/issues/426 + */ + private val scalaCollectionCompatOverrides = List( + "org.scala-lang.modules" %% "scala-collection-compat" % scalaCollectionCompatV, + ) + /* If we use a version in one of our projects, that's the one we want all the libraries to use ...plus other groups of transitive dependencies shared across multiple projects */ - val cromwellDependencyOverrides = + val cromwellDependencyOverrides: List[ModuleID] = allProjectDependencies ++ googleHttpClientDependencies ++ nettyDependencyOverrides ++ rdf4jDependencyOverrides ++ - grpcDependencyOverrides + grpcDependencyOverrides ++ + scalaCollectionCompatOverrides } diff --git a/project/Merging.scala b/project/Merging.scala index 964d342a234..53135adce26 100644 --- a/project/Merging.scala +++ b/project/Merging.scala @@ -4,8 +4,8 @@ import sbtassembly.{MergeStrategy, PathList} object Merging { val customMergeStrategy: Def.Initialize[String => MergeStrategy] = Def.setting { - case PathList(ps@_*) if ps.last == "project.properties" => - // Merge/Filter project.properties files from Google jars that otherwise collide at merge time. + case PathList(ps@_*) if Set("project.properties", "execution.interceptors").contains(ps.last) => + // Merge/Filter files from AWS/Google jars that otherwise collide at merge time. MergeStrategy.filterDistinctLines case PathList(ps@_*) if ps.last == "logback.xml" => MergeStrategy.first @@ -16,12 +16,27 @@ object Merging { path map { _.toLowerCase } match { - case "spring.tooling" :: xs => + case "spring.tooling" :: _ => MergeStrategy.discard case "io.netty.versions.properties" :: Nil => MergeStrategy.first - case "maven" :: "com.google.guava" :: xs => + case "maven" :: "com.google.guava" :: _ => MergeStrategy.first + case "native-image" :: _ if Set("native-image.properties", "reflection-config.json").contains(path.last) => + /* + Discard GraalVM configuration files. + grpc-netty-shaded 1.39.0 tried to put the netty classes into a different package, but left the shaded version + of the config file with the same name as the unshaded netty library. Thus when merging the shaded and + unshaded netty jars we end up with assembly conflicts. + + However, we're not using GraalVM for execution so just discard the configuration files. + + See also: + - https://www.graalvm.org/reference-manual/native-image/BuildConfiguration/#configuration-file-format + - https://github.com/grpc/grpc-java/issues/7540 + - https://github.com/grpc/grpc-java/releases/tag/v1.39.0 + */ + MergeStrategy.discard case _ => val oldStrategy = (assembly / assemblyMergeStrategy).value oldStrategy(x) diff --git a/project/Publishing.scala b/project/Publishing.scala index b10b5ab9dda..2b70cd0e559 100644 --- a/project/Publishing.scala +++ b/project/Publishing.scala @@ -47,7 +47,7 @@ object Publishing { val artifact: File = assembly.value val artifactTargetPath = s"/app/${artifact.name}" val projectName = name.value - val additionalDockerInstr: Seq[Instruction] = dockerCustomSettings.value + val additionalDockerInstr: Seq[Instruction] = (dockerCustomSettings ?? 
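The Merging.scala cases above are easiest to read as one partial function from jar-entry path to merge strategy. A condensed sketch of that shape, assuming sbt-assembly on the plugin classpath (the catch-all is shown as MergeStrategy.first for brevity; the real code defers to the previously configured strategy):

import sbtassembly.{MergeStrategy, PathList}

val exampleStrategy: String => MergeStrategy = {
  // Duplicate metadata files from AWS/Google jars that can be merged line by line.
  case PathList(ps @ _*) if Set("project.properties", "execution.interceptors").contains(ps.last) =>
    MergeStrategy.filterDistinctLines
  // GraalVM build configuration is unused at runtime, so colliding copies
  // (e.g. shaded vs. unshaded grpc-netty) can simply be dropped.
  case PathList(ps @ _*) if ps.last == "native-image.properties" =>
    MergeStrategy.discard
  case _ =>
    MergeStrategy.first
}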
Nil).value new Dockerfile { from("us.gcr.io/broad-dsp-gcr-public/base/jre:11-debian") @@ -101,7 +101,6 @@ object Publishing { cache = false, removeIntermediateContainers = BuildOptions.Remove.Always ), - ThisBuild / dockerCustomSettings := Nil // setting the default value ) def dockerPushSettings(pushEnabled: Boolean): Seq[Setting[_]] = { diff --git a/project/Settings.scala b/project/Settings.scala index de83feea4ce..8cdffade72c 100644 --- a/project/Settings.scala +++ b/project/Settings.scala @@ -82,13 +82,11 @@ object Settings { assembly / assemblyJarName := name.value + "-" + version.value + ".jar", assembly / test := {}, assembly / assemblyMergeStrategy := customMergeStrategy.value, - assembly / logLevel := - sys.env.get("CROMWELL_SBT_ASSEMBLY_LOG_LEVEL").flatMap(Level.apply).getOrElse((assembly / logLevel).value) ) - val Scala2_12Version = "2.12.12" // scala-steward:off (CROM-6777) - 2.12.13 blocked by duplicate import of nowarn - val ScalaVersion: String = Scala2_12Version - val sharedSettings: Seq[Setting[_]] = + val Scala2_12Version = "2.12.14" + private val ScalaVersion: String = Scala2_12Version + private val sharedSettings: Seq[Setting[_]] = cromwellVersionWithGit ++ publishingSettings ++ List( organization := "org.broadinstitute", scalaVersion := ScalaVersion, diff --git a/project/Testing.scala b/project/Testing.scala index f5544540489..89a6ffc6832 100644 --- a/project/Testing.scala +++ b/project/Testing.scala @@ -101,7 +101,11 @@ object Testing { CromwellBenchmarkTest / testFrameworks += new TestFramework("org.scalameter.ScalaMeterFramework"), // Don't execute benchmarks in parallel CromwellBenchmarkTest / parallelExecution := false, - // Make sure no secrets are commited to git + // Until we move away from Travis do not execute ANY tests in parallel (see also Settings.sharedSettings) + Test / parallelExecution := false, + // Since parallelExecution is off do not buffer test results + Test / logBuffered := false, + // Make sure no secrets are committed to git minnieKenny := { val log = streams.value.log val args = spaceDelimited("").parsed @@ -113,7 +117,7 @@ object Testing { }, ) - val integrationTestSettings = List( + private val integrationTestSettings = List( libraryDependencies ++= testDependencies.map(_ % IntegrationTest) ) ++ itSettings diff --git a/project/build.properties b/project/build.properties index f0f04096225..2b4cbe4d584 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1,2 +1,2 @@ # scala-steward:off -sbt.version=1.4.9 +sbt.version=1.5.5 diff --git a/project/plugins.sbt b/project/plugins.sbt index 951fdd6711d..b1936521ef2 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1,4 +1,4 @@ addSbtPlugin("se.marcuslonnberg" % "sbt-docker" % "1.8.2") -addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.15.0") -addSbtPlugin("com.typesafe.sbt" % "sbt-git" % "1.0.0") +addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "1.0.0") +addSbtPlugin("com.typesafe.sbt" % "sbt-git" % "1.0.1") addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.8.2") diff --git a/project/project/build.properties b/project/project/build.properties index f0f04096225..2b4cbe4d584 100644 --- a/project/project/build.properties +++ b/project/project/build.properties @@ -1,2 +1,2 @@ # scala-steward:off -sbt.version=1.4.9 +sbt.version=1.5.5 diff --git a/publish/docker-setup.sh b/publish/docker-setup.sh index d537171662f..1ef181bae19 100755 --- a/publish/docker-setup.sh +++ b/publish/docker-setup.sh @@ -33,7 +33,7 @@ apt-get install \ # sbt launcher non-deb package installation 
instructions adapted from # - https://github.com/sbt/sbt/releases/tag/v1.4.9 # - https://github.com/broadinstitute/scala-baseimage/pull/4/files -curl --location --fail --silent --show-error "https://github.com/sbt/sbt/releases/download/v1.4.9/sbt-1.4.9.tgz" | +curl --location --fail --silent --show-error "https://github.com/sbt/sbt/releases/download/v1.5.5/sbt-1.5.5.tgz" | tar zxf - -C /usr/share update-alternatives --install /usr/bin/sbt sbt /usr/share/sbt/bin/sbt 1 diff --git a/scripts/docker-develop/Dockerfile b/scripts/docker-develop/Dockerfile index 56fbb0dcfc5..5c89ae046fe 100644 --- a/scripts/docker-develop/Dockerfile +++ b/scripts/docker-develop/Dockerfile @@ -17,8 +17,8 @@ RUN apt-get update -q && \ # Git # Env variables -ENV SCALA_VERSION 2.12.12 -ENV SBT_VERSION 1.4.9 +ENV SCALA_VERSION 2.12.14 +ENV SBT_VERSION 1.5.5 # ## AdoptOpenJDK Hotspot diff --git a/server/src/main/resources/sentry.properties b/server/src/main/resources/sentry.properties new file mode 100644 index 00000000000..ebc5aa32687 --- /dev/null +++ b/server/src/main/resources/sentry.properties @@ -0,0 +1,2 @@ +# Quiet warnings about missing sentry DSNs by providing an empty string +dsn= diff --git a/server/src/main/scala/cromwell/CromwellEntryPoint.scala b/server/src/main/scala/cromwell/CromwellEntryPoint.scala index 951e6bc11f1..a6a196c667d 100644 --- a/server/src/main/scala/cromwell/CromwellEntryPoint.scala +++ b/server/src/main/scala/cromwell/CromwellEntryPoint.scala @@ -6,10 +6,10 @@ import akka.http.scaladsl.unmarshalling.Unmarshal import akka.pattern.GracefulStopSupport import akka.stream.ActorMaterializer import cats.data.Validated._ -import cats.effect.IO +import cats.effect.{ContextShift, IO} import cats.syntax.apply._ import cats.syntax.validated._ -import com.typesafe.config.ConfigFactory +import com.typesafe.config.{Config, ConfigFactory} import common.exception.MessageAggregation import common.validation.ErrorOr._ import cromwell.CommandLineArguments.{ValidSubmission, WorkflowSourceOrUrl} @@ -21,40 +21,36 @@ import cromwell.core.{WorkflowSourceFilesCollection, WorkflowSourceFilesWithDepe import cromwell.engine.workflow.SingleWorkflowRunnerActor import cromwell.engine.workflow.SingleWorkflowRunnerActor.RunWorkflow import cromwell.server.{CromwellServer, CromwellShutdown, CromwellSystem} -import io.sentry.Sentry -import io.sentry.config.Lookup -import io.sentry.dsn.Dsn -import io.sentry.util.Util import net.ceedubs.ficus.Ficus._ import org.slf4j.LoggerFactory import scala.collection.JavaConverters._ import scala.concurrent.duration._ -import scala.concurrent.{Await, Future, TimeoutException} +import scala.concurrent.{Await, ExecutionContext, Future, TimeoutException} import scala.language.postfixOps import scala.util.{Failure, Success, Try} object CromwellEntryPoint extends GracefulStopSupport { - lazy val EntryPointLogger = LoggerFactory.getLogger("Cromwell EntryPoint") + private lazy val EntryPointLogger = LoggerFactory.getLogger("Cromwell EntryPoint") private lazy val config = ConfigFactory.load() // Only abort jobs on SIGINT if the config explicitly sets system.abort-jobs-on-terminate = true. 
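The private vals that follow read these settings with ficus, which distinguishes "key unset" from "key explicitly false". A small sketch of the pattern (key names match the comment above; the fallback values are illustrative):

object ConfigReadSketch {
  import com.typesafe.config.ConfigFactory
  import net.ceedubs.ficus.Ficus._
  import scala.concurrent.duration._

  private val config = ConfigFactory.load()
  // as[Option[Boolean]] yields None when the key is absent, so jobs are only
  // aborted when the config explicitly opts in with `true`.
  val abortJobsOnTerminate: Boolean =
    config.as[Option[Boolean]]("system.abort-jobs-on-terminate").getOrElse(false)
  // getOrElse reads the key when present, otherwise keeps the supplied default.
  val dnsCacheTtl: FiniteDuration = config.getOrElse("system.dns-cache-ttl", 3.minutes)
}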
- val abortJobsOnTerminate = config.as[Option[Boolean]]("system.abort-jobs-on-terminate") + private val abortJobsOnTerminate = config.as[Option[Boolean]]("system.abort-jobs-on-terminate") - val gracefulShutdown = config.as[Boolean]("system.graceful-server-shutdown") + private val gracefulShutdown = config.as[Boolean]("system.graceful-server-shutdown") // 3 minute DNS TTL down from JVM default of infinite [BA-6454] - val dnsCacheTtl = config.getOrElse("system.dns-cache-ttl", 3 minutes) + private val dnsCacheTtl = config.getOrElse("system.dns-cache-ttl", 3 minutes) java.security.Security.setProperty("networkaddress.cache.ttl", dnsCacheTtl.toSeconds.toString) /** * Run Cromwell in server mode. */ - def runServer() = { + def runServer(): Unit = { initLogging(Server) - val system = buildCromwellSystem(Server) + val system = buildCromwellSystem() waitAndExit(CromwellServer.run(gracefulShutdown, abortJobsOnTerminate.getOrElse(false)) _, system) } @@ -66,8 +62,8 @@ object CromwellEntryPoint extends GracefulStopSupport { val sources = validateRunArguments(args) - val cromwellSystem = buildCromwellSystem(Run) - implicit val actorSystem = cromwellSystem.actorSystem + val cromwellSystem = buildCromwellSystem() + implicit val actorSystem: ActorSystem = cromwellSystem.actorSystem val runnerProps = SingleWorkflowRunnerActor.props( source = sources, @@ -88,10 +84,10 @@ object CromwellEntryPoint extends GracefulStopSupport { initLogging(Submit) lazy val Log = LoggerFactory.getLogger("cromwell-submit") - implicit val actorSystem = ActorSystem("SubmitSystem") - implicit val materializer = ActorMaterializer() - implicit val ec = actorSystem.dispatcher - implicit val cs = IO.contextShift(ec) + implicit val actorSystem: ActorSystem = ActorSystem("SubmitSystem") + implicit val materializer: ActorMaterializer = ActorMaterializer() + implicit val ec: ExecutionContext = actorSystem.dispatcher + implicit val cs: ContextShift[IO] = IO.contextShift(ec) val cromwellClient = new CromwellClient(args.host, "v2") @@ -120,11 +116,11 @@ object CromwellEntryPoint extends GracefulStopSupport { waitAndExit(submissionFuture, () => actorSystem.terminate()) } - private def buildCromwellSystem(command: Command): CromwellSystem = { + private def buildCromwellSystem(): CromwellSystem = { lazy val Log = LoggerFactory.getLogger("cromwell") Try { new CromwellSystem { - override lazy val config = CromwellEntryPoint.config + override lazy val config: Config = CromwellEntryPoint.config } } recoverWith { case t: Throwable => @@ -170,15 +166,10 @@ object CromwellEntryPoint extends GracefulStopSupport { Make sure that the next time one uses the ConfigFactory that our updated system properties are loaded. */ ConfigFactory.invalidateCaches() - - // Quiet warnings about missing sentry DSNs by just providing the default. 
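With the sentry.properties resources added in this change supplying an empty dsn=, the programmatic fallback removed just below is no longer needed: the library can discover its (empty) DSN from the classpath instead of being handed a synthetic one. A rough sketch of that style of classpath lookup, assuming nothing about Sentry's actual loader:

import java.util.Properties

// Illustrative only: how a library typically reads a properties resource from
// the classpath; this is not Sentry's real initialization code.
val props = new Properties()
Option(getClass.getResourceAsStream("/sentry.properties")).foreach { stream =>
  try props.load(stream)
  finally stream.close()
}
val dsn: Option[String] = Option(props.getProperty("dsn")) // Some("") here: configured, but empty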
- val dsn = Option(Lookup.getDefault().get("dsn")).filterNot(Util.isNullOrEmpty).getOrElse( - Dsn.DEFAULT_DSN + "&stacktrace.app.packages=quieted_with_any_value_because_empty_was_not_working") - Sentry.init(dsn) () } - protected def waitAndExit[A](operation: () => Future[A], shutdown: () => Future[Any]) = { + protected def waitAndExit[A](operation: () => Future[A], shutdown: () => Future[Any]): Nothing = { val futureResult = operation() Await.ready(futureResult, Duration.Inf) diff --git a/server/src/test/scala/cromwell/CromwellTestKitSpec.scala b/server/src/test/scala/cromwell/CromwellTestKitSpec.scala index 32216b77239..1d0ce5db516 100644 --- a/server/src/test/scala/cromwell/CromwellTestKitSpec.scala +++ b/server/src/test/scala/cromwell/CromwellTestKitSpec.scala @@ -94,7 +94,7 @@ object CromwellTestKitSpec { | # Some of our tests fire off a message, then expect a particular event message within 3s (the default). | # Especially on CI, the metadata test does not seem to be returning in time. So, overriding the timeouts | # with slightly higher values. Alternatively, could also adjust the akka.test.timefactor only in CI. - | filter-leeway = 10s + | filter-leeway = 60s | single-expect-default = 5s | default-timeout = 10s | } diff --git a/server/src/test/scala/cromwell/engine/workflow/lifecycle/execution/WorkflowExecutionActorSpec.scala b/server/src/test/scala/cromwell/engine/workflow/lifecycle/execution/WorkflowExecutionActorSpec.scala index e8fb65c8b7a..31acf5329c2 100644 --- a/server/src/test/scala/cromwell/engine/workflow/lifecycle/execution/WorkflowExecutionActorSpec.scala +++ b/server/src/test/scala/cromwell/engine/workflow/lifecycle/execution/WorkflowExecutionActorSpec.scala @@ -27,7 +27,7 @@ import scala.concurrent.{Await, Promise} class WorkflowExecutionActorSpec extends CromwellTestKitSpec with AnyFlatSpecLike with Matchers with BeforeAndAfter with WorkflowDescriptorBuilderForSpecs { override implicit val actorSystem = system - implicit val DefaultDuration = 20.seconds.dilated + implicit val DefaultDuration = 60.seconds.dilated def mockServiceRegistryActor = TestActorRef(new Actor { override def receive = { diff --git a/services/src/main/scala/cromwell/services/instrumentation/impl/statsd/StatsDInstrumentationServiceActor.scala b/services/src/main/scala/cromwell/services/instrumentation/impl/statsd/StatsDInstrumentationServiceActor.scala index f96ee69585c..4c886c80d30 100644 --- a/services/src/main/scala/cromwell/services/instrumentation/impl/statsd/StatsDInstrumentationServiceActor.scala +++ b/services/src/main/scala/cromwell/services/instrumentation/impl/statsd/StatsDInstrumentationServiceActor.scala @@ -9,7 +9,7 @@ import cromwell.services.instrumentation.InstrumentationService.InstrumentationS import cromwell.services.instrumentation._ import cromwell.services.instrumentation.impl.statsd.StatsDInstrumentationServiceActor._ import cromwell.util.GracefulShutdownHelper.ShutdownCommand -import nl.grons.metrics.scala.{DefaultInstrumented, Meter, MetricName} +import nl.grons.metrics4.scala.{DefaultInstrumented, Meter, MetricName} import net.ceedubs.ficus.Ficus._ import scala.collection.JavaConverters._ @@ -18,13 +18,15 @@ import scala.concurrent.duration._ object StatsDInstrumentationServiceActor { val CromwellMetricPrefix: String = "cromwell" - def props(serviceConfig: Config, globalConfig: Config, serviceRegistryActor: ActorRef) = Props(new StatsDInstrumentationServiceActor(serviceConfig, globalConfig, serviceRegistryActor)) + def props(serviceConfig: Config, globalConfig: Config, 
serviceRegistryActor: ActorRef): Props = + Props(new StatsDInstrumentationServiceActor(serviceConfig, globalConfig, serviceRegistryActor)) implicit class CromwellBucketEnhanced(val cromwellBucket: CromwellBucket) extends AnyVal { /** - * Transforms a CromwellBucket to a StatsD path, optionally inserting a value between prefix and path + * Transforms a CromwellBucket to a StatsD path, optionally inserting a value between prefix and path */ - def toStatsDString(insert: Option[String] = None) = (cromwellBucket.prefix ++ insert ++ cromwellBucket.path.toList).mkString(".") + def toStatsDString(insert: Option[String] = None): String = + (cromwellBucket.prefix ++ insert ++ cromwellBucket.path.toList).mkString(".") } } @@ -44,10 +46,11 @@ object StatsDInstrumentationServiceActor { * by making use of downsampling and / or multi metrics packets: https://github.com/etsy/statsd/blob/master/docs/metric_types.md */ class StatsDInstrumentationServiceActor(serviceConfig: Config, globalConfig: Config, serviceRegistryActor: ActorRef) extends Actor with DefaultInstrumented { - val statsDConfig = StatsDConfig(serviceConfig) + val statsDConfig: StatsDConfig = StatsDConfig(serviceConfig) val cromwellInstanceIdOption: Option[String] = globalConfig.getAs[String]("system.cromwell_id") - override lazy val metricBaseName = MetricName(CromwellMetricPrefix + cromwellInstanceIdOption.fold("")("." + _)) + override lazy val metricBaseName: MetricName = + MetricName(CromwellMetricPrefix + cromwellInstanceIdOption.fold("")("." + _)) val gaugeFunctions = new ConcurrentHashMap[CromwellBucket, Long]() @@ -59,7 +62,7 @@ class StatsDInstrumentationServiceActor(serviceConfig: Config, globalConfig: Con .build(CromwellStatsD(statsDConfig.hostname, statsDConfig.port)) .start(statsDConfig.flushRate.toMillis, TimeUnit.MILLISECONDS) - override def receive = { + override def receive: Receive = { case InstrumentationServiceMessage(cromwellMetric) => cromwellMetric match { case CromwellIncrement(bucket) => increment(bucket) case CromwellCount(bucket, value, _) => updateCounter(bucket, value) @@ -87,12 +90,12 @@ class StatsDInstrumentationServiceActor(serviceConfig: Config, globalConfig: Con /** * Increment the counter value for this bucket */ - private def increment(bucket: CromwellBucket) = meterFor(bucket).mark(1L) + private def increment(bucket: CromwellBucket): Unit = meterFor(bucket).mark(1L) /** * Update the counter value for this bucket by adding (or subtracting) value */ - private def updateCounter(bucket: CromwellBucket, value: Long) = meterFor(bucket).mark(value) + private def updateCounter(bucket: CromwellBucket, value: Long): Unit = meterFor(bucket).mark(value) /** * Update the gauge value for this bucket @@ -109,7 +112,7 @@ class StatsDInstrumentationServiceActor(serviceConfig: Config, globalConfig: Con /** * Adds a new timing value for this bucket */ - private def updateTiming(bucket: CromwellBucket, value: FiniteDuration) = { + private def updateTiming(bucket: CromwellBucket, value: FiniteDuration): Unit = { metrics.timer(bucket.toStatsDString()).update(value) } } diff --git a/services/src/main/scala/cromwell/services/metadata/impl/archiver/ArchiveMetadataSchedulerActor.scala b/services/src/main/scala/cromwell/services/metadata/impl/archiver/ArchiveMetadataSchedulerActor.scala index 2f73ffd9393..da56d649ed7 100644 --- a/services/src/main/scala/cromwell/services/metadata/impl/archiver/ArchiveMetadataSchedulerActor.scala +++ 
b/services/src/main/scala/cromwell/services/metadata/impl/archiver/ArchiveMetadataSchedulerActor.scala @@ -232,7 +232,11 @@ class ArchiveMetadataSchedulerActor(archiveMetadataConfig: ArchiveMetadataConfig _ = sendTiming(archiverStreamTimingMetricsBasePath :+ "create_gcs_stream", calculateTimeDifference(gotAsyncIoTime, gcsStreamCreatedTime), ServicesPrefix) crc32cStream = new Crc32cStream() teeStream = new TeeingOutputStream(gcsStream, crc32cStream, new ByteCountingOutputStream()) - csvPrinter = new CSVPrinter(new OutputStreamWriter(teeStream), CSVFormat.DEFAULT.withHeader(CsvFileHeaders : _*)) + csvPrinter = + new CSVPrinter( + new OutputStreamWriter(teeStream), + CSVFormat.DEFAULT.builder().setHeader(CsvFileHeaders : _*).build(), + ) csvPrinterCreatedTime = OffsetDateTime.now() _ = sendTiming(archiverStreamTimingMetricsBasePath :+ "create_csv_printer", calculateTimeDifference(gcsStreamCreatedTime, csvPrinterCreatedTime), ServicesPrefix) _ <- stream.foreach(me => { diff --git a/services/src/test/scala/cromwell/services/ServicesSpec.scala b/services/src/test/scala/cromwell/services/ServicesSpec.scala index a9b8dd6688e..364d2e26bf6 100644 --- a/services/src/test/scala/cromwell/services/ServicesSpec.scala +++ b/services/src/test/scala/cromwell/services/ServicesSpec.scala @@ -74,7 +74,7 @@ abstract class ServicesSpec extends TestKitSuite with Matchers with AnyWordSpecLike with ScalaFutures { override protected lazy val actorSystemConfig: Config = ServicesSpec.config - implicit val timeout: Timeout = Timeout(20.seconds.dilated) + implicit val timeout: Timeout = Timeout(60.seconds.dilated) implicit val ec: ExecutionContext = system.dispatcher implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = Span(1, Minute), interval = Span(100, Millis)) } diff --git a/services/src/test/scala/cromwell/services/database/DatabaseTestKit.scala b/services/src/test/scala/cromwell/services/database/DatabaseTestKit.scala index f8ce024fd2f..651248b70cd 100644 --- a/services/src/test/scala/cromwell/services/database/DatabaseTestKit.scala +++ b/services/src/test/scala/cromwell/services/database/DatabaseTestKit.scala @@ -1,9 +1,7 @@ package cromwell.services.database -import java.sql.Connection - import better.files._ -import com.dimafeng.testcontainers.{Container, JdbcDatabaseContainer, MariaDBContainer, MySQLContainer, PostgreSQLContainer} +import com.dimafeng.testcontainers._ import com.typesafe.config.{Config, ConfigFactory} import com.typesafe.scalalogging.StrictLogging import cromwell.database.migration.liquibase.LiquibaseUtils @@ -12,12 +10,14 @@ import cromwell.services.ServicesStore.EnhancedSqlDatabase import cromwell.services.{EngineServicesStore, MetadataServicesStore} import liquibase.snapshot.DatabaseSnapshot import liquibase.structure.core.Index +import org.testcontainers.containers.{JdbcDatabaseContainer => JavaJdbcDatabaseContainer} import org.testcontainers.utility.DockerImageName import slick.jdbc.JdbcProfile import slick.jdbc.meta.{MIndexInfo, MPrimaryKey} +import java.sql.Connection import scala.concurrent.Await -import scala.concurrent.duration.Duration +import scala.concurrent.duration._ object DatabaseTestKit extends StrictLogging { @@ -109,7 +109,7 @@ object DatabaseTestKit extends StrictLogging { } def getDatabaseTestContainer(databaseSystem: DatabaseSystem): Option[Container] = { - databaseSystem match { + val containerOption: Option[SingleContainer[_ <: JavaJdbcDatabaseContainer[_]]] = databaseSystem match { case HsqldbDatabaseSystem => None case networkDbSystem: 
NetworkDatabaseSystem => networkDbSystem.platform match { @@ -135,6 +135,12 @@ object DatabaseTestKit extends StrictLogging { case _ => None } } + + // Give low cpu/mem CI a bit more patience to start the container + containerOption.map(_.configure { container => + container.withStartupTimeoutSeconds(5.minutes.toSeconds.toInt) + () + }) } def initializeDatabaseByContainerOptTypeAndSystem[A <: SlickDatabase](containerOpt: Option[Container], diff --git a/services/src/test/scala/cromwell/services/database/LiquibaseComparisonSpec.scala b/services/src/test/scala/cromwell/services/database/LiquibaseComparisonSpec.scala index 45d7aca64e5..2b292c871ac 100644 --- a/services/src/test/scala/cromwell/services/database/LiquibaseComparisonSpec.scala +++ b/services/src/test/scala/cromwell/services/database/LiquibaseComparisonSpec.scala @@ -25,16 +25,18 @@ import scala.reflect._ */ class LiquibaseComparisonSpec extends AnyFlatSpec with CromwellTimeoutSpec with Matchers with ScalaFutures { - implicit val executionContext = ExecutionContext.global + implicit val executionContext: ExecutionContext = ExecutionContext.global - implicit val defaultPatience = PatienceConfig(timeout = scaled(5.seconds), interval = scaled(100.millis)) + implicit val defaultPatience: PatienceConfig = + PatienceConfig(timeout = scaled(5.seconds), interval = scaled(100.millis)) CromwellDatabaseType.All foreach { databaseType => + lazy val expectedSnapshot = DatabaseTestKit.inMemorySnapshot(databaseType, SlickSchemaManager) - lazy val expectedColumns = get[Column](expectedSnapshot) - lazy val expectedPrimaryKeys = get[PrimaryKey](expectedSnapshot) - lazy val expectedForeignKeys = get[ForeignKey](expectedSnapshot) - lazy val expectedUniqueConstraints = get[UniqueConstraint](expectedSnapshot) + lazy val expectedColumns = get[Column](expectedSnapshot).sorted + lazy val expectedPrimaryKeys = get[PrimaryKey](expectedSnapshot).sorted + lazy val expectedForeignKeys = get[ForeignKey](expectedSnapshot).sorted + lazy val expectedUniqueConstraints = get[UniqueConstraint](expectedSnapshot).sorted lazy val expectedIndexes = get[Index](expectedSnapshot) filterNot DatabaseTestKit.isGenerated DatabaseSystem.All foreach { databaseSystem => @@ -80,7 +82,8 @@ class LiquibaseComparisonSpec extends AnyFlatSpec with CromwellTimeoutSpec with // Auto increment columns may have different types, such as SERIAL/BIGSERIAL // https://www.postgresql.org/docs/11/datatype-numeric.html#DATATYPE-SERIAL val actualColumnDefault = ColumnDefault(actualColumnType, actualColumn.getDefaultValue) - val autoIncrementDefault = getAutoIncrementDefault(databaseSystem, columnMapping, expectedColumn) + val autoIncrementDefault = + getAutoIncrementDefault(expectedColumn, columnMapping, databaseSystem, connectionMetadata) actualColumnDefault should be(autoIncrementDefault) } else { @@ -162,6 +165,8 @@ class LiquibaseComparisonSpec extends AnyFlatSpec with CromwellTimeoutSpec with } val actualForeignKey = actualForeignKeyOption getOrElse fail(s"Did not find $description") + actualForeignKey.getPrimaryKeyTable.getName should be(expectedForeignKey.getPrimaryKeyTable.getName) + actualForeignKey.getForeignKeyTable.getName should be(expectedForeignKey.getForeignKeyTable.getName) actualForeignKey.getPrimaryKeyColumns.asScala.map(ColumnDescription.from) should contain theSameElementsAs expectedForeignKey.getPrimaryKeyColumns.asScala.map(ColumnDescription.from) actualForeignKey.getForeignKeyColumns.asScala.map(ColumnDescription.from) should @@ -194,6 +199,7 @@ class LiquibaseComparisonSpec 
extends AnyFlatSpec with CromwellTimeoutSpec with val actualUniqueConstraint = actualUniqueConstraintOption getOrElse fail(s"Did not find $description") + actualUniqueConstraint.getRelation.getName should be(expectedUniqueConstraint.getRelation.getName) actualUniqueConstraint.getColumns.asScala.map(ColumnDescription.from) should contain theSameElementsAs expectedUniqueConstraint.getColumns.asScala.map(ColumnDescription.from) } @@ -228,7 +234,7 @@ class LiquibaseComparisonSpec extends AnyFlatSpec with CromwellTimeoutSpec with object LiquibaseComparisonSpec { private def get[T <: DatabaseObject : ClassTag : Ordering](databaseSnapshot: DatabaseSnapshot): Seq[T] = { val databaseObjectClass = classTag[T].runtimeClass.asInstanceOf[Class[T]] - databaseSnapshot.get(databaseObjectClass).asScala.toSeq.sorted + databaseSnapshot.get(databaseObjectClass).asScala.toSeq } private val DefaultNullBoolean = Boolean.box(false) @@ -276,8 +282,8 @@ object LiquibaseComparisonSpec { case class ColumnMapping ( - typeMapping: Map[ColumnType, ColumnType] = Map.empty, - defaultMapping: Map[ColumnDefault, ColumnDefault] = Map.empty, + typeMapping: PartialFunction[ColumnType, ColumnType] = PartialFunction.empty, + defaultMapping: Map[ColumnDefault, AnyRef] = Map.empty, ) /** Generate the expected PostgreSQL sequence name for a column. */ @@ -315,6 +321,9 @@ object LiquibaseComparisonSpec { private val HsqldbTypeInteger = ColumnType("INTEGER", Option(32)) private val HsqldbTypeTimestamp = ColumnType("TIMESTAMP") + // Defaults as they are represented in HSQLDB that will have different representations in other DBMS. + private val HsqldbDefaultBooleanTrue = ColumnDefault(HsqldbTypeBoolean, Boolean.box(true)) + // Nothing to map as the original is also HSQLDB private val HsqldbColumnMapping = ColumnMapping() @@ -331,13 +340,25 @@ object LiquibaseComparisonSpec { HsqldbTypeTimestamp -> ColumnType("DATETIME"), ), defaultMapping = Map( - ColumnDefault(HsqldbTypeBoolean, Boolean.box(true)) -> - ColumnDefault(ColumnType("TINYINT", Option(3)), Int.box(1)), + HsqldbDefaultBooleanTrue -> Int.box(1) ), ) - // MariaDB should behave exactly the same as MySQL - private val MariadbColumnMapping = MysqldbColumnMapping + // MariaDB should behave similarly to MySQL except that only LOBs have sizes + private val MariadbColumnMapping = ColumnMapping( typeMapping = Map( HsqldbTypeBigInt -> ColumnType("BIGINT"), HsqldbTypeBlob -> ColumnType("LONGBLOB", Option(2147483647)), HsqldbTypeBoolean -> ColumnType("TINYINT"), HsqldbTypeClob -> ColumnType("LONGTEXT", Option(2147483647)), HsqldbTypeInteger -> ColumnType("INT"), HsqldbTypeTimestamp -> ColumnType("DATETIME"), ), defaultMapping = Map( HsqldbDefaultBooleanTrue -> Int.box(1), ), ) private val PostgresqlColumnMapping = ColumnMapping( @@ -367,24 +388,26 @@ object LiquibaseComparisonSpec { */ private def getColumnType(column: Column, columnMapping: ColumnMapping): ColumnType = { val columnType = ColumnType.from(column) - columnMapping.typeMapping.getOrElse(columnType, columnType) + columnMapping.typeMapping.applyOrElse[ColumnType, ColumnType](columnType, _ => columnType) } /** * Returns the default for the column, either from ColumnMapping or the column itself.
*/ private def getColumnDefault(column: Column, columnMapping: ColumnMapping): AnyRef = { - columnMapping.defaultMapping get ColumnDefault.from(column) map (_.defaultValue) getOrElse column.getDefaultValue + columnMapping.defaultMapping.getOrElse(ColumnDefault.from(column), column.getDefaultValue) } /** * Return the default for the auto increment column. */ - private def getAutoIncrementDefault(databaseSystem: DatabaseSystem, + private def getAutoIncrementDefault(column: Column, columnMapping: ColumnMapping, - column: Column): ColumnDefault = { + databaseSystem: DatabaseSystem, + connectionMetadata: ConnectionMetadata, + ): ColumnDefault = { databaseSystem.platform match { - case PostgresqlDatabasePlatform => + case PostgresqlDatabasePlatform if connectionMetadata.databaseMajorVersion <= 9 => val columnType = column.getType.getTypeName match { case "BIGINT" => ColumnType("BIGSERIAL", None) case "INTEGER" => ColumnType("SERIAL", None) @@ -441,8 +464,8 @@ object LiquibaseComparisonSpec { /** * Returns an optional extra check to ensure that sequences have the same types as their auto increment columns. * - * This is because PostgreSQL requires two statements to modify SERIAL columns to BIGSERIAL, one to widen the column, - * and another to widen the sequence. + * This is because PostgreSQL <= 9 requires two statements to modify SERIAL columns to BIGSERIAL, one to widen the + * column, and another to widen the sequence. * * https://stackoverflow.com/questions/52195303/postgresql-primary-key-id-datatype-from-serial-to-bigserial#answer-52195920 * https://www.postgresql.org/docs/11/datatype-numeric.html#DATATYPE-SERIAL @@ -455,8 +478,6 @@ object LiquibaseComparisonSpec { case PostgresqlDatabasePlatform if column.isAutoIncrement && connectionMetadata.databaseMajorVersion <= 9 => // "this is currently always bigint" --> https://www.postgresql.org/docs/9.6/infoschema-sequences.html Option("bigint") - case PostgresqlDatabasePlatform if column.isAutoIncrement => - Option(column.getType.getTypeName.toLowerCase) case _ => None } } @@ -467,8 +488,8 @@ object LiquibaseComparisonSpec { import database.dataAccess.driver.api._ databaseSystem.platform match { case PostgresqlDatabasePlatform if column.isAutoIncrement => - //noinspection SqlDialectInspection - sql"""select data_type + //noinspection SqlDialectInspection + sql"""select data_type from INFORMATION_SCHEMA.sequences where sequence_name = '#${postgresqlSeqName(column)}' """.as[String].head diff --git a/services/src/test/scala/cromwell/services/database/LobSpec.scala b/services/src/test/scala/cromwell/services/database/LobSpec.scala index ed6723b90fc..a92cf84aed9 100644 --- a/services/src/test/scala/cromwell/services/database/LobSpec.scala +++ b/services/src/test/scala/cromwell/services/database/LobSpec.scala @@ -1,7 +1,5 @@ package cromwell.services.database -import java.time.OffsetDateTime - import com.dimafeng.testcontainers.Container import common.assertion.CromwellTimeoutSpec import cromwell.core.Tags._ @@ -9,19 +7,21 @@ import cromwell.core.WorkflowId import cromwell.database.sql.SqlConverters._ import cromwell.database.sql.joins.JobStoreJoin import cromwell.database.sql.tables.{JobStoreEntry, JobStoreSimpletonEntry, WorkflowStoreEntry} -import javax.sql.rowset.serial.SerialBlob import org.scalatest.concurrent.ScalaFutures import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers +import java.time.OffsetDateTime +import javax.sql.rowset.serial.SerialBlob import scala.concurrent.ExecutionContext import 
scala.concurrent.duration._ class LobSpec extends AnyFlatSpec with CromwellTimeoutSpec with Matchers with ScalaFutures { - implicit val executionContext = ExecutionContext.global + implicit val executionContext: ExecutionContext = ExecutionContext.global - implicit val defaultPatience = PatienceConfig(timeout = scaled(5.seconds), interval = scaled(100.millis)) + implicit val defaultPatience: PatienceConfig = + PatienceConfig(timeout = scaled(5.seconds), interval = scaled(100.millis)) DatabaseSystem.All foreach { databaseSystem => @@ -35,7 +35,7 @@ class LobSpec extends AnyFlatSpec with CromwellTimeoutSpec with Matchers with Sc containerOpt.foreach { _.start } } - it should "fail to store and retrieve empty blobs" taggedAs DbmsTest in { + it should "store empty blobs" taggedAs DbmsTest in { // See notes in BytesToBlobOption import eu.timepit.refined.auto._ val clob = "".toClob(default = "{}") diff --git a/services/src/test/scala/cromwell/services/keyvalue/impl/KeyValueDatabaseSpec.scala b/services/src/test/scala/cromwell/services/keyvalue/impl/KeyValueDatabaseSpec.scala index 16223fdaaf0..813079480dc 100644 --- a/services/src/test/scala/cromwell/services/keyvalue/impl/KeyValueDatabaseSpec.scala +++ b/services/src/test/scala/cromwell/services/keyvalue/impl/KeyValueDatabaseSpec.scala @@ -125,7 +125,8 @@ object KeyValueDatabaseSpec { private def getFailureRegex(databaseSystem: DatabaseSystem): String = { databaseSystem.platform match { case HsqldbDatabasePlatform => - "integrity constraint violation: NOT NULL check constraint; SYS_CT_10591 table: JOB_KEY_VALUE_ENTRY column: STORE_VALUE" + """integrity constraint violation: NOT NULL check constraint; """ + + """SYS_CT_\d+ table: JOB_KEY_VALUE_ENTRY column: STORE_VALUE""" case MariadbDatabasePlatform => """\(conn=\d+\) Column 'STORE_VALUE' cannot be null""" case MysqlDatabasePlatform => "Column 'STORE_VALUE' cannot be null" case PostgresqlDatabasePlatform => diff --git a/services/src/test/scala/cromwell/services/keyvalue/impl/KeyValueServiceActorSpec.scala b/services/src/test/scala/cromwell/services/keyvalue/impl/KeyValueServiceActorSpec.scala index fef3fc3f6ef..1faee0ac315 100644 --- a/services/src/test/scala/cromwell/services/keyvalue/impl/KeyValueServiceActorSpec.scala +++ b/services/src/test/scala/cromwell/services/keyvalue/impl/KeyValueServiceActorSpec.scala @@ -2,13 +2,17 @@ package cromwell.services.keyvalue.impl import akka.actor.ActorRef import akka.pattern._ -import akka.testkit.TestProbe +import akka.testkit.{TestDuration, TestProbe} import com.typesafe.config.{Config, ConfigFactory} import cromwell.core.WorkflowId import cromwell.services.ServicesSpec import cromwell.services.keyvalue.KeyValueServiceActor._ +import org.scalatest.concurrent.Eventually +import org.scalatest.concurrent.PatienceConfiguration.{Interval, Timeout} -class KeyValueServiceActorSpec extends ServicesSpec { +import scala.concurrent.duration._ + +class KeyValueServiceActorSpec extends ServicesSpec with Eventually { val cromwellConfig: Config = ConfigFactory.parseString( s"""services: { @@ -38,6 +42,14 @@ class KeyValueServiceActorSpec extends ServicesSpec { val kvPair3: KvPair = KvPair(ScopedKey(wfID, jobKey2, "k1"), "v1") "KeyValueServiceActor" should { + "eventually insert a single key/value" in { + // Wait a bit longer for yet another in memory database plus actor system to be created and liquibased + eventually(Timeout(defaultPatience.timeout.scaledBy(3)), Interval(15.seconds.dilated)) { + val kvPut1 = KvPut(KvPair(ScopedKey(wfID, jobKey1, "k1"), "v1")) + 
(sqlKvServiceActor ? kvPut1).mapTo[KvResponse].futureValue + } + } + "insert a key/value" in { val kvPut1 = KvPut(KvPair(ScopedKey(wfID, jobKey1, "k1"), "v1")) diff --git a/services/src/test/scala/cromwell/services/metadata/impl/MetadataServiceActorSpec.scala b/services/src/test/scala/cromwell/services/metadata/impl/MetadataServiceActorSpec.scala index bbff48a4725..eed3dacf79b 100644 --- a/services/src/test/scala/cromwell/services/metadata/impl/MetadataServiceActorSpec.scala +++ b/services/src/test/scala/cromwell/services/metadata/impl/MetadataServiceActorSpec.scala @@ -119,6 +119,12 @@ class MetadataServiceActorSpec extends ServicesSpec { actorName should { + "receive a response to query1 with more time for the system to start up" in { + eventually(Timeout(30.seconds), Interval(2.seconds)) { + Await.result((actor ? GetMetadataAction(query1)).mapTo[SuccessfulMetadataJsonResponse], 1.seconds) + } + } + testCases foreach { case (name, query, expectation) => s"perform $name correctly" in { diff --git a/src/ci/bin/test.inc.sh b/src/ci/bin/test.inc.sh index fb2a695632e..a8640329f81 100644 --- a/src/ci/bin/test.inc.sh +++ b/src/ci/bin/test.inc.sh @@ -813,7 +813,7 @@ cromwell::private::install_sbt_launcher() { # Non-deb package installation instructions adapted from # - https://github.com/sbt/sbt/releases/tag/v1.4.9 # - https://github.com/broadinstitute/scala-baseimage/pull/4/files - curl --location --fail --silent --show-error "https://github.com/sbt/sbt/releases/download/v1.4.9/sbt-1.4.9.tgz" | + curl --location --fail --silent --show-error "https://github.com/sbt/sbt/releases/download/v1.5.5/sbt-1.5.5.tgz" | sudo tar zxf - -C /usr/share sudo update-alternatives --install /usr/bin/sbt sbt /usr/share/sbt/bin/sbt 1 } @@ -1147,9 +1147,9 @@ cromwell::private::assemble_jars() { # CROMWELL_BUILD_SBT_ASSEMBLY_COMMAND allows for an override of the default `assembly` command for assembly. # This can be useful to reduce time and memory that might otherwise be spent assembling unused subprojects. 
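Since Settings.scala no longer consults the CROMWELL_SBT_ASSEMBLY_LOG_LEVEL environment variable (removed earlier in this diff), the CI scripts now pass the log level straight to sbt via a set command, as the hunk below shows. For reference, the command-line form and its .sbt-file equivalent express the same setting; this snippet is a sketch for comparison, not an addition to the build:

// One-off, per invocation (what the script below does):
//   sbt 'set ThisBuild / assembly / logLevel := Level.Error' ... assembly
// Permanent equivalent in a .sbt build definition:
ThisBuild / assembly / logLevel := Level.Error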
# shellcheck disable=SC2086 - CROMWELL_SBT_ASSEMBLY_LOG_LEVEL=error \ - sbt \ + sbt \ -Dsbt.supershell=false \ + 'set ThisBuild / assembly / logLevel := Level.Error' \ --warn \ ${CROMWELL_BUILD_SBT_COVERAGE_COMMAND} \ --error \ @@ -1198,11 +1198,11 @@ cromwell::private::generate_code_coverage() { } cromwell::private::publish_artifacts_only() { - CROMWELL_SBT_ASSEMBLY_LOG_LEVEL=warn sbt -Dsbt.supershell=false --warn "$@" publish + sbt 'set ThisBuild / assembly / logLevel := Level.Warn' -Dsbt.supershell=false --warn "$@" publish } cromwell::private::publish_artifacts_and_docker() { - CROMWELL_SBT_ASSEMBLY_LOG_LEVEL=warn sbt -Dsbt.supershell=false --warn "$@" publish dockerBuildAndPush + sbt 'set ThisBuild / assembly / logLevel := Level.Warn' -Dsbt.supershell=false --warn "$@" publish dockerBuildAndPush } cromwell::private::publish_artifacts_check() { @@ -1510,7 +1510,7 @@ cromwell:build::run_sbt_test() { sbt \ -Dsbt.supershell=false \ ${CROMWELL_BUILD_SBT_COVERAGE_COMMAND} \ - test:compile + Test/compile local sbt_tests diff --git a/src/ci/docker-compose/cromwell-test/docker-setup.sh b/src/ci/docker-compose/cromwell-test/docker-setup.sh index e48d7ffdc7c..c7970c7349b 100755 --- a/src/ci/docker-compose/cromwell-test/docker-setup.sh +++ b/src/ci/docker-compose/cromwell-test/docker-setup.sh @@ -66,7 +66,7 @@ apt-get clean # non-deb package installation instructions adapted from # - https://github.com/sbt/sbt/releases/tag/v1.4.9 # - https://github.com/broadinstitute/scala-baseimage/pull/4/files -curl -L --silent "https://github.com/sbt/sbt/releases/download/v1.4.9/sbt-1.4.9.tgz" | +curl -L --silent "https://github.com/sbt/sbt/releases/download/v1.5.5/sbt-1.5.5.tgz" | tar zxf - -C /usr/share update-alternatives --install /usr/bin/sbt sbt /usr/share/sbt/bin/sbt 1 diff --git a/supportedBackends/aws/src/main/scala/cromwell/backend/impl/aws/AwsBatchJob.scala b/supportedBackends/aws/src/main/scala/cromwell/backend/impl/aws/AwsBatchJob.scala index 2378c48c8ef..90f1918b9da 100755 --- a/supportedBackends/aws/src/main/scala/cromwell/backend/impl/aws/AwsBatchJob.scala +++ b/supportedBackends/aws/src/main/scala/cromwell/backend/impl/aws/AwsBatchJob.scala @@ -267,8 +267,17 @@ final case class AwsBatchJob(jobDescriptor: BackendJobDescriptor, // WDL/CWL generateEnvironmentKVPairs(runtimeAttributes.scriptS3BucketName, scriptKeyPrefix, scriptKey): _* ) - .memory(runtimeAttributes.memory.to(MemoryUnit.MB).amount.toInt) - .vcpus(runtimeAttributes.cpu.##).build + .resourceRequirements( + ResourceRequirement.builder() + .`type`(ResourceType.VCPU) + .value(runtimeAttributes.cpu.value.toString) + .build(), + ResourceRequirement.builder() + .`type`(ResourceType.MEMORY) + .value(runtimeAttributes.memory.to(MemoryUnit.MB).amount.toInt.toString) + .build(), + ) + .build() ) .jobQueue(runtimeAttributes.queueArn) .jobDefinition(definitionArn) diff --git a/supportedBackends/aws/src/main/scala/cromwell/backend/impl/aws/AwsBatchJobDefinition.scala b/supportedBackends/aws/src/main/scala/cromwell/backend/impl/aws/AwsBatchJobDefinition.scala index 137cce9a4ef..ae356b89772 100755 --- a/supportedBackends/aws/src/main/scala/cromwell/backend/impl/aws/AwsBatchJobDefinition.scala +++ b/supportedBackends/aws/src/main/scala/cromwell/backend/impl/aws/AwsBatchJobDefinition.scala @@ -31,16 +31,14 @@ package cromwell.backend.impl.aws -import scala.language.postfixOps import scala.collection.mutable.ListBuffer import cromwell.backend.BackendJobDescriptor import cromwell.backend.io.JobPaths -import 
software.amazon.awssdk.services.batch.model.{ContainerProperties, Host, KeyValuePair, MountPoint, Volume} +import software.amazon.awssdk.services.batch.model.{ContainerProperties, Host, KeyValuePair, MountPoint, ResourceRequirement, ResourceType, Volume} import cromwell.backend.impl.aws.io.AwsBatchVolume import scala.collection.JavaConverters._ import java.security.MessageDigest - import org.apache.commons.lang3.builder.{ToStringBuilder, ToStringStyle} import org.slf4j.{Logger, LoggerFactory} import wdl4s.parser.MemoryUnit @@ -160,8 +158,16 @@ trait AwsBatchJobDefinitionBuilder { (builder .command(packedCommand.asJava) - .memory(context.runtimeAttributes.memory.to(MemoryUnit.MB).amount.toInt) - .vcpus(context.runtimeAttributes.cpu##) + .resourceRequirements( + ResourceRequirement.builder() + .`type`(ResourceType.MEMORY) + .value(context.runtimeAttributes.memory.to(MemoryUnit.MB).amount.toInt.toString) + .build(), + ResourceRequirement.builder() + .`type`(ResourceType.VCPU) + .value(context.runtimeAttributes.cpu.value.toString) + .build(), + ) .volumes( volumes.asJava) .mountPoints( mountPoints.asJava) .environment(environment.asJava), diff --git a/wes2cromwell/src/main/resources/sentry.properties b/wes2cromwell/src/main/resources/sentry.properties new file mode 100644 index 00000000000..ebc5aa32687 --- /dev/null +++ b/wes2cromwell/src/main/resources/sentry.properties @@ -0,0 +1,2 @@ +# Quiet warnings about missing sentry DSNs by providing an empty string +dsn= diff --git a/wom/src/main/scala/wom/util/YamlUtils.scala b/wom/src/main/scala/wom/util/YamlUtils.scala index 7c136955da0..e1356839ad2 100644 --- a/wom/src/main/scala/wom/util/YamlUtils.scala +++ b/wom/src/main/scala/wom/util/YamlUtils.scala @@ -2,7 +2,6 @@ package wom.util import java.io.StringReader import java.util - import com.typesafe.config.ConfigException.BadValue import com.typesafe.config.{Config, ConfigFactory} import eu.timepit.refined.api.Refined @@ -15,7 +14,7 @@ import org.yaml.snakeyaml.LoaderOptions import org.yaml.snakeyaml.comments.CommentLine import org.yaml.snakeyaml.composer.Composer import org.yaml.snakeyaml.constructor.Constructor -import org.yaml.snakeyaml.nodes.Node +import org.yaml.snakeyaml.nodes.{MappingNode, Node, NodeTuple} import org.yaml.snakeyaml.parser.ParserImpl import org.yaml.snakeyaml.reader.StreamReader import org.yaml.snakeyaml.resolver.Resolver @@ -105,14 +104,29 @@ object YamlUtils { super.composeScalarNode(anchor, blockComments) } - override def composeSequenceNode(anchor: String, blockComments: util.List[CommentLine]): Node = { + override def composeSequenceNode(anchor: String): Node = { + checkDepth() + super.composeSequenceNode(anchor) + } + + override def composeMappingNode(anchor: String): Node = { + checkDepth() + super.composeMappingNode(anchor) + } + + override def composeMappingChildren(children: util.List[NodeTuple], node: MappingNode): Unit = { + checkDepth() + super.composeMappingChildren(children, node) + } + + override def composeKeyNode(node: MappingNode): Node = { checkDepth() - super.composeSequenceNode(anchor, blockComments) + super.composeKeyNode(node) } - override def composeMappingNode(anchor: String, blockComments: util.List[CommentLine]): Node = { + override def composeValueNode(node: MappingNode): Node = { checkDepth() - super.composeMappingNode(anchor, blockComments) + super.composeValueNode(node) } }
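The SnakeYAML upgrade reshapes the Composer callbacks: composeSequenceNode and composeMappingNode lose their blockComments parameter, and mapping keys, values, and children gain hooks of their own, so the depth check has to be re-applied in each override. The underlying mechanism is just a bounded counter checked before delegating to super; a minimal sketch with hypothetical names and limit:

// Minimal sketch of the guard these overrides call; the real checkDepth() in
// YamlUtils may differ in naming, limit, and error type.
final class MaxDepthGuard(maxDepth: Int) {
  private var depth = 0
  def checkDepth(): Unit = {
    depth += 1
    if (depth > maxDepth)
      throw new IllegalArgumentException(s"YAML nesting exceeds the configured limit of $maxDepth")
  }
}

Guarding every compose hook keeps hostile, deeply nested YAML from recursing arbitrarily far into the parser before the limit trips.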