Merge pull request #470 from gchq/438-upgrade-java-version
Issue 438 - Build on JDK 17 targeting Java 11
kr565370 authored Jan 10, 2023
2 parents a66dff8 + 4eb3652 commit 54f8aed
Showing 30 changed files with 89 additions and 71 deletions.
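In practice, "build on JDK 17 targeting Java 11" usually means compiling with javac's `--release 11` option (or the equivalent `maven.compiler.release` property), so the newer compiler still enforces the Java 11 API surface; the pom changes themselves are outside this excerpt. A minimal illustration of what that targeting allows and rejects — example code only, not taken from this repository:

```java
// Illustrative only: compiled by a JDK 17 javac invoked with `--release 11`,
// so only the Java 11 API surface is accepted.
public class TargetElevenExample {
    public static void main(String[] args) {
        // Fine when targeting 11: String#repeat and String#isBlank were added in Java 11.
        System.out.println("ok".repeat(2) + " blank? " + "  ".isBlank());

        // Would be rejected by `javac --release 11`, because Stream#toList
        // only exists from Java 16 onwards:
        // var list = java.util.stream.Stream.of(1, 2, 3).toList();
    }
}
```

The changes below follow the same split: the GitHub Actions workflows build on Corretto 17, while the Lambda runtimes and container base images move to Java 11.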
4 changes: 2 additions & 2 deletions .github/workflows/check-status.yaml
@@ -9,8 +9,8 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-java@v3
         with:
-          java-version: '8'
-          distribution: 'adopt'
+          java-version: '17'
+          distribution: 'corretto'
       - name: Cache dependencies
         uses: actions/cache@v3
         with:
6 changes: 3 additions & 3 deletions .github/workflows/chunk.yaml
@@ -17,8 +17,8 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-java@v3
         with:
-          java-version: '8'
-          distribution: 'adopt'
+          java-version: '17'
+          distribution: 'corretto'
       - name: Cache dependencies
         uses: actions/cache@v3
         with:
@@ -41,7 +41,7 @@ jobs:
         run: mvn --batch-mode clean install -am -pl ${{ steps.config.outputs.moduleList }} -Pquick,skipShade -Dmaven.repo.local=.m2/repository
         working-directory: ./java
       - name: Test
-        run: mvn --batch-mode --fail-at-end verify -pl ${{ steps.config.outputs.moduleList }} -Dmaven.repo.local=.m2/repository
+        run: mvn --batch-mode --fail-at-end verify -pl ${{ steps.config.outputs.moduleList }} -Dmaven.repo.local=.m2/repository -e
         working-directory: ./java
       - name: Check rate limits
         id: rate-limit
4 changes: 2 additions & 2 deletions .github/workflows/maven-full.yaml
@@ -13,8 +13,8 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-java@v3
         with:
-          java-version: '8'
-          distribution: 'adopt'
+          java-version: '17'
+          distribution: 'corretto'
       - name: Cache dependencies
         uses: actions/cache@v3
         with:
4 changes: 2 additions & 2 deletions java/bulk-import/bulk-import-runner/docker/Dockerfile
@@ -14,8 +14,8 @@
 ARG BUILDER_IMAGE_NAME=maven
 ARG BUILDER_IMAGE_TAG=3.8-openjdk-8-slim

-ARG BASE_IMAGE_NAME=openjdk
-ARG BASE_IMAGE_TAG=8-jre-slim
+ARG BASE_IMAGE_NAME=amazoncorretto
+ARG BASE_IMAGE_TAG=11

 ARG SPARK_VERSION=3.1.2
 ARG HADOOP_VERSION=3.2.1
2 changes: 1 addition & 1 deletion java/cdk/src/main/java/sleeper/cdk/stack/AthenaStack.java
@@ -173,7 +173,7 @@ private Function createConnector(String className, String instanceId, int logRet
                 .memorySize(memory)
                 .timeout(Duration.seconds(timeout))
                 .code(s3Code)
-                .runtime(Runtime.JAVA_8)
+                .runtime(Runtime.JAVA_11)
                 .logRetention(Utils.getRetentionDays(logRetentionDays))
                 .handler(className)
                 .environment(env)
6 changes: 3 additions & 3 deletions java/cdk/src/main/java/sleeper/cdk/stack/CompactionStack.java
@@ -302,7 +302,7 @@ private void lambdaToFindCompactionJobsThatShouldBeCreated(IBucket configBucket,
                 .create(this, "JobCreationLambda")
                 .functionName(functionName)
                 .description("Scan DynamoDB looking for files that need merging and create appropriate job specs in DynamoDB")
-                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_8)
+                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
                 .memorySize(instanceProperties.getInt(COMPACTION_JOB_CREATION_LAMBDA_MEMORY_IN_MB))
                 .timeout(Duration.seconds(instanceProperties.getInt(COMPACTION_JOB_CREATION_LAMBDA_TIMEOUT_IN_SECONDS)))
                 .code(code)
@@ -481,7 +481,7 @@ private void lambdaToCreateCompactionTasks(IBucket configBucket,
                 .create(this, "CompactionTasksCreator")
                 .functionName(functionName)
                 .description("If there are compaction jobs on queue create tasks to run them")
-                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_8)
+                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
                 .memorySize(instanceProperties.getInt(TASK_RUNNER_LAMBDA_MEMORY_IN_MB))
                 .timeout(Duration.seconds(instanceProperties.getInt(TASK_RUNNER_LAMBDA_TIMEOUT_IN_SECONDS)))
                 .code(code)
@@ -539,7 +539,7 @@ private void lambdaToCreateSplittingCompactionTasks(IBucket configBucket,
                 .create(this, "SplittingCompactionTasksCreator")
                 .functionName(functionName)
                 .description("If there are splitting compaction jobs on queue create tasks to run them")
-                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_8)
+                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
                 .memorySize(instanceProperties.getInt(TASK_RUNNER_LAMBDA_MEMORY_IN_MB))
                 .timeout(Duration.seconds(instanceProperties.getInt(TASK_RUNNER_LAMBDA_TIMEOUT_IN_SECONDS)))
                 .code(code)
2 changes: 2 additions & 0 deletions java/cdk/src/main/java/sleeper/cdk/stack/DashboardStack.java
@@ -15,6 +15,7 @@
  */
 package sleeper.cdk.stack;

+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import sleeper.cdk.Utils;
 import sleeper.configuration.properties.InstanceProperties;
 import sleeper.configuration.properties.UserDefinedInstanceProperty;
@@ -46,6 +47,7 @@

 import static sleeper.configuration.properties.UserDefinedInstanceProperty.DASHBOARD_TIME_WINDOW_MINUTES;

+@SuppressFBWarnings("MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR")
 public class DashboardStack extends NestedStack {
     private final IngestStack ingestStack;
     private final CompactionStack compactionStack;
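The new class-level `@SuppressFBWarnings("MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR")` silences the SpotBugs rule that flags constructors calling methods a subclass could override, since such an override runs before the subclass's own fields are initialised. A hypothetical sketch of the pattern the rule warns about — illustrative names only, not code from DashboardStack:

```java
// Hypothetical illustration (not from the Sleeper codebase) of the pattern behind
// SpotBugs' MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR warning.
class ReportStack {
    ReportStack() {
        addWidgets(); // calling an overridable method from the constructor triggers the warning
    }

    protected void addWidgets() {
        // base implementation
    }

    public static void main(String[] args) {
        new CustomStack(); // prints "adding widgets for null"
    }
}

class CustomStack extends ReportStack {
    private final String title = "Dashboard";

    @Override
    protected void addWidgets() {
        // Runs while the superclass constructor is executing, before 'title' is assigned.
        System.out.println("adding widgets for " + title);
    }
}
```

The suppression marks such calls in DashboardStack's constructor as intentional rather than defects.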
@@ -73,7 +73,7 @@ public GarbageCollectorStack(
                 .create(this, "GarbageCollectorLambda")
                 .functionName(functionName)
                 .description("Scan DynamoDB looking for files that need deleting and delete them")
-                .runtime(Runtime.JAVA_8)
+                .runtime(Runtime.JAVA_11)
                 .memorySize(instanceProperties.getInt(GARBAGE_COLLECTOR_LAMBDA_MEMORY_IN_MB))
                 // Timeout is set to 90% of the period with which this runs to avoid 2 running simultaneously,
                 // with a maximum of 900 seconds (15 minutes) which is the maximum execution time
2 changes: 1 addition & 1 deletion java/cdk/src/main/java/sleeper/cdk/stack/IngestStack.java
@@ -275,7 +275,7 @@ private void lambdaToCreateIngestTasks(IBucket configBucket, Queue ingestJobQueu
                 .create(this, "IngestTasksCreator")
                 .functionName(functionName)
                 .description("If there are ingest jobs on queue create tasks to run them")
-                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_8)
+                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
                 .memorySize(instanceProperties.getInt(TASK_RUNNER_LAMBDA_MEMORY_IN_MB))
                 .timeout(Duration.seconds(instanceProperties.getInt(TASK_RUNNER_LAMBDA_TIMEOUT_IN_SECONDS)))
                 .code(code)
@@ -139,7 +139,7 @@ public PartitionSplittingStack(Construct scope,
                 .create(this, "FindPartitionsToSplitLambda")
                 .functionName(functionName)
                 .description("Scan DynamoDB looking for partitions that need splitting")
-                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_8)
+                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
                 .memorySize(instanceProperties.getInt(FIND_PARTITIONS_TO_SPLIT_LAMBDA_MEMORY_IN_MB))
                 .timeout(Duration.seconds(instanceProperties.getInt(FIND_PARTITIONS_TO_SPLIT_TIMEOUT_IN_SECONDS)))
                 .code(code)
@@ -175,7 +175,7 @@ public PartitionSplittingStack(Construct scope,
                 .create(this, "SplitPartitionLambda")
                 .functionName(functionName)
                 .description("Triggered by an SQS event that contains a partition to split")
-                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_8)
+                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
                 .memorySize(instanceProperties.getInt(SPLIT_PARTITIONS_LAMBDA_MEMORY_IN_MB))
                 .timeout(Duration.seconds(instanceProperties.getInt(SPLIT_PARTITIONS_TIMEOUT_IN_SECONDS)))
                 .code(code)
@@ -72,7 +72,7 @@ public PropertiesStack(Construct scope,
                 .environment(Utils.createDefaultEnvironment(instanceProperties))
                 .description("Lambda for writing instance properties to S3 upon initialisation and teardown")
                 .logRetention(Utils.getRetentionDays(instanceProperties.getInt(LOG_RETENTION_IN_DAYS)))
-                .runtime(Runtime.JAVA_8)
+                .runtime(Runtime.JAVA_11)
                 .build());

         configBucket.grantWrite(propertiesWriterLambda);
4 changes: 2 additions & 2 deletions java/cdk/src/main/java/sleeper/cdk/stack/QueryStack.java
@@ -216,7 +216,7 @@ public QueryStack(Construct scope,
                 .create(this, "QueryExecutorLambda")
                 .functionName(functionName)
                 .description("When a query arrives on the query SQS queue, this lambda is invoked to perform the query")
-                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_8)
+                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
                 .memorySize(instanceProperties.getInt(QUERY_PROCESSOR_LAMBDA_MEMORY_IN_MB))
                 .timeout(Duration.seconds(instanceProperties.getInt(QUERY_PROCESSOR_LAMBDA_TIMEOUT_IN_SECONDS)))
                 .code(code)
@@ -282,7 +282,7 @@ protected void setupWebSocketApi(Code queryCode, InstanceProperties instanceProp
                 .memorySize(256)
                 .logRetention(Utils.getRetentionDays(instanceProperties.getInt(LOG_RETENTION_IN_DAYS)))
                 .timeout(Duration.seconds(29))
-                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_8)
+                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
                 .build();
         queriesQueue.grantSendMessages(handler);
         configBucket.grantRead(handler);
4 changes: 2 additions & 2 deletions java/cdk/src/main/java/sleeper/cdk/stack/TableStack.java
@@ -96,7 +96,7 @@ public TableStack(
                 .environment(Utils.createDefaultEnvironment(instanceProperties))
                 .description("Lambda for handling initialisation and teardown of Sleeper Tables")
                 .logRetention(Utils.getRetentionDays(instanceProperties.getInt(LOG_RETENTION_IN_DAYS)))
-                .runtime(Runtime.JAVA_8)
+                .runtime(Runtime.JAVA_11)
                 .build());

         configBucket.grantReadWrite(sleeperTableLambda);
@@ -207,7 +207,7 @@ private void createTable(InstanceProperties instanceProperties,
         Function tableMetricsPublisher = Function.Builder.create(this, tableName + "MetricsPublisher")
                 .description("Generates metrics for a Sleeper table based on info in its state store, and publishes them to CloudWatch")
                 .code(Code.fromBucket(jarsBucket, "metrics-" + instanceProperties.get(VERSION) + ".jar"))
-                .runtime(Runtime.JAVA_8)
+                .runtime(Runtime.JAVA_11)
                 .handler("sleeper.metrics.TableMetricsLambda::handleRequest")
                 .memorySize(256)
                 .timeout(Duration.seconds(60))
2 changes: 1 addition & 1 deletion java/cdk/src/main/java/sleeper/cdk/stack/VpcStack.java
@@ -71,7 +71,7 @@ public VpcStack(Construct scope, String id, InstanceProperties instancePropertie
                 .memorySize(2048)
                 .description("Lambda for checking the VPC has an associated S3 endpoint")
                 .logRetention(Utils.getRetentionDays(instanceProperties.getInt(LOG_RETENTION_IN_DAYS)))
-                .runtime(Runtime.JAVA_8)
+                .runtime(Runtime.JAVA_11)
                 .build());

         vpcCheckLambda.addToRolePolicy(new PolicyStatement(new PolicyStatementProps.Builder()
@@ -336,7 +336,7 @@ protected void createBulkImportJobStarterFunction() {
                 .memorySize(1024)
                 .timeout(Duration.seconds(20))
                 .environment(env)
-                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_8)
+                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
                 .handler("sleeper.bulkimport.starter.BulkImportStarter")
                 .logRetention(Utils.getRetentionDays(instanceProperties.getInt(LOG_RETENTION_IN_DAYS)))
                 .events(Lists.newArrayList(new SqsEventSource(bulkImportJobQueue)))
@@ -149,7 +149,7 @@ public EksBulkImportStack(
                 .memorySize(1024)
                 .timeout(Duration.seconds(10))
                 .environment(env)
-                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_8)
+                .runtime(software.amazon.awscdk.services.lambda.Runtime.JAVA_11)
                 .handler("sleeper.bulkimport.starter.BulkImportStarter")
                 .logRetention(Utils.getRetentionDays(instanceProperties.getInt(UserDefinedInstanceProperty.LOG_RETENTION_IN_DAYS)))
                 .events(Lists.newArrayList(new SqsEventSource(bulkImportJobQueue)))
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023 Crown Copyright
+ * Copyright 2022-2023 Crown Copyright
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023 Crown Copyright
+ * Copyright 2022-2023 Crown Copyright
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023 Crown Copyright
+ * Copyright 2022-2023 Crown Copyright
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023 Crown Copyright
+ * Copyright 2022-2023 Crown Copyright
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
4 changes: 1 addition & 3 deletions java/compaction/compaction-job-execution/docker/Dockerfile
@@ -11,9 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM openjdk:8-jre
-
-RUN apt-get update
+FROM amazoncorretto:11

 COPY compaction-job-execution.jar /compaction-job-execution.jar
 COPY run.sh /run.sh
4 changes: 1 addition & 3 deletions java/ingest/ingest-runner/docker/Dockerfile
@@ -11,9 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM openjdk:8-jre
-
-RUN apt-get update
+FROM amazoncorretto:11

 COPY ingest.jar /ingest.jar
 COPY run.sh /run.sh
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022 Crown Copyright
+ * Copyright 2022-2023 Crown Copyright
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@

 import org.junit.Test;
 import sleeper.ingest.job.IngestJob;
+import sleeper.ingest.job.status.IngestJobStatusStore;
 import sleeper.ingest.status.store.testutils.DynamoDBIngestJobStatusStoreTestBase;

 import java.time.Instant;
@@ -33,8 +34,11 @@ public void shouldReturnIngestJobsInPeriod() {
         // Given
         IngestJob job1 = jobWithFiles("file1");
         IngestJob job2 = jobWithFiles("file2");
-        Instant startedTime1 = Instant.now();
-        Instant startedTime2 = Instant.now();
+        Instant startedTime1 = Instant.parse("2023-01-03T14:50:00.001Z");
+        Instant startedUpdateTime1 = Instant.parse("2023-01-03T14:50:00.123Z");
+        Instant startedTime2 = Instant.parse("2023-01-03T14:55:00.001Z");
+        Instant startedUpdateTime2 = Instant.parse("2023-01-03T14:55:00.123Z");
+        IngestJobStatusStore store = storeWithUpdateTimes(startedUpdateTime1, startedUpdateTime2);

         // When
         store.jobStarted(DEFAULT_TASK_ID, job1, startedTime1);
@@ -54,14 +58,16 @@ public void shouldReturnIngestJobsInPeriod() {
     public void shouldExcludeIngestJobOutsidePeriod() {
         // Given
         IngestJob job = jobWithFiles("file");
-        Instant startedTime = Instant.now();
+        Instant periodStart = Instant.parse("2023-01-01T14:00:00.001Z");
+        Instant periodEnd = Instant.parse("2023-01-02T14:00:00.001Z");
+        Instant startedTime = Instant.parse("2023-01-03T14:50:00.001Z");
+        Instant startedUpdateTime = Instant.parse("2023-01-03T14:50:00.123Z");
+        IngestJobStatusStore store = storeWithUpdateTimes(startedUpdateTime);

         // When
         store.jobStarted(DEFAULT_TASK_ID, job, startedTime);

         // Then
-        Instant periodStart = Instant.now().plus(Period.ofDays(1));
-        Instant periodEnd = periodStart.plus(Period.ofDays(1));
         assertThat(store.getJobsInTimePeriod(tableName, periodStart, periodEnd)).isEmpty();
     }

@@ -70,8 +76,11 @@ public void shouldExcludeIngestJobInOtherTable() {
         // Given
         IngestJob job1 = jobWithFiles("file1");
         IngestJob job2 = jobWithTableAndFiles("other-table", "file2");
-        Instant startedTime1 = Instant.now();
-        Instant startedTime2 = Instant.now();
+        Instant startedTime1 = Instant.parse("2023-01-03T14:50:00.001Z");
+        Instant startedUpdateTime1 = Instant.parse("2023-01-03T14:50:00.123Z");
+        Instant startedTime2 = Instant.parse("2023-01-03T14:55:00.001Z");
+        Instant startedUpdateTime2 = Instant.parse("2023-01-03T14:55:00.123Z");
+        IngestJobStatusStore store = storeWithUpdateTimes(startedUpdateTime1, startedUpdateTime2);

         // When
         store.jobStarted(DEFAULT_TASK_ID, job1, startedTime1);
@@ -86,18 +95,19 @@ public void shouldExcludeIngestJobInOtherTable() {
     }

     @Test
-    public void shouldIncludeFinishedStatusUpdateOutsidePeriod() throws Exception {
+    public void shouldIncludeFinishedStatusUpdateOutsidePeriod() {
         // Given
         IngestJob job = jobWithFiles("file");
-        Instant periodStart = Instant.now().minus(Period.ofDays(1));
-        Instant startedTime = Instant.now();
+        Instant periodStart = Instant.parse("2023-01-02T14:52:00.001Z");
+        Instant startedTime = Instant.parse("2023-01-03T14:50:00.001Z");
+        Instant startedUpdateTime = Instant.parse("2023-01-03T14:50:00.123Z");
+        Instant periodEnd = Instant.parse("2023-01-03T14:52:00.001Z");
+        Instant finishedTime = Instant.parse("2023-01-03T14:56:00.001Z");
+        Instant finishedUpdateTime = Instant.parse("2023-01-03T14:56:00.123Z");
+        IngestJobStatusStore store = storeWithUpdateTimes(startedUpdateTime, finishedUpdateTime);

         // When
         store.jobStarted(DEFAULT_TASK_ID, job, startedTime);
-        Thread.sleep(1);
-        Instant periodEnd = Instant.now();
-        Thread.sleep(1);
-        Instant finishedTime = Instant.now();
         store.jobFinished(DEFAULT_TASK_ID, job, defaultSummary(startedTime, finishedTime));

         // Then
@@ -34,8 +34,8 @@ public void shouldReturnIngestJobsByTaskId() {
         String searchingTaskId = "test-task";
         IngestJob job1 = jobWithFiles("file1");
         IngestJob job2 = jobWithFiles("file2");
-        Instant startedTime1 = Instant.now();
-        Instant startedTime2 = Instant.now();
+        Instant startedTime1 = Instant.parse("2022-12-14T13:51:12.001Z");
+        Instant startedTime2 = Instant.parse("2022-12-14T13:52:12.001Z");

         // When
         store.jobStarted(searchingTaskId, job1, startedTime1);
@@ -61,8 +61,8 @@ public void shouldReportSeveralIngestJobsStarted() {
         // Given
         IngestJob job1 = jobWithFiles("file1");
         IngestJob job2 = jobWithFiles("file2");
-        Instant startedTime1 = Instant.now();
-        Instant startedTime2 = Instant.now();
+        Instant startedTime1 = Instant.parse("2022-12-14T13:51:12.001Z");
+        Instant startedTime2 = Instant.parse("2022-12-14T13:56:12.001Z");

         // When
         store.jobStarted(DEFAULT_TASK_ID, job1, startedTime1);
@@ -1,5 +1,5 @@
 /*
- * Copyright 2022 Crown Copyright
+ * Copyright 2022-2023 Crown Copyright
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -71,6 +71,10 @@ public void tearDown() {

     protected IngestJobStatusStore storeWithTimeToLiveAndUpdateTimes(Duration timeToLive, Instant... updateTimes) {
         instanceProperties.set(INGEST_JOB_STATUS_TTL_IN_SECONDS, "" + timeToLive.getSeconds());
+        return storeWithUpdateTimes(updateTimes);
+    }
+
+    protected IngestJobStatusStore storeWithUpdateTimes(Instant... updateTimes) {
         return new DynamoDBIngestJobStatusStore(dynamoDBClient, instanceProperties,
                 Arrays.stream(updateTimes).iterator()::next);
     }
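The extracted `storeWithUpdateTimes` helper pins the store's update-time clock by passing `Arrays.stream(updateTimes).iterator()::next` as the third constructor argument, so each status write receives the next predetermined `Instant` instead of `Instant.now()` — which is what lets the tests above replace `Thread.sleep` with fixed timestamps. A standalone sketch of that iterator-as-supplier pattern, with hypothetical class and method names (assuming only that the constructor parameter behaves like a `Supplier<Instant>`):

```java
import java.time.Instant;
import java.util.Arrays;
import java.util.Iterator;
import java.util.function.Supplier;

// Hypothetical illustration of feeding fixed timestamps into a component that
// would otherwise call Instant.now(); not code from the Sleeper codebase.
public class FixedUpdateTimesExample {
    static class StatusRecorder {
        private final Supplier<Instant> updateTimeSupplier;

        StatusRecorder(Supplier<Instant> updateTimeSupplier) {
            this.updateTimeSupplier = updateTimeSupplier;
        }

        Instant recordUpdate() {
            return updateTimeSupplier.get(); // each call consumes the next fixed time
        }
    }

    public static void main(String[] args) {
        Instant first = Instant.parse("2023-01-03T14:50:00.123Z");
        Instant second = Instant.parse("2023-01-03T14:55:00.123Z");

        Iterator<Instant> times = Arrays.stream(new Instant[]{first, second}).iterator();
        StatusRecorder recorder = new StatusRecorder(times::next);

        System.out.println(recorder.recordUpdate()); // 2023-01-03T14:50:00.123Z
        System.out.println(recorder.recordUpdate()); // 2023-01-03T14:55:00.123Z
    }
}
```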