From c3cb8b93397d1b376b07edc8daea19637806d42a Mon Sep 17 00:00:00 2001 From: Pavlo Smahin Date: Tue, 26 Mar 2024 12:57:11 +0200 Subject: [PATCH] feat(locations): implement domain event production for location create/update/delete (#1006) --- NEWS.md | 22 ++ ramls/dereferenceditem.json | 6 +- ramls/examples/{ => locations}/location.json | 0 ramls/examples/{ => locations}/locations.json | 0 ramls/location.raml | 10 +- ramls/{ => locations}/location.json | 10 +- ramls/{ => locations}/locations.json | 0 .../java/org/folio/InventoryKafkaTopic.java | 28 +- .../org/folio/persist/LocationRepository.java | 16 + .../java/org/folio/rest/impl/LocationApi.java | 282 ++---------------- .../org/folio/rest/impl/ShelfLocationApi.java | 8 +- .../org/folio/rest/impl/StorageHelper.java | 2 +- .../LocationDomainEventPublisher.java | 40 +++ .../services/location/LocationService.java | 199 ++++++++++++ .../folio/rest/impl/BaseIntegrationTest.java | 7 +- .../java/org/folio/rest/impl/LocationIT.java | 155 ++++++++++ .../topic/KafkaAdminClientServiceTest.java | 3 +- 17 files changed, 506 insertions(+), 282 deletions(-) rename ramls/examples/{ => locations}/location.json (100%) rename ramls/examples/{ => locations}/locations.json (100%) rename ramls/{ => locations}/location.json (93%) rename ramls/{ => locations}/locations.json (100%) create mode 100644 src/main/java/org/folio/persist/LocationRepository.java create mode 100644 src/main/java/org/folio/services/domainevent/LocationDomainEventPublisher.java create mode 100644 src/main/java/org/folio/services/location/LocationService.java create mode 100644 src/test/java/org/folio/rest/impl/LocationIT.java diff --git a/NEWS.md b/NEWS.md index 8c855bff5..bbbb39632 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,25 @@ +## v27.2.0 In progress +### Breaking changes +* Description ([ISSUE_NUMBER](https://issues.folio.org/browse/ISSUE_NUMBER)) + +### New APIs versions +* Provides `API_NAME vX.Y` +* Requires `API_NAME vX.Y` + +### Features +* Implement 
domain event production for location create/update/delete ([MODINVSTOR-1181](https://issues.folio.org/browse/MODINVSTOR-1181)) + +### Bug fixes +* Description ([ISSUE_NUMBER](https://issues.folio.org/browse/ISSUE_NUMBER)) + +### Tech Dept +* Description ([ISSUE_NUMBER](https://issues.folio.org/browse/ISSUE_NUMBER)) + +### Dependencies +* Bump `LIB_NAME` from `OLD_VERSION` to `NEW_VERSION` +* Add `LIB_NAME` `2.7.4` +* Remove `LIB_NAME` + ## v27.1.0 2024-03-19 ### New APIs versions * Provides `item-storage v10.1` diff --git a/ramls/dereferenceditem.json b/ramls/dereferenceditem.json index cbff50083..2a93352b6 100644 --- a/ramls/dereferenceditem.json +++ b/ramls/dereferenceditem.json @@ -307,17 +307,17 @@ "permanentLocation": { "type": "object", "description": "Permanent item location is the default location, shelving location, or holding which is a physical place where items are stored, or an Online location.", - "$ref": "location.json" + "$ref": "locations/location.json" }, "temporaryLocation": { "type": "object", "description": "Temporary item location is the temporarily location, shelving location, or holding which is a physical place where items are stored, or an Online location.", - "$ref": "location.json" + "$ref": "locations/location.json" }, "effectiveLocation": { "type": "object", "description": "Read only current home location for the item.", - "$ref": "location.json", + "$ref": "locations/location.json", "readonly": true }, "electronicAccess": { diff --git a/ramls/examples/location.json b/ramls/examples/locations/location.json similarity index 100% rename from ramls/examples/location.json rename to ramls/examples/locations/location.json diff --git a/ramls/examples/locations.json b/ramls/examples/locations/locations.json similarity index 100% rename from ramls/examples/locations.json rename to ramls/examples/locations/locations.json diff --git a/ramls/location.raml b/ramls/location.raml index 7d3ba6f76..f052cd3a5 100644 --- a/ramls/location.raml +++ 
b/ramls/location.raml @@ -10,8 +10,8 @@ documentation: (shelf) locations of the system types: - location: !include location.json - locations: !include locations.json + location: !include locations/location.json + locations: !include locations/locations.json errors: !include raml-util/schemas/errors.schema traits: @@ -27,8 +27,8 @@ resourceTypes: /locations: type: collection: - exampleCollection: !include examples/locations.json - exampleItem: !include examples/location.json + exampleCollection: !include examples/locations/locations.json + exampleItem: !include examples/locations/location.json schemaCollection: locations schemaItem: location get: @@ -54,7 +54,7 @@ resourceTypes: description: "Pass in the location id" type: collection-item: - exampleItem: !include examples/location.json + exampleItem: !include examples/locations/location.json schema: location get: put: diff --git a/ramls/location.json b/ramls/locations/location.json similarity index 93% rename from ramls/location.json rename to ramls/locations/location.json index cc132da9c..ae7321fd1 100644 --- a/ramls/location.json +++ b/ramls/locations/location.json @@ -79,13 +79,13 @@ }, "primaryServicePoint": { "description": "The UUID of the primary service point of this (shelf) location.", - "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$", + "format": "uuid", "type": "string" }, "primaryServicePointObject": { "type": "object", "description": "Dereferenced object for primary service point. 
This should really just be called 'primaryServicePoint', but the field containing the ID of this object has that name -- it should really be called 'primaryServicePointId' -- so we need something different for this one.", - "$ref": "servicepoint.json", + "$ref": "../servicepoint.json", "readonly": true, "folio:isVirtual": true, "folio:linkBase": "service-points", @@ -99,7 +99,7 @@ "items": { "description": "The UUID of a service point that belongs to this (shelf) location.", "type": "string", - "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$", + "format": "uuid", "not": { "type": "null" } @@ -110,7 +110,7 @@ "description": "List of dereferenced service points", "items": { "type": "object", - "$ref": "servicepoint.json" + "$ref": "../servicepoint.json" }, "readonly": true, "folio:isVirtual": true, @@ -121,7 +121,7 @@ }, "metadata": { "type": "object", - "$ref": "raml-util/schemas/metadata.schema", + "$ref": "../raml-util/schemas/metadata.schema", "readonly": true } }, diff --git a/ramls/locations.json b/ramls/locations/locations.json similarity index 100% rename from ramls/locations.json rename to ramls/locations/locations.json diff --git a/src/main/java/org/folio/InventoryKafkaTopic.java b/src/main/java/org/folio/InventoryKafkaTopic.java index ed2373a77..2e79f8434 100644 --- a/src/main/java/org/folio/InventoryKafkaTopic.java +++ b/src/main/java/org/folio/InventoryKafkaTopic.java @@ -1,9 +1,13 @@ package org.folio; +import java.util.Map; +import java.util.Optional; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.folio.kafka.services.KafkaTopic; public enum InventoryKafkaTopic implements KafkaTopic { + INSTANCE("instance"), ITEM("item"), HOLDINGS_RECORD("holdings-record"), @@ -11,13 +15,21 @@ public enum InventoryKafkaTopic implements KafkaTopic { BOUND_WITH("bound-with"), ASYNC_MIGRATION("async-migration"), SERVICE_POINT("service-point"), - 
CLASSIFICATION_TYPE("classification-type"); + CLASSIFICATION_TYPE("classification-type"), + LOCATION("location"); private static final String DEFAULT_NUM_PARTITIONS_PROPERTY = "KAFKA_DOMAIN_TOPIC_NUM_PARTITIONS"; private static final String DEFAULT_NUM_PARTITIONS_VALUE = "50"; - private static final String CLASSIFICATION_TYPE_NUM_PARTITIONS_PROPERTY = - "KAFKA_CLASSIFICATION_TYPE_TOPIC_NUM_PARTITIONS"; - private static final String CLASSIFICATION_TYPE_NUM_PARTITIONS_VALUE = "1"; + + /** + * Map where a key is {@link InventoryKafkaTopic} and value is a {@link Pair} of + * environment variable name that specifies number of partitions for the topic and default value is not specified. + */ + private static final Map> TOPIC_PARTITION_MAP = Map.of( + CLASSIFICATION_TYPE, Pair.of("KAFKA_CLASSIFICATION_TYPE_TOPIC_NUM_PARTITIONS", "1"), + LOCATION, Pair.of("KAFKA_LOCATION_TOPIC_NUM_PARTITIONS", "1") + ); + private final String topic; InventoryKafkaTopic(String topic) { @@ -36,11 +48,9 @@ public String topicName() { @Override public int numPartitions() { - if (this == CLASSIFICATION_TYPE) { - return getNumberOfPartitions(CLASSIFICATION_TYPE_NUM_PARTITIONS_PROPERTY, - CLASSIFICATION_TYPE_NUM_PARTITIONS_VALUE); - } - return getNumberOfPartitions(DEFAULT_NUM_PARTITIONS_PROPERTY, DEFAULT_NUM_PARTITIONS_VALUE); + return Optional.ofNullable(TOPIC_PARTITION_MAP.get(this)) + .map(pair -> getNumberOfPartitions(pair.getKey(), pair.getValue())) + .orElse(getNumberOfPartitions(DEFAULT_NUM_PARTITIONS_PROPERTY, DEFAULT_NUM_PARTITIONS_VALUE)); } private int getNumberOfPartitions(String propertyName, String defaultNumPartitions) { diff --git a/src/main/java/org/folio/persist/LocationRepository.java b/src/main/java/org/folio/persist/LocationRepository.java new file mode 100644 index 000000000..9f5fd8384 --- /dev/null +++ b/src/main/java/org/folio/persist/LocationRepository.java @@ -0,0 +1,16 @@ +package org.folio.persist; + +import static org.folio.rest.persist.PgUtil.postgresClient; +import 
static org.folio.services.location.LocationService.LOCATION_TABLE; + +import io.vertx.core.Context; +import java.util.Map; +import org.folio.rest.jaxrs.model.Location; + +public class LocationRepository extends AbstractRepository { + + public LocationRepository(Context context, Map okapiHeaders) { + super(postgresClient(context, okapiHeaders), LOCATION_TABLE, Location.class); + } + +} diff --git a/src/main/java/org/folio/rest/impl/LocationApi.java b/src/main/java/org/folio/rest/impl/LocationApi.java index 36377b9c7..0ecad10c8 100644 --- a/src/main/java/org/folio/rest/impl/LocationApi.java +++ b/src/main/java/org/folio/rest/impl/LocationApi.java @@ -1,293 +1,75 @@ package org.folio.rest.impl; -import static org.folio.rest.impl.StorageHelper.getCql; -import static org.folio.rest.impl.StorageHelper.logAndSaveError; -import static org.folio.rest.tools.utils.TenantTool.tenantId; +import static io.vertx.core.Future.succeededFuture; +import static org.folio.rest.support.EndpointFailureHandler.handleFailure; import io.vertx.core.AsyncResult; -import io.vertx.core.CompositeFuture; import io.vertx.core.Context; -import io.vertx.core.Future; import io.vertx.core.Handler; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.UUID; import javax.ws.rs.core.Response; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.folio.cql2pgjson.exception.FieldException; import org.folio.rest.annotations.Validate; import org.folio.rest.jaxrs.model.Location; -import org.folio.rest.jaxrs.model.Locations; -import org.folio.rest.persist.PgUtil; -import org.folio.rest.persist.PostgresClient; -import org.folio.rest.persist.cql.CQLWrapper; -import org.folio.rest.tools.utils.TenantTool; -import org.folio.rest.tools.utils.ValidationHelper; +import org.folio.services.location.LocationService; public class LocationApi implements 
org.folio.rest.jaxrs.resource.Locations { - public static final String LOCATION_TABLE = "location"; - public static final String URL_PREFIX = "/locations"; - public static final String SERVICEPOINT_IDS = "servicePointIds"; - public static final String PRIMARY_SERVICEPOINT = "primaryServicePoint"; - private static final Logger logger = LogManager.getLogger(); - // Note, this is the way to get rid of unnecessary try-catch blocks. Use the - // same everywhere! @Validate @Override - public void getLocations( - String query, - String totalRecords, - int offset, - int limit, - Map okapiHeaders, - Handler> asyncResultHandler, - Context vertxContext) { + public void getLocations(String query, String totalRecords, int offset, int limit, Map okapiHeaders, + Handler> asyncResultHandler, Context vertxContext) { - String tenantId = tenantId(okapiHeaders); - CQLWrapper cql; - try { - cql = getCql(query, limit, offset, LOCATION_TABLE); - } catch (FieldException e) { - String message = logAndSaveError(e); - logger.warn("XXX - Query exception ", e); - asyncResultHandler.handle(Future.succeededFuture( - GetLocationsResponse.respond400WithTextPlain(message))); - return; - } - PostgresClient.getInstance(vertxContext.owner(), tenantId) - .get(LOCATION_TABLE, Location.class, - new String[] {"*"}, cql, true, true, reply -> { - // netbeans, please indent here! 
- if (reply.failed()) { - String message = logAndSaveError(reply.cause()); - asyncResultHandler.handle(Future.succeededFuture( - GetLocationsResponse.respond400WithTextPlain(message))); - } else { - Locations shelfLocations = new Locations(); - List shelfLocationsList = reply.result().getResults(); - shelfLocations.setLocations(shelfLocationsList); - shelfLocations.setTotalRecords(reply.result().getResultInfo().getTotalRecords()); - asyncResultHandler.handle(Future.succeededFuture( - GetLocationsResponse.respond200WithApplicationJson(shelfLocations))); - } - }); + new LocationService(vertxContext, okapiHeaders).getByQuery(query, offset, limit) + .onSuccess(response -> asyncResultHandler.handle(succeededFuture(response))) + .onFailure(handleFailure(asyncResultHandler)); } @Validate @Override - public void postLocations( - Location entity, - Map okapiHeaders, - Handler> asyncResultHandler, - Context vertxContext) { - - String tenantId = tenantId(okapiHeaders); - - runLocationChecks(checkIdProvided(entity), checkAtLeastOneServicePoint(entity), - checkPrimaryServicePointRelationship(entity)) - .onComplete(checksResult -> { - if (checksResult.succeeded()) { - - PostgresClient.getInstance(vertxContext.owner(), tenantId).save(LOCATION_TABLE, entity.getId(), entity, - reply -> { - if (reply.failed()) { - String message = logAndSaveError(reply.cause()); - if (message != null && message.contains("duplicate key value violates unique constraint")) { - asyncResultHandler.handle( - Future.succeededFuture(PostLocationsResponse.respond422WithApplicationJson(ValidationHelper - .createValidationErrorMessage("shelflocation", entity.getId(), "Location already exists")))); - } else { - asyncResultHandler - .handle(Future.succeededFuture(PostLocationsResponse.respond500WithTextPlain(message))); - } - } else { - String responseObject = reply.result(); - entity.setId(responseObject); - asyncResultHandler - 
.handle(Future.succeededFuture(PostLocationsResponse.respond201WithApplicationJson(entity, - PostLocationsResponse.headersFor201().withLocation(URL_PREFIX + responseObject)))); - } - }); - - } else { - String message = logAndSaveError(checksResult.cause()); - asyncResultHandler.handle( - Future.succeededFuture(PostLocationsResponse.respond422WithApplicationJson(ValidationHelper - .createValidationErrorMessage(((LocationCheckError) checksResult.cause()).getField(), - entity.getId(), message)))); - } - - }); + public void postLocations(Location entity, Map okapiHeaders, + Handler> asyncResultHandler, Context vertxContext) { + new LocationService(vertxContext, okapiHeaders).create(entity) + .onSuccess(response -> asyncResultHandler.handle(succeededFuture(response))) + .onFailure(handleFailure(asyncResultHandler)); } @Validate @Override - public void deleteLocations(Map okapiHeaders, - Handler> asyncResultHandler, + public void deleteLocations(Map okapiHeaders, Handler> asyncResultHandler, Context vertxContext) { - String tenantId = TenantTool.tenantId(okapiHeaders); - PostgresClient.getInstance(vertxContext.owner(), TenantTool.calculateTenantId(tenantId)) - .execute(String.format("DELETE FROM %s_%s.%s", - tenantId, "mod_inventory_storage", LOCATION_TABLE), - reply -> { - if (reply.succeeded()) { - asyncResultHandler.handle(Future.succeededFuture(DeleteLocationsResponse.respond204())); - } else { - asyncResultHandler.handle(Future.succeededFuture( - DeleteLocationsResponse.respond500WithTextPlain(reply.cause().getMessage()))); - } - }); + new LocationService(vertxContext, okapiHeaders).deleteAll() + .onSuccess(response -> asyncResultHandler.handle(succeededFuture(response))) + .onFailure(handleFailure(asyncResultHandler)); } @Validate @Override - public void getLocationsById( - String id, - Map okapiHeaders, - Handler> asyncResultHandler, - Context vertxContext) { + public void getLocationsById(String id, Map okapiHeaders, + Handler> asyncResultHandler, Context 
vertxContext) { - PgUtil.getById(LOCATION_TABLE, Location.class, id, okapiHeaders, vertxContext, - GetLocationsByIdResponse.class, asyncResultHandler); + new LocationService(vertxContext, okapiHeaders).getById(id) + .onSuccess(response -> asyncResultHandler.handle(succeededFuture(response))) + .onFailure(handleFailure(asyncResultHandler)); } @Validate @Override - public void putLocationsById( - String id, - Location entity, - Map okapiHeaders, - Handler> asyncResultHandler, - Context vertxContext) { - runLocationChecks(checkIdChange(id, entity), checkAtLeastOneServicePoint(entity), - checkPrimaryServicePointRelationship(entity), checkForDuplicateServicePoints(entity)) - .onComplete(checksResult -> { - if (checksResult.failed()) { - String message = logAndSaveError(checksResult.cause()); - asyncResultHandler - .handle(Future.succeededFuture(PutLocationsByIdResponse.respond422WithApplicationJson( - ValidationHelper.createValidationErrorMessage( - ((LocationCheckError) checksResult.cause()).getField(), entity.getId(), message)))); - return; - } - PgUtil.put(LOCATION_TABLE, entity, id, okapiHeaders, vertxContext, - PutLocationsByIdResponse.class, asyncResultHandler); - }); + public void putLocationsById(String id, Location entity, Map okapiHeaders, + Handler> asyncResultHandler, Context vertxContext) { + new LocationService(vertxContext, okapiHeaders).update(id, entity) + .onSuccess(response -> asyncResultHandler.handle(succeededFuture(response))) + .onFailure(handleFailure(asyncResultHandler)); } @Validate @Override - public void deleteLocationsById( - String id, - Map okapiHeaders, - Handler> asyncResultHandler, - Context vertxContext) { - - PgUtil.deleteById(LOCATION_TABLE, id, okapiHeaders, vertxContext, - DeleteLocationsByIdResponse.class, asyncResultHandler); - } - - @SafeVarargs - private CompositeFuture runLocationChecks(Future... 
futures) { - List> allFutures = new ArrayList<>(Arrays.asList(futures)); - return Future.all(allFutures); - } - - private Future checkIdProvided(Location entity) { - - Future future = Future.succeededFuture(); - - String id = entity.getId(); - if (id == null) { - id = UUID.randomUUID().toString(); - entity.setId(id); - } - - return future; - - } - - private Future checkIdChange(String id, Location entity) { - - Future future = Future.succeededFuture(); - - if (!id.equals(entity.getId())) { - future = Future.failedFuture(new LocationCheckError("id", "Illegal operation: id cannot be changed")); - } - - return future; - - } - - private Future checkAtLeastOneServicePoint(Location entity) { - - Future future = Future.succeededFuture(); - - if (entity.getServicePointIds().isEmpty()) { - future = Future.failedFuture( - new LocationCheckError(SERVICEPOINT_IDS, "A location must have at least one Service Point assigned.")); - } - - return future; - - } - - private Future checkPrimaryServicePointRelationship(Location entity) { - - Future future = Future.succeededFuture(); - - if (!entity.getServicePointIds().contains(entity.getPrimaryServicePoint())) { - future = Future - .failedFuture(new LocationCheckError(PRIMARY_SERVICEPOINT, - "A Location's Primary Service point must be included as a Service Point.")); - } - - return future; - - } - - private Future checkForDuplicateServicePoints(Location entity) { - - Future future = Future.succeededFuture(); - - Set set = new HashSet<>(); - Set duplicateElements = new HashSet<>(); - - for (String element : entity.getServicePointIds()) { - if (!set.add(element)) { - duplicateElements.add(element); - } - } - - if (!duplicateElements.isEmpty()) { - future = Future.failedFuture( - new LocationCheckError(SERVICEPOINT_IDS, "A Service Point can only appear once on a Location.")); - } - - return future; - - } - - private static class LocationCheckError extends Exception { - - private final String field; - - LocationCheckError(String field, 
String message) { - super(message); - this.field = field; - } - - public String getField() { - return field; - } + public void deleteLocationsById(String id, Map okapiHeaders, + Handler> asyncResultHandler, Context vertxContext) { + new LocationService(vertxContext, okapiHeaders).delete(id) + .onSuccess(response -> asyncResultHandler.handle(succeededFuture(response))) + .onFailure(handleFailure(asyncResultHandler)); } } diff --git a/src/main/java/org/folio/rest/impl/ShelfLocationApi.java b/src/main/java/org/folio/rest/impl/ShelfLocationApi.java index 7f9575ccb..a387ca137 100644 --- a/src/main/java/org/folio/rest/impl/ShelfLocationApi.java +++ b/src/main/java/org/folio/rest/impl/ShelfLocationApi.java @@ -1,8 +1,8 @@ package org.folio.rest.impl; -import static org.folio.rest.impl.LocationApi.LOCATION_TABLE; import static org.folio.rest.impl.StorageHelper.getCql; import static org.folio.rest.impl.StorageHelper.logAndSaveError; +import static org.folio.services.location.LocationService.LOCATION_TABLE; import io.vertx.core.AsyncResult; import io.vertx.core.Context; @@ -32,8 +32,6 @@ * @author kurt */ public class ShelfLocationApi implements ShelfLocations { - public static final String SHELF_LOCATION_TABLE = "shelflocation"; - public static final String URL_PREFIX = "/shelflocations"; public static final String USE_NEW = "Use the new /locations interface instead."; /** @@ -47,10 +45,10 @@ public void getShelfLocations(String totalRecords, String query, int offset, int Handler> asyncResultHandler, Context vertxContext) { try { String tenantId = TenantTool.tenantId(okapiHeaders); - CQLWrapper cql = getCql(query, limit, offset, LocationApi.LOCATION_TABLE); + CQLWrapper cql = getCql(query, limit, offset, LOCATION_TABLE); PostgresClient.getInstance(vertxContext.owner(), tenantId) .get( - LocationApi.LOCATION_TABLE, Location.class, new String[] {"*"}, + LOCATION_TABLE, Location.class, new String[] {"*"}, cql, true, true, reply -> { try { if (reply.failed()) { diff --git 
a/src/main/java/org/folio/rest/impl/StorageHelper.java b/src/main/java/org/folio/rest/impl/StorageHelper.java index 14bc38312..5f01591b1 100644 --- a/src/main/java/org/folio/rest/impl/StorageHelper.java +++ b/src/main/java/org/folio/rest/impl/StorageHelper.java @@ -24,7 +24,7 @@ private StorageHelper() { throw new UnsupportedOperationException("Cannot instantiate utility class"); } - static String logAndSaveError(Throwable err) { + public static String logAndSaveError(Throwable err) { String message = err.getLocalizedMessage(); logger.error(message, err); return message; diff --git a/src/main/java/org/folio/services/domainevent/LocationDomainEventPublisher.java b/src/main/java/org/folio/services/domainevent/LocationDomainEventPublisher.java new file mode 100644 index 000000000..d4c2ddee0 --- /dev/null +++ b/src/main/java/org/folio/services/domainevent/LocationDomainEventPublisher.java @@ -0,0 +1,40 @@ +package org.folio.services.domainevent; + +import static io.vertx.core.Future.succeededFuture; +import static org.folio.InventoryKafkaTopic.LOCATION; +import static org.folio.rest.tools.utils.TenantTool.tenantId; + +import io.vertx.core.Context; +import io.vertx.core.Future; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import org.apache.commons.lang3.tuple.Pair; +import org.folio.persist.LocationRepository; +import org.folio.rest.jaxrs.model.Location; + +public class LocationDomainEventPublisher extends AbstractDomainEventPublisher { + + public LocationDomainEventPublisher(Context context, Map okapiHeaders) { + super(new LocationRepository(context, okapiHeaders), + new CommonDomainEventPublisher<>(context, okapiHeaders, LOCATION.fullTopicName(tenantId(okapiHeaders)))); + } + + @Override + protected Future>> getRecordIds(Collection locations) { + return succeededFuture(locations.stream() + .map(location -> pair(location.getId(), location)) + .toList() + ); + } + + @Override + protected Location convertDomainToEvent(String id, Location 
location) { + return location; + } + + @Override + protected String getId(Location location) { + return location.getId(); + } +} diff --git a/src/main/java/org/folio/services/location/LocationService.java b/src/main/java/org/folio/services/location/LocationService.java new file mode 100644 index 000000000..b743e0529 --- /dev/null +++ b/src/main/java/org/folio/services/location/LocationService.java @@ -0,0 +1,199 @@ +package org.folio.services.location; + +import static org.folio.rest.impl.StorageHelper.logAndSaveError; +import static org.folio.rest.persist.PgUtil.deleteById; +import static org.folio.rest.persist.PgUtil.get; +import static org.folio.rest.persist.PgUtil.post; +import static org.folio.rest.persist.PgUtil.put; + +import io.vertx.core.AsyncResult; +import io.vertx.core.Context; +import io.vertx.core.Future; +import io.vertx.sqlclient.Row; +import io.vertx.sqlclient.RowSet; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; +import javax.ws.rs.core.Response; +import org.folio.persist.LocationRepository; +import org.folio.rest.jaxrs.model.Error; +import org.folio.rest.jaxrs.model.Errors; +import org.folio.rest.jaxrs.model.Location; +import org.folio.rest.jaxrs.model.Locations; +import org.folio.rest.jaxrs.model.Parameter; +import org.folio.rest.jaxrs.resource.Locations.DeleteLocationsByIdResponse; +import org.folio.rest.jaxrs.resource.Locations.DeleteLocationsResponse; +import org.folio.rest.jaxrs.resource.Locations.GetLocationsByIdResponse; +import org.folio.rest.jaxrs.resource.Locations.GetLocationsResponse; +import org.folio.rest.jaxrs.resource.Locations.PostLocationsResponse; +import org.folio.rest.jaxrs.resource.Locations.PutLocationsByIdResponse; +import org.folio.rest.persist.PgUtil; +import org.folio.services.domainevent.LocationDomainEventPublisher; + +public class LocationService { + + public static final String LOCATION_TABLE = "location"; + + private final 
Context context; + private final Map okapiHeaders; + private final LocationRepository repository; + private final LocationDomainEventPublisher domainEventService; + + public LocationService(Context context, Map okapiHeaders) { + this.context = context; + this.okapiHeaders = okapiHeaders; + + this.repository = new LocationRepository(context, okapiHeaders); + this.domainEventService = new LocationDomainEventPublisher(context, okapiHeaders); + } + + public Future getByQuery(String cql, int offset, int limit) { + return get(LOCATION_TABLE, Location.class, Locations.class, + cql, offset, limit, okapiHeaders, context, GetLocationsResponse.class); + } + + public Future getById(String id) { + return PgUtil.getById(LOCATION_TABLE, Location.class, id, okapiHeaders, context, + GetLocationsByIdResponse.class); + } + + public Future create(Location location) { + var checks = List.of(checkAtLeastOneServicePoint(location), + checkForDuplicateServicePoints(location), + checkPrimaryServicePointRelationship(location)); + return doLocationChecks(checks) + .compose(exceptions -> { + if (exceptions.isEmpty()) { + return post(LOCATION_TABLE, location, okapiHeaders, context, PostLocationsResponse.class) + .onSuccess(domainEventService.publishCreated()); + } else { + var errors = toErrors(exceptions); + return Future.succeededFuture(PostLocationsResponse + .respond422WithApplicationJson(errors)); + } + }); + } + + public Future update(String id, Location location) { + if (location.getId() == null) { + location.setId(id); + } + var checks = List.of(checkIdChange(id, location), + checkAtLeastOneServicePoint(location), + checkForDuplicateServicePoints(location), + checkPrimaryServicePointRelationship(location)); + return doLocationChecks(checks) + .compose(exceptions -> { + if (exceptions.isEmpty()) { + return repository.getById(id) + .compose( + oldLocation -> put(LOCATION_TABLE, location, id, okapiHeaders, context, PutLocationsByIdResponse.class) + 
.onSuccess(domainEventService.publishUpdated(oldLocation)) + ); + } else { + return Future.succeededFuture(PostLocationsResponse + .respond422WithApplicationJson(toErrors(exceptions))); + } + }); + } + + public Future delete(String id) { + return repository.getById(id) + .compose(oldLocation -> deleteById(LOCATION_TABLE, id, okapiHeaders, context, + DeleteLocationsByIdResponse.class) + .onSuccess(domainEventService.publishRemoved(oldLocation)) + ); + } + + public Future deleteAll() { + return repository.deleteAll() + .transform(prepareDeleteAllResponse()) + .onSuccess(response -> domainEventService.publishAllRemoved()); + } + + private Errors toErrors(List exceptions) { + var errorList = exceptions.stream() + .map(LocationCheckException::toError) + .toList(); + return new Errors().withErrors(errorList); + } + + private Function>, Future> prepareDeleteAllResponse() { + return reply -> reply.succeeded() + ? Future.succeededFuture(DeleteLocationsResponse.respond204()) + : Future.succeededFuture( + DeleteLocationsResponse.respond500WithTextPlain(reply.cause().getMessage()) + ); + } + + private Future> doLocationChecks(List> optionals) { + return Future.succeededFuture(optionals.stream().filter(Optional::isPresent).map(Optional::get).toList()); + } + + private Optional checkIdChange(String id, Location entity) { + if (!id.equals(entity.getId())) { + return Optional.of(new LocationCheckException("id", "Illegal operation: id cannot be changed")); + } + + return Optional.empty(); + + } + + private Optional checkAtLeastOneServicePoint(Location entity) { + if (entity.getServicePointIds().isEmpty()) { + return Optional.of( + new LocationCheckException("servicePointIds", "A location must have at least one Service Point assigned.")); + } + + return Optional.empty(); + + } + + private Optional checkPrimaryServicePointRelationship(Location entity) { + if (!entity.getServicePointIds().contains(entity.getPrimaryServicePoint())) { + return Optional.of(new 
LocationCheckException("primaryServicePoint", + "A Location's Primary Service point must be included as a Service Point.")); + } + + return Optional.empty(); + + } + + private Optional checkForDuplicateServicePoints(Location entity) { + if (entity.getServicePointIds().size() != new HashSet<>(entity.getServicePointIds()).size()) { + return Optional.of( + new LocationCheckException("servicePointIds", "A Service Point can only appear once on a Location.")); + } + + return Optional.empty(); + + } + + private static class LocationCheckException extends Exception { + + private final String field; + + LocationCheckException(String field, String message) { + super(message); + this.field = field; + } + + public String getField() { + return this.field; + } + + public Error toError() { + Error error = new Error(); + Parameter p = new Parameter(); + p.setKey(getField()); + error.getParameters().add(p); + error.setMessage(logAndSaveError(this)); + error.setCode("-1"); + error.setType("1"); + return error; + } + } + +} diff --git a/src/test/java/org/folio/rest/impl/BaseIntegrationTest.java b/src/test/java/org/folio/rest/impl/BaseIntegrationTest.java index 0f3fa4931..0f429585b 100644 --- a/src/test/java/org/folio/rest/impl/BaseIntegrationTest.java +++ b/src/test/java/org/folio/rest/impl/BaseIntegrationTest.java @@ -84,6 +84,7 @@ protected static Future doRequest(HttpClient client, HttpMethod me @BeforeAll static void beforeAll(Vertx vertx, VertxTestContext ctx) throws Throwable { port = NetworkUtils.nextFreePort(); + System.setProperty("KAFKA_DOMAIN_TOPIC_NUM_PARTITIONS", "1"); System.setProperty("kafka-port", String.valueOf(KAFKA_CONTAINER.getFirstMappedPort())); System.setProperty("kafka-host", KAFKA_CONTAINER.getHost()); Envs.setEnv(POSTGRESQL_CONTAINER.getHost(), @@ -115,10 +116,10 @@ private static Future enableTenant(VertxTestContext ctx, HttpClien }))); } - private static JsonObject getJob(String moduleFrom, String moduleTo, boolean loadSample) { + private static 
JsonObject getJob(String moduleFrom, String moduleTo, boolean loadReference) { JsonArray ar = new JsonArray(); - ar.add(new JsonObject().put("key", "loadReference").put("value", "false")); - ar.add(new JsonObject().put("key", "loadSample").put("value", Boolean.toString(loadSample))); + ar.add(new JsonObject().put("key", "loadReference").put("value", Boolean.toString(loadReference))); + ar.add(new JsonObject().put("key", "loadSample").put("value", "false")); JsonObject jo = new JsonObject(); jo.put("parameters", ar); diff --git a/src/test/java/org/folio/rest/impl/LocationIT.java b/src/test/java/org/folio/rest/impl/LocationIT.java new file mode 100644 index 000000000..8b812cb1f --- /dev/null +++ b/src/test/java/org/folio/rest/impl/LocationIT.java @@ -0,0 +1,155 @@ +package org.folio.rest.impl; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.folio.HttpStatus.HTTP_UNPROCESSABLE_ENTITY; +import static org.folio.rest.impl.LocationUnitApi.CAMPUS_TABLE; +import static org.folio.rest.impl.LocationUnitApi.INSTITUTION_TABLE; +import static org.folio.rest.impl.LocationUnitApi.LIBRARY_TABLE; +import static org.folio.services.location.LocationService.LOCATION_TABLE; +import static org.folio.utility.RestUtility.TENANT_ID; + +import io.vertx.core.Vertx; +import io.vertx.core.http.HttpClient; +import io.vertx.core.json.JsonObject; +import io.vertx.junit5.VertxExtension; +import io.vertx.junit5.VertxTestContext; +import java.util.List; +import java.util.UUID; +import java.util.function.Function; +import java.util.function.UnaryOperator; +import org.folio.rest.jaxrs.model.Error; +import org.folio.rest.jaxrs.model.Errors; +import org.folio.rest.jaxrs.model.Location; +import org.folio.rest.jaxrs.model.Locations; +import org.folio.rest.jaxrs.model.Loccamp; +import org.folio.rest.jaxrs.model.Locinst; +import org.folio.rest.jaxrs.model.Loclib; +import org.folio.rest.jaxrs.model.Metadata; +import org.folio.rest.persist.PostgresClient; +import 
org.folio.rest.persist.cql.CQLWrapper;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+
+@ExtendWith(VertxExtension.class)
+class LocationIT extends BaseReferenceDataIntegrationTest<Location, Locations> {
+
+  private String institutionId;
+  private String campusId;
+  private String libraryId;
+
+  @Override
+  protected String referenceTable() {
+    return LOCATION_TABLE;
+  }
+
+  @Override
+  protected String resourceUrl() {
+    return "/locations";
+  }
+
+  @Override
+  protected Class<Location> targetClass() {
+    return Location.class;
+  }
+
+  @Override
+  protected Class<Locations> collectionClass() {
+    return Locations.class;
+  }
+
+  @Override
+  protected Location sampleRecord() {
+    var primaryServicePoint = UUID.randomUUID();
+    return new Location()
+      .withName("test-location")
+      .withCode("code")
+      .withCampusId(campusId)
+      .withLibraryId(libraryId)
+      .withInstitutionId(institutionId)
+      .withPrimaryServicePoint(primaryServicePoint)
+      .withServicePointIds(List.of(primaryServicePoint));
+  }
+
+  @Override
+  protected Function<Locations, List<Location>> collectionRecordsExtractor() {
+    return Locations::getLocations;
+  }
+
+  @Override
+  protected List<Function<Location, String>> recordFieldExtractors() {
+    return List.of(Location::getName);
+  }
+
+  @Override
+  protected Function<Location, String> idExtractor() {
+    return Location::getId;
+  }
+
+  @Override
+  protected Function<Location, Metadata> metadataExtractor() {
+    return Location::getMetadata;
+  }
+
+  @Override
+  protected UnaryOperator<Location> recordModifyingFunction() {
+    return location -> location.withName("name-updated");
+  }
+
+  @Override
+  protected List<String> queries() {
+    return List.of("name==test-location", "code=code", "campusId==" + campusId, "libraryId==" + libraryId);
+  }
+
+  @BeforeEach
+  void beforeEach(Vertx vertx, VertxTestContext ctx) {
+    var postgresClient = PostgresClient.getInstance(vertx, TENANT_ID);
+    var institution = new Locinst().withName("institution").withCode("ic");
+    var campus = new 
Loccamp().withName("campus").withCode("cc");
+    var library = new Loclib().withName("library").withCode("lc");
+    postgresClient.save(INSTITUTION_TABLE, institution)
+      .compose(id -> {
+        institutionId = id;
+        return postgresClient.save(CAMPUS_TABLE, campus.withInstitutionId(id));
+      })
+      .compose(id -> {
+        campusId = id;
+        return postgresClient.save(LIBRARY_TABLE, library.withCampusId(id));
+      })
+      .onFailure(ctx::failNow)
+      .onSuccess(id -> {
+        libraryId = id;
+        ctx.completeNow();
+      });
+  }
+
+  @AfterEach
+  void afterEach(Vertx vertx, VertxTestContext ctx) {
+    var postgresClient = PostgresClient.getInstance(vertx, TENANT_ID);
+    postgresClient.delete(referenceTable(), (CQLWrapper) null)
+      .compose(rows -> postgresClient.delete(LIBRARY_TABLE, (CQLWrapper) null))
+      .compose(rows -> postgresClient.delete(CAMPUS_TABLE, (CQLWrapper) null))
+      .compose(rows -> postgresClient.delete(INSTITUTION_TABLE, (CQLWrapper) null))
+      .onFailure(ctx::failNow)
+      .onComplete(event -> ctx.completeNow());
+  }
+
+  @Test
+  void put_shouldReturn422_whenServicePointsNotSet(Vertx vertx, VertxTestContext ctx) {
+    HttpClient client = vertx.createHttpClient();
+    var invalidRecord = sampleRecord().withServicePointIds(null).withId(UUID.randomUUID().toString());
+
+    doPut(client, resourceUrlById(invalidRecord.getId()), JsonObject.mapFrom(invalidRecord))
+      .onComplete(verifyStatus(ctx, HTTP_UNPROCESSABLE_ENTITY))
+      .onComplete(ctx.succeeding(response -> ctx.verify(() -> {
+        var actual = response.bodyAsClass(Errors.class);
+        assertThat(actual.getErrors())
+          .hasSize(2)
+          .extracting(Error::getMessage)
+          .containsExactlyInAnyOrder("A location must have at least one Service Point assigned.",
+            "A Location's Primary Service point must be included as a Service Point.");
+        ctx.completeNow();
+      })));
+  }
+}
diff --git a/src/test/java/org/folio/services/kafka/topic/KafkaAdminClientServiceTest.java b/src/test/java/org/folio/services/kafka/topic/KafkaAdminClientServiceTest.java
index f42e8a9c4..e3355156a 100644
--- a/src/test/java/org/folio/services/kafka/topic/KafkaAdminClientServiceTest.java +++ b/src/test/java/org/folio/services/kafka/topic/KafkaAdminClientServiceTest.java @@ -39,7 +39,8 @@ public class KafkaAdminClientServiceTest { "folio.foo-tenant.inventory.holdings-record", "folio.foo-tenant.inventory.item", "folio.foo-tenant.inventory.instance-contribution", "folio.foo-tenant.inventory.bound-with", "folio.foo-tenant.inventory.async-migration", - "folio.foo-tenant.inventory.service-point", "folio.foo-tenant.inventory.classification-type"); + "folio.foo-tenant.inventory.service-point", "folio.foo-tenant.inventory.classification-type", + "folio.foo-tenant.inventory.location"); private KafkaAdminClient mockClient; private Vertx vertx;