diff --git a/src/test/java/org/opensearch/neuralsearch/processor/DocumentChunkingProcessorTests.java b/src/test/java/org/opensearch/neuralsearch/processor/DocumentChunkingProcessorTests.java
index 3b2e7eba4..ae4ff17de 100644
--- a/src/test/java/org/opensearch/neuralsearch/processor/DocumentChunkingProcessorTests.java
+++ b/src/test/java/org/opensearch/neuralsearch/processor/DocumentChunkingProcessorTests.java
@@ -97,6 +97,12 @@ private Map<String, Object> createFixedTokenLengthParameters() {
         return parameters;
     }
 
+    private List<Map<String, Object>> createSourceDataListNestedMap() {
+        Map<String, Object> documents = new HashMap<>();
+        documents.put(INPUT_FIELD, createSourceDataString());
+        return List.of(documents, documents);
+    }
+
     private Map<String, Object> createFixedTokenLengthParametersWithMaxChunk(int maxChunkNum) {
         Map<String, Object> parameters = new HashMap<>();
         parameters.put(FixedTokenLengthChunker.TOKEN_LIMIT_FIELD, 10);
@@ -398,8 +404,8 @@ public void testExecute_withFixedTokenLength_andFieldMapNestedMap_sourceList_suc
 
         List<String> expectedPassages = new ArrayList<>();
         expectedPassages.add("This is an example document to be chunked The document");
-        expectedPassages.add("The document contains a single paragraph two sentences and 24");
-        expectedPassages.add("and 24 tokens by standard tokenizer in OpenSearch");
+        expectedPassages.add("contains a single paragraph two sentences and 24 tokens by");
+        expectedPassages.add("standard tokenizer in OpenSearch");
         assert (nestedResult instanceof List);
         assertEquals(((List) nestedResult).size(), 2);
         for (Object result : (List) nestedResult) {