add unit test for tokenization exception in fixed token length algorithm
Signed-off-by: yuye-aws <[email protected]>
yuye-aws committed Mar 15, 2024
1 parent a524954 commit 10f6568
Showing 2 changed files with 26 additions and 12 deletions.
@@ -4,7 +4,6 @@
*/
package org.opensearch.neuralsearch.processor.chunker;

-import java.io.IOException;
import java.util.Locale;
import java.util.Map;
import java.util.List;
@@ -156,7 +155,7 @@ private List<AnalyzeToken> tokenize(final String content, final String tokenizer
try {
AnalyzeAction.Response analyzeResponse = analyze(analyzeRequest, analysisRegistry, null, maxTokenCount);
return analyzeResponse.getTokens();
-} catch (IOException e) {
+} catch (Exception e) {
throw new IllegalStateException(
String.format(Locale.ROOT, "%s algorithm encounters exception in tokenization: %s", ALGORITHM_NAME, e.getMessage()),
e
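
With this change, any exception thrown while tokenizing (not only IOException) is rethrown as an IllegalStateException that carries the original cause. A minimal caller sketch follows, assuming the chunker's chunk(content, parameters) method returns the passages as a List<String>; the helper below is illustrative only, not code from this repository:

import java.util.List;
import java.util.Map;
import org.opensearch.neuralsearch.processor.chunker.FixedTokenLengthChunker;

// Hypothetical helper, for illustration only: with the broadened catch above, every
// tokenization failure reaches the caller as a single IllegalStateException.
private static List<String> chunkOrEmpty(final FixedTokenLengthChunker chunker, final String content, final Map<String, Object> runtimeParameters) {
    try {
        return chunker.chunk(content, runtimeParameters);
    } catch (IllegalStateException e) {
        // e.getCause() carries the exception originally thrown during analysis
        return List.of();
    }
}
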
@@ -62,11 +62,11 @@ public Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> getTokeniz
return new FixedTokenLengthChunker(nonRuntimeParameters);
}

-public void testValidateAndParseParameters_whenNoParams_thenSuccessful() {
+public void testParseParameters_whenNoParams_thenSuccessful() {
fixedTokenLengthChunker.parseParameters(Map.of());
}

-public void testValidateAndParseParameters_whenIllegalTokenLimitType_thenFail() {
+public void testParseParameters_whenIllegalTokenLimitType_thenFail() {
Map<String, Object> parameters = new HashMap<>();
parameters.put(TOKEN_LIMIT_FIELD, "invalid token limit");
IllegalArgumentException illegalArgumentException = assertThrows(
@@ -79,7 +79,7 @@ public void testValidateAndParseParameters_whenIllegalTokenLimitType_thenFail()
);
}

-public void testValidateAndParseParameters_whenIllegalTokenLimitValue_thenFail() {
+public void testParseParameters_whenIllegalTokenLimitValue_thenFail() {
Map<String, Object> parameters = new HashMap<>();
parameters.put(TOKEN_LIMIT_FIELD, -1);
IllegalArgumentException illegalArgumentException = assertThrows(
@@ -92,7 +92,7 @@ public void testValidateAndParseParameters_whenIllegalTokenLimitValue_thenFail()
);
}

-public void testValidateAndParseParameters_whenIllegalOverlapRateType_thenFail() {
+public void testParseParameters_whenIllegalOverlapRateType_thenFail() {
Map<String, Object> parameters = new HashMap<>();
parameters.put(OVERLAP_RATE_FIELD, "invalid overlap rate");
IllegalArgumentException illegalArgumentException = assertThrows(
@@ -105,7 +105,7 @@ public void testValidateAndParseParameters_whenIllegalOverlapRateType_thenFail()
);
}

-public void testValidateAndParseParameters_whenIllegalOverlapRateValue_thenFail() {
+public void testParseParameters_whenIllegalOverlapRateValue_thenFail() {
Map<String, Object> parameters = new HashMap<>();
parameters.put(OVERLAP_RATE_FIELD, 0.6);
IllegalArgumentException illegalArgumentException = assertThrows(
@@ -118,7 +118,7 @@ public void testValidateAndParseParameters_whenIllegalOverlapRateValue_thenFail(
);
}

-public void testValidateAndParseParameters_whenIllegalTokenizerType_thenFail() {
+public void testParseParameters_whenIllegalTokenizerType_thenFail() {
Map<String, Object> parameters = new HashMap<>();
parameters.put(TOKENIZER_FIELD, 111);
IllegalArgumentException illegalArgumentException = assertThrows(
@@ -131,7 +131,7 @@ public void testValidateAndParseParameters_whenIllegalTokenizerType_thenFail() {
);
}

-public void testValidateAndParseParameters_whenUnsupportedTokenizer_thenFail() {
+public void testParseParameters_whenUnsupportedTokenizer_thenFail() {
String ngramTokenizer = "ngram";
Map<String, Object> parameters = Map.of(TOKENIZER_FIELD, ngramTokenizer);
IllegalArgumentException illegalArgumentException = assertThrows(
@@ -142,7 +142,22 @@ public void testValidateAndParseParameters_whenUnsupportedTokenizer_thenFail() {
.contains(String.format(Locale.ROOT, "Tokenizer [%s] is not supported for [%s] algorithm.", ngramTokenizer, ALGORITHM_NAME)));
}

-public void testChunk_withTokenLimit_10() {
+public void testChunk_whenTokenizationException_thenFail() {
+// lowercase tokenizer is not supported in unit tests
+String lowercaseTokenizer = "lowercase";
+Map<String, Object> parameters = Map.of(TOKENIZER_FIELD, lowercaseTokenizer);
+FixedTokenLengthChunker fixedTokenLengthChunker = createFixedTokenLengthChunker(parameters);
+String content =
+"This is an example document to be chunked. The document contains a single paragraph, two sentences and 24 tokens by standard tokenizer in OpenSearch.";
+IllegalStateException illegalStateException = assertThrows(
+IllegalStateException.class,
+() -> fixedTokenLengthChunker.chunk(content, parameters)
+);
+assert (illegalStateException.getMessage()
+.contains(String.format(Locale.ROOT, "%s algorithm encounters exception in tokenization", ALGORITHM_NAME)));
+}
+
+public void testChunk_withTokenLimit10_thenSucceed() {
Map<String, Object> parameters = new HashMap<>();
parameters.put(TOKEN_LIMIT_FIELD, 10);
parameters.put(TOKENIZER_FIELD, "standard");
@@ -159,7 +174,7 @@ public void testChunk_withTokenLimit_10() {
assertEquals(expectedPassages, passages);
}

-public void testChunk_withTokenLimit_20() {
+public void testChunk_withTokenLimit20_thenSucceed() {
Map<String, Object> parameters = new HashMap<>();
parameters.put(TOKEN_LIMIT_FIELD, 20);
parameters.put(TOKENIZER_FIELD, "standard");
@@ -177,7 +192,7 @@ public void testChunk_withTokenLimit_20() {
assertEquals(expectedPassages, passages);
}

-public void testChunk_withOverlapRate_half() {
+public void testChunk_withOverlapRateHalf_thenSucceed() {
Map<String, Object> parameters = new HashMap<>();
parameters.put(TOKEN_LIMIT_FIELD, 10);
parameters.put(OVERLAP_RATE_FIELD, 0.5);
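
The new tokenization-failure test works because the unit-test analysis registry exposes only a limited set of tokenizers (the tokenizer provider visible in the hunk context above), so requesting the lowercase tokenizer makes the analyze call throw, and the chunker wraps that failure in an IllegalStateException. A rough sketch of such a provider follows, assuming it registers only the standard tokenizer; the class name and registration details are illustrative, not the repository's actual test setup:

import java.util.Map;

import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.opensearch.index.analysis.TokenizerFactory;
import org.opensearch.indices.analysis.AnalysisModule;
import org.opensearch.plugins.AnalysisPlugin;
import org.opensearch.plugins.Plugin;

// Illustrative only: an analysis plugin that registers just the "standard" tokenizer.
// Asking the resulting registry for any other tokenizer (e.g. "lowercase") fails during
// analysis, and the fixed token length chunker reports that as an IllegalStateException.
public class StandardOnlyAnalysisPlugin extends Plugin implements AnalysisPlugin {
    @Override
    public Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> getTokenizers() {
        return Map.of(
            "standard",
            (indexSettings, environment, name, settings) -> TokenizerFactory.newFactory(name, StandardTokenizer::new)
        );
    }
}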
