From 2f8557a6be4b827b66e9b1d032c9a763d69346d3 Mon Sep 17 00:00:00 2001
From: xinyual
Date: Mon, 11 Mar 2024 14:30:53 +0800
Subject: [PATCH] avoid java doc change

Signed-off-by: xinyual
---
 .../processor/chunker/FixedTokenLengthChunker.java | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/main/java/org/opensearch/neuralsearch/processor/chunker/FixedTokenLengthChunker.java b/src/main/java/org/opensearch/neuralsearch/processor/chunker/FixedTokenLengthChunker.java
index cbdc57f30..bc39607cc 100644
--- a/src/main/java/org/opensearch/neuralsearch/processor/chunker/FixedTokenLengthChunker.java
+++ b/src/main/java/org/opensearch/neuralsearch/processor/chunker/FixedTokenLengthChunker.java
@@ -56,14 +56,14 @@ public FixedTokenLengthChunker(Map parameters) {
     }
 
     /**
-     * Validate the chunked passages for fixed token length algorithm,
+     * Validate and parse the parameters for fixed token length algorithm,
      * will throw IllegalArgumentException when parameters are invalid
      *
      * @param parameters a map containing parameters, containing the following parameters:
-     * 1. tokenizer the analyzer tokenizer in opensearch, please check https://opensearch.org/docs/latest/analyzers/tokenizers/index/
-     * 2. token_limit the token limit for each chunked passage
-     * 3. overlap_rate the overlapping degree for each chunked passage, indicating how many token comes from the previous passage
-     * 4. max_token_count the max token limit for the tokenizer
+     * 1. tokenizer: the analyzer tokenizer in opensearch
+     * 2. token_limit: the token limit for each chunked passage
+     * 3. overlap_rate: the overlapping degree for each chunked passage, indicating how many token comes from the previous passage
+     * 4. max_token_count: the max token limit for the tokenizer
      * Here are requirements for parameters:
      * max_token_count and token_limit should be a positive integer
      * overlap_rate should be within range [0, 0.5]
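
To make the documented contract concrete, here is a minimal Java sketch of the validation the updated Javadoc describes: `token_limit` and `max_token_count` must be positive integers, and `overlap_rate` must fall within [0, 0.5]. This is an illustration only, not the actual `FixedTokenLengthChunker` code from the PR; the class name, default values, and the `parsePositiveInt` helper are assumptions.

```java
import java.util.Map;

// Hypothetical sketch of the parameter contract documented in the Javadoc.
// Parameter keys mirror the documented names; defaults are assumptions.
public class FixedTokenLengthChunkerSketch {

    private String tokenizer;
    private int tokenLimit;
    private double overlapRate;
    private int maxTokenCount;

    public FixedTokenLengthChunkerSketch(Map<String, Object> parameters) {
        validateAndParseParameters(parameters);
    }

    private void validateAndParseParameters(Map<String, Object> parameters) {
        // tokenizer: the analyzer tokenizer in OpenSearch (default assumed here)
        this.tokenizer = parameters.getOrDefault("tokenizer", "standard").toString();

        // token_limit and max_token_count: each should be a positive integer
        this.tokenLimit = parsePositiveInt(parameters, "token_limit", 384);
        this.maxTokenCount = parsePositiveInt(parameters, "max_token_count", 10000);

        // overlap_rate: should be within range [0, 0.5]
        Object rate = parameters.getOrDefault("overlap_rate", 0.0);
        this.overlapRate = Double.parseDouble(rate.toString());
        if (overlapRate < 0.0 || overlapRate > 0.5) {
            throw new IllegalArgumentException("overlap_rate must be within [0, 0.5], got " + overlapRate);
        }
    }

    // Reads an integer parameter and rejects non-positive values.
    private static int parsePositiveInt(Map<String, Object> parameters, String key, int defaultValue) {
        Object value = parameters.getOrDefault(key, defaultValue);
        int parsed = Integer.parseInt(value.toString());
        if (parsed <= 0) {
            throw new IllegalArgumentException(key + " should be a positive integer, got " + parsed);
        }
        return parsed;
    }

    public static void main(String[] args) {
        // Example parameter map mirroring the documented contract.
        Map<String, Object> params = Map.of("tokenizer", "standard", "token_limit", 256, "overlap_rate", 0.2);
        FixedTokenLengthChunkerSketch chunker = new FixedTokenLengthChunkerSketch(params);
        System.out.println("Parsed OK: token_limit=" + chunker.tokenLimit + ", overlap_rate=" + chunker.overlapRate);
    }
}
```

Validating eagerly in the constructor, as the renamed Javadoc ("Validate and parse the parameters") suggests, fails fast with an `IllegalArgumentException` before any chunking work begins.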