Remove nGram and edgeNGram token filter names #38911
@@ -81,7 +81,7 @@ public void testNgramHighlightingWithBrokenPositions() throws IOException {
         .put("analysis.tokenizer.autocomplete.max_gram", 20)
         .put("analysis.tokenizer.autocomplete.min_gram", 1)
         .put("analysis.tokenizer.autocomplete.token_chars", "letter,digit")
-        .put("analysis.tokenizer.autocomplete.type", "nGram")
+        .put("analysis.tokenizer.autocomplete.type", "ngram")
         .put("analysis.filter.wordDelimiter.type", "word_delimiter")
         .putList("analysis.filter.wordDelimiter.type_table",
             "& => ALPHANUM", "| => ALPHANUM", "! => ALPHANUM",

Review comment: This is about the tokenizer, not the filter, but I think we also shouldn't use the camel case version of that one anymore.
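For context on the reviewer's point, here is a minimal sketch, in the YAML REST-test style used elsewhere in this PR, of defining the tokenizer under its snake_case name. The test name, index name, and gram sizes are illustrative assumptions, not part of this change:

```yaml
---
"ngram_tokenizer_snake_case":
    - do:
        indices.create:
          index: test_autocomplete            # hypothetical index name
          body:
            settings:
              analysis:
                tokenizer:
                  autocomplete:
                    type: ngram               # snake_case name; the camel case "nGram" is going away
                    min_gram: 1
                    max_gram: 2               # diff of 1 stays within the default index.max_ngram_diff
                    token_chars: [ letter, digit ]
```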
@@ -23,38 +23,6 @@
     - match: { detail.tokenizer.tokens.0.token: Foo Bar! }

----
-"nGram":
-    - do:
-        indices.analyze:
-          body:
-            text: good
-            explain: true
-            tokenizer:
-              type: nGram
-              min_gram: 2
-              max_gram: 2
-    - length: { detail.tokenizer.tokens: 3 }
-    - match: { detail.tokenizer.name: _anonymous_tokenizer }
-    - match: { detail.tokenizer.tokens.0.token: go }
-    - match: { detail.tokenizer.tokens.1.token: oo }
-    - match: { detail.tokenizer.tokens.2.token: od }
-
----
-"nGram_exception":
-    - skip:
-        version: " - 6.99.99"
-        reason: only starting from version 7.x this throws an error
-    - do:
-        catch: /The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to[:] \[1\] but was \[2\]\. This limit can be set by changing the \[index.max_ngram_diff\] index level setting\./
-        indices.analyze:
-          body:
-            text: good
-            explain: true
-            tokenizer:
-              type: nGram
-              min_gram: 2
-              max_gram: 4
 ---
 "simple_pattern":
     - do:
         indices.analyze:

@@ -133,7 +101,7 @@
           body:
             text: "foobar"
             explain: true
             tokenizer:
-              type: nGram
+              type: ngram
               min_gram: 3
               max_gram: 3
     - length: { detail.tokenizer.tokens: 4 }

Review comment: same here, this is about the tokenizer, but we shouldn't use the camel case name here regardless
@@ -162,15 +130,31 @@
           body:
             text: "foo"
             explain: true
-        tokenizer: nGram
+        tokenizer: ngram
     - length: { detail.tokenizer.tokens: 5 }
-    - match: { detail.tokenizer.name: nGram }
+    - match: { detail.tokenizer.name: ngram }
     - match: { detail.tokenizer.tokens.0.token: f }
     - match: { detail.tokenizer.tokens.1.token: fo }
     - match: { detail.tokenizer.tokens.2.token: o }
     - match: { detail.tokenizer.tokens.3.token: oo }
     - match: { detail.tokenizer.tokens.4.token: o }

+---
+"ngram_exception":
+    - skip:
+        version: " - 6.99.99"
+        reason: only starting from version 7.x this throws an error
+    - do:
+        catch: /The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to[:] \[1\] but was \[2\]\. This limit can be set by changing the \[index.max_ngram_diff\] index level setting\./
+        indices.analyze:
+          body:
+            text: good
+            explain: true
+            tokenizer:
+              type: ngram
+              min_gram: 2
+              max_gram: 4
+
 ---
 "edge_ngram":
     - do:
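The re-added exception test points at `index.max_ngram_diff`. As a hedged sketch (the test name, index name, and values are assumptions for illustration), raising that index-level setting permits a wider min_gram/max_gram spread than the default limit of 1:

```yaml
---
"ngram_wide_diff_allowed":
    - do:
        indices.create:
          index: test_ngram_diff              # hypothetical index name
          body:
            settings:
              index.max_ngram_diff: 2         # default is 1; 2 allows max_gram - min_gram of 2
              analysis:
                tokenizer:
                  wide_ngram:
                    type: ngram
                    min_gram: 2
                    max_gram: 4               # would throw with the default limit
```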
@@ -194,7 +178,7 @@
             text: "foo"
             explain: true
             tokenizer:
-              type: edgeNGram
+              type: edge_ngram
               min_gram: 1
               max_gram: 3
     - length: { detail.tokenizer.tokens: 3 }

@@ -219,9 +203,9 @@
           body:
             text: "foo"
             explain: true
-        tokenizer: edgeNGram
+        tokenizer: edge_ngram
     - length: { detail.tokenizer.tokens: 2 }
-    - match: { detail.tokenizer.name: edgeNGram }
+    - match: { detail.tokenizer.name: edge_ngram }
     - match: { detail.tokenizer.tokens.0.token: f }
     - match: { detail.tokenizer.tokens.1.token: fo }
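Since the PR title also covers the token filter names, a minimal sketch (the analyzer, filter, and index names are illustrative assumptions; only the snake_case type names come from this PR) of wiring the `edge_ngram` filter into a custom analyzer:

```yaml
---
"edge_ngram_filter_snake_case":
    - do:
        indices.create:
          index: test_autocomplete_filter     # hypothetical index name
          body:
            settings:
              analysis:
                filter:
                  autocomplete_filter:
                    type: edge_ngram          # snake_case filter name; "edgeNGram" is removed
                    min_gram: 1
                    max_gram: 3
                analyzer:
                  autocomplete:
                    type: custom
                    tokenizer: standard
                    filter: [ lowercase, autocomplete_filter ]
```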
@@ -76,7 +76,7 @@
       analysis:
         tokenizer:
           trigram:
-            type: nGram
+            type: ngram
             min_gram: 3
             max_gram: 3
       filter:
Review comment: These doc-changes should go back to 7.0 as well.