Exposing Lucene 6.x MinHash filter. #20206

Merged 2 commits on Sep 7, 2016.
Changes from 1 commit
@@ -0,0 +1,57 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.minhash.MinHashFilterFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;

import java.util.HashMap;
import java.util.Map;

/**
 * A {@link TokenFilterFactory} adapter for Lucene's {@link MinHashFilterFactory}.
 */
public class MinHashTokenFilterFactory extends AbstractTokenFilterFactory {

    private final MinHashFilterFactory minHashFilterFactory;

    public MinHashTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
        super(indexSettings, name, settings);
        minHashFilterFactory = new MinHashFilterFactory(convertSettings(settings));
    }

    @Override
    public TokenStream create(TokenStream tokenStream) {
        return minHashFilterFactory.create(tokenStream);
    }

    // Map the snake_case Elasticsearch setting names onto the camelCase
    // parameter names that the Lucene factory expects.
    private Map<String, String> convertSettings(Settings settings) {
        Map<String, String> settingMap = new HashMap<>();
        settingMap.put("hashCount", settings.get("hash_count"));
        settingMap.put("bucketCount", settings.get("bucket_count"));
        settingMap.put("hashSetSize", settings.get("hash_set_size"));
        settingMap.put("withRotation", settings.get("with_rotation"));
        return settingMap;
    }
}
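For context, here is a minimal standalone sketch of what this adapter delegates to: driving Lucene's MinHashFilterFactory directly with the camelCase parameter names that convertSettings() produces. The class name, parameter values, and the consuming loop are illustrative, not part of this PR.

import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.minhash.MinHashFilterFactory;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class MinHashDirectUsage {
    public static void main(String[] args) throws Exception {
        // Same camelCase keys that convertSettings() maps the ES settings onto.
        Map<String, String> params = new HashMap<>();
        params.put("hashCount", "1");
        params.put("bucketCount", "2");
        params.put("hashSetSize", "1");
        params.put("withRotation", "false");
        MinHashFilterFactory factory = new MinHashFilterFactory(params);

        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("the quick brown fox"));

        // Consume the stream with the standard Lucene workflow; each emitted
        // token is one of the kept min-hash values.
        TokenStream stream = factory.create(tokenizer);
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString());
        }
        stream.end();
        stream.close();
    }
}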
@@ -90,6 +90,7 @@
import org.elasticsearch.index.analysis.LowerCaseTokenFilterFactory;
import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
import org.elasticsearch.index.analysis.MappingCharFilterFactory;
import org.elasticsearch.index.analysis.MinHashTokenFilterFactory;
import org.elasticsearch.index.analysis.NGramTokenFilterFactory;
import org.elasticsearch.index.analysis.NGramTokenizerFactory;
import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider;
@@ -214,6 +215,7 @@ private NamedRegistry<AnalysisProvider<TokenFilterFactory>> setupTokenFilters(Li
tokenFilters.register("edgeNGram", EdgeNGramTokenFilterFactory::new);
tokenFilters.register("edge_ngram", EdgeNGramTokenFilterFactory::new);
tokenFilters.register("shingle", ShingleTokenFilterFactory::new);
tokenFilters.register("min_hash", MinHashTokenFilterFactory::new);
tokenFilters.register("unique", UniqueTokenFilterFactory::new);
tokenFilters.register("truncate", requriesAnalysisSettings(TruncateTokenFilterFactory::new));
tokenFilters.register("trim", TrimTokenFilterFactory::new);
@@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTokenStreamTestCase;

import java.io.IOException;
import java.io.StringReader;

public class MinHashFilterFactoryTests extends ESTokenStreamTestCase {
    public void testDefault() throws IOException {
        int default_hash_count = 1;
        int default_bucket_count = 512;
        int default_hash_set_size = 1;
        Settings settings = Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .build();
        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
        TokenFilterFactory tokenFilter = analysisService.tokenFilter("min_hash");
        String source = "the quick brown fox";
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader(source));

        // with_rotation is true by default, and hash_set_size is 1, so even though the source
        // doesn't have enough tokens to fill all the buckets, we still expect 512 tokens.
        assertStreamHasNumberOfTokens(tokenFilter.create(tokenizer),
            default_hash_count * default_bucket_count * default_hash_set_size);
    }

    public void testSettings() throws IOException {
        Settings settings = Settings.builder()
            .put("index.analysis.filter.test_min_hash.type", "min_hash")
            .put("index.analysis.filter.test_min_hash.hash_count", "1")
            .put("index.analysis.filter.test_min_hash.bucket_count", "2")
            .put("index.analysis.filter.test_min_hash.hash_set_size", "1")
            .put("index.analysis.filter.test_min_hash.with_rotation", false)
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .build();
        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
        TokenFilterFactory tokenFilter = analysisService.tokenFilter("test_min_hash");
        String source = "sushi";
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader(source));

        // Although bucket_count is 2 and hash_set_size is 1, with_rotation is false,
        // so we expect only 1 token here.
        assertStreamHasNumberOfTokens(tokenFilter.create(tokenizer), 1);
    }
}
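assertStreamHasNumberOfTokens is an ES test helper; a hedged sketch of the counting it implies, assumed rather than copied from the actual helper implementation:

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;

final class TokenCounting {
    // Count tokens by consuming the stream with the standard Lucene workflow:
    // reset(), incrementToken() until exhausted, then end() and close().
    static int countTokens(TokenStream stream) throws IOException {
        int count = 0;
        stream.reset();
        while (stream.incrementToken()) {
            count++;
        }
        stream.end();
        stream.close();
        return count;
    }
}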
docs/reference/analysis/tokenfilters.asciidoc (3 additions, 1 deletion)
@@ -87,4 +87,6 @@ include::tokenfilters/apostrophe-tokenfilter.asciidoc[]

include::tokenfilters/decimal-digit-tokenfilter.asciidoc[]

include::tokenfilters/fingerprint-tokenfilter.asciidoc[]

include::tokenfilters/minhash-tokenfilter.asciidoc[]
docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc (22 additions)
@@ -0,0 +1,22 @@
[[analysis-minhash-tokenfilter]]
== Minhash Token Filter

A token filter of type `minhash` hashes each token of the token stream and divides
the resulting hashes into buckets, keeping the lowest-valued hashes per
bucket. It then returns these hashes as tokens.

(Reviewer comment from a Contributor on the filter type above: s/minhash/min_hash/)

The following settings can be set for a `minhash` token filter.

[cols="<,<", options="header",]
|=======================================================================
|Setting |Description
|`hash_count` |The number of hashes to hash the token stream with. Defaults to `1`.

|`bucket_count` |The number of buckets to divide the minhashes into. Defaults to `512`.

|`hash_set_size` |The number of minhashes to keep per bucket. Defaults to `1`.

|`with_rotation` |Whether or not to fill empty buckets with the value of the first non-empty
bucket to its circular right. Only takes effect if `hash_set_size` is equal to one.
Defaults to `true` if `bucket_count` is greater than one, else `false`.
|=======================================================================
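As a worked example of how these settings combine, here is a sketch mirroring the PR's own tests; the my_min_hash filter name and the class wrapper are illustrative, not part of this change:

import org.elasticsearch.common.settings.Settings;

class MinHashSettingsExample {
    static Settings exampleSettings() {
        // With with_rotation=true and hash_set_size=1, rotation fills empty
        // buckets, so the filter emits hash_count * bucket_count * hash_set_size
        // = 1 * 512 * 1 = 512 tokens even for a short input.
        return Settings.builder()
            .put("index.analysis.filter.my_min_hash.type", "min_hash")
            .put("index.analysis.filter.my_min_hash.hash_count", "1")
            .put("index.analysis.filter.my_min_hash.bucket_count", "512")
            .put("index.analysis.filter.my_min_hash.hash_set_size", "1")
            .put("index.analysis.filter.my_min_hash.with_rotation", true)
            .build();
    }
}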
@@ -53,6 +53,7 @@
import org.elasticsearch.index.analysis.LowerCaseTokenFilterFactory;
import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
import org.elasticsearch.index.analysis.MappingCharFilterFactory;
import org.elasticsearch.index.analysis.MinHashTokenFilterFactory;
import org.elasticsearch.index.analysis.MultiTermAwareComponent;
import org.elasticsearch.index.analysis.NGramTokenFilterFactory;
import org.elasticsearch.index.analysis.NGramTokenizerFactory;
@@ -93,7 +94,7 @@
import java.util.Set;
import java.util.TreeSet;

/**
* Alerts us if new analyzers are added to lucene, so we don't miss them.
* <p>
* If we don't want to expose one for a specific reason, just map it to Void.
@@ -115,11 +116,11 @@ public class AnalysisFactoryTestCase extends ESTestCase {
.put("thai", ThaiTokenizerFactory.class)
.put("uax29urlemail", UAX29URLEmailTokenizerFactory.class)
.put("whitespace", WhitespaceTokenizerFactory.class)

// this one "seems to mess up offsets". probably shouldn't be a tokenizer...
.put("wikipedia", Void.class)
.immutableMap();

static final Map<String,Class<?>> KNOWN_TOKENFILTERS = new MapBuilder<String,Class<?>>()
// exposed in ES
.put("apostrophe", ApostropheFilterFactory.class)
@@ -184,6 +185,7 @@ public class AnalysisFactoryTestCase extends ESTestCase {
.put("scandinaviannormalization", ScandinavianNormalizationFilterFactory.class)
.put("serbiannormalization", SerbianNormalizationFilterFactory.class)
.put("shingle", ShingleTokenFilterFactory.class)
.put("minhash", MinHashTokenFilterFactory.class)
.put("snowballporter", SnowballTokenFilterFactory.class)
.put("soraninormalization", SoraniNormalizationFilterFactory.class)
.put("soranistem", StemmerTokenFilterFactory.class)
@@ -199,7 +201,7 @@ public class AnalysisFactoryTestCase extends ESTestCase {
.put("type", KeepTypesFilterFactory.class)
.put("uppercase", UpperCaseTokenFilterFactory.class)
.put("worddelimiter", WordDelimiterTokenFilterFactory.class)

// TODO: these tokenfilters are not yet exposed: useful?

// suggest stop
@@ -228,16 +230,15 @@ public class AnalysisFactoryTestCase extends ESTestCase {
.put("fingerprint", Void.class)
// for tee-sinks
.put("daterecognizer", Void.class)
.put("minhash", Void.class)

.immutableMap();

static final Map<String,Class<?>> KNOWN_CHARFILTERS = new MapBuilder<String,Class<?>>()
// exposed in ES
.put("htmlstrip", HtmlStripCharFilterFactory.class)
.put("mapping", MappingCharFilterFactory.class)
.put("patternreplace", PatternReplaceCharFilterFactory.class)

// TODO: these charfilters are not yet exposed: useful?
// handling of zwnj for persian
.put("persian", Void.class)