Update sklearn API for Gensim models #1473

Merged
Showing changes from 2 of 27 commits.

Commits:
c55fcc9  renamed sklearn wrapper classes (Jul 6, 2017)
dde234f  added newline for flake8 check (Jul 7, 2017)
721806b  renamed sklearn api files (Jul 10, 2017)
3cdcfde  updated tests for sklearn api (Jul 10, 2017)
99dba85  updated ipynb for sklearn api (Jul 10, 2017)
4d1eaf4  PEP8 changes (Jul 10, 2017)
155a1ec  updated docstrings for sklearn wrappers (Jul 11, 2017)
ae6c0f3  added 'testPersistence' and 'testModelNotFitted' tests for author top… (Jul 11, 2017)
3c78873  removed 'set_params' function from all wrappers (chinmayapancholi13, Jul 13, 2017)
341ed1f  removed 'get_params' function from base class (chinmayapancholi13, Jul 14, 2017)
9113f82  removed 'get_params' function from all api classes (chinmayapancholi13, Jul 14, 2017)
2935680  removed 'partial_fit()' from base class (chinmayapancholi13, Jul 19, 2017)
9628f99  updated error message (chinmayapancholi13, Jul 19, 2017)
3849d06  updated error message for 'partial_fit' function in W2VTransformer (chinmayapancholi13, Jul 20, 2017)
6097349  removed 'BaseTransformer' class (chinmayapancholi13, Jul 26, 2017)
5b21875  updated error message for 'partial_fit' in 'W2VTransformer' (chinmayapancholi13, Jul 26, 2017)
6bfdb4d  added checks for setting attributes after calling 'fit' (chinmayapancholi13, Jul 27, 2017)
9f0be87  flake8 fix (chinmayapancholi13, Jul 27, 2017)
6004eee  using 'sparse2full' in 'transform' function (chinmayapancholi13, Jul 27, 2017)
3262ec2  added missing imports (chinmayapancholi13, Jul 27, 2017)
d4e560e  added comment about returning dense representation in 'transform' fun… (chinmayapancholi13, Jul 27, 2017)
ad3f1f7  added 'testConsistencyWithGensimModel' for ldamodel (chinmayapancholi13, Jul 27, 2017)
877632e  updated ipynb (chinmayapancholi13, Jul 27, 2017)
0871b50  updated 'testPartialFit' for Lda and Lsi transformers (chinmayapancholi13, Jul 28, 2017)
3f363a1  added author info (chinmayapancholi13, Jul 28, 2017)
c0894bc  added 'testConsistencyWithGensimModel' for w2v transformer (chinmayapancholi13, Jul 28, 2017)
9b7402d  removed merge conflicts (chinmayapancholi13, Aug 4, 2017)
12 changes: 6 additions & 6 deletions gensim/sklearn_integration/__init__.py
@@ -11,9 +11,9 @@


from .base_sklearn_wrapper import BaseSklearnWrapper # noqa: F401
from .sklearn_wrapper_gensim_ldamodel import SklLdaModel # noqa: F401
from .sklearn_wrapper_gensim_lsimodel import SklLsiModel # noqa: F401
from .sklearn_wrapper_gensim_rpmodel import SklRpModel # noqa: F401
from .sklearn_wrapper_gensim_ldaseqmodel import SklLdaSeqModel # noqa: F401
from .sklearn_wrapper_gensim_w2vmodel import SklW2VModel # noqa: F401
from .sklearn_wrapper_gensim_atmodel import SklATModel # noqa: F401
from .sklearn_wrapper_gensim_ldamodel import LdaTransformer # noqa: F401
from .sklearn_wrapper_gensim_lsimodel import LsiTransformer # noqa: F401
from .sklearn_wrapper_gensim_rpmodel import RpTransformer # noqa: F401
from .sklearn_wrapper_gensim_ldaseqmodel import LdaSeqTransformer # noqa: F401
from .sklearn_wrapper_gensim_w2vmodel import W2VTransformer # noqa: F401
from .sklearn_wrapper_gensim_atmodel import AuthorTopicTransformer # noqa: F401
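The renamed classes keep scikit-learn's fit/transform contract, so they compose with sklearn tooling directly. A minimal sketch of the intended usage (the toy corpus and hyperparameters here are illustrative, not from this PR; the Pipeline tests further down exercise the same pattern):

from gensim.corpora import Dictionary
from gensim.sklearn_integration import LdaTransformer

texts = [['graph', 'minors', 'trees'], ['graph', 'trees', 'eps']]  # toy corpus
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]

# fit() returns self per sklearn convention, and transform() returns a
# dense array of shape (len(docs), num_topics).
lda = LdaTransformer(id2word=dictionary, num_topics=2, passes=10)
docvecs = lda.fit(corpus).transform(corpus)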
4 changes: 2 additions & 2 deletions gensim/sklearn_integration/sklearn_wrapper_gensim_atmodel.py
@@ -16,7 +16,7 @@
from gensim.sklearn_integration import BaseSklearnWrapper


class SklATModel(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
class AuthorTopicTransformer(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
"""
Base AuthorTopic module
"""
@@ -65,7 +65,7 @@ def set_params(self, **parameters):
"""
Set all parameters.
"""
super(SklATModel, self).set_params(**parameters)
super(AuthorTopicTransformer, self).set_params(**parameters)
return self

def fit(self, X, y=None):
4 changes: 2 additions & 2 deletions gensim/sklearn_integration/sklearn_wrapper_gensim_ldamodel.py
@@ -19,7 +19,7 @@
from gensim.sklearn_integration import BaseSklearnWrapper


class SklLdaModel(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
class LdaTransformer(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
"""
Base LDA module
"""
@@ -63,7 +63,7 @@ def set_params(self, **parameters):
"""
Set all parameters.
"""
super(SklLdaModel, self).set_params(**parameters)
super(LdaTransformer, self).set_params(**parameters)
return self

def fit(self, X, y=None):
gensim/sklearn_integration/sklearn_wrapper_gensim_ldaseqmodel.py
@@ -17,7 +17,7 @@
from gensim.sklearn_integration import BaseSklearnWrapper


class SklLdaSeqModel(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
class LdaSeqTransformer(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
"""
Base LdaSeq module
"""
@@ -60,7 +60,7 @@ def set_params(self, **parameters):
"""
Set all parameters.
"""
super(SklLdaSeqModel, self).set_params(**parameters)
super(LdaSeqTransformer, self).set_params(**parameters)
return self

def fit(self, X, y=None):
@@ -99,4 +99,4 @@ def transform(self, docs):
return np.reshape(np.array(X), (len(docs), self.num_topics))

def partial_fit(self, X):
raise NotImplementedError("'partial_fit' has not been implemented for SklLdaSeqModel")
raise NotImplementedError("'partial_fit' has not been implemented for LdaSeqTransformer")
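The transform() methods in these wrappers return a dense, fixed-width array per document. As commit 6004eee in the list above notes, the sparse gensim output is first run through matutils.sparse2full; a sketch of just that conversion, with a hypothetical topic distribution:

from gensim import matutils

num_topics = 4
topic_dist = [(0, 0.7), (2, 0.3)]  # sparse gensim vector; topics 1 and 3 are ~0

# sparse2full zero-pads the missing topic ids, so every document becomes a
# fixed-length feature vector that downstream sklearn estimators can consume.
dense = matutils.sparse2full(topic_dist, num_topics)
print(dense)  # [0.7 0.  0.3 0. ]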
4 changes: 2 additions & 2 deletions gensim/sklearn_integration/sklearn_wrapper_gensim_lsimodel.py
@@ -19,7 +19,7 @@
from gensim.sklearn_integration import BaseSklearnWrapper


class SklLsiModel(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
class LsiTransformer(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
"""
Base LSI module
"""
@@ -50,7 +50,7 @@ def set_params(self, **parameters):
"""
Set all parameters.
"""
super(SklLsiModel, self).set_params(**parameters)
super(LsiTransformer, self).set_params(**parameters)
return self

def fit(self, X, y=None):
6 changes: 3 additions & 3 deletions gensim/sklearn_integration/sklearn_wrapper_gensim_rpmodel.py
@@ -17,7 +17,7 @@
from gensim.sklearn_integration import BaseSklearnWrapper


class SklRpModel(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
class RpTransformer(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
"""
Base RP module
"""
@@ -40,7 +40,7 @@ def set_params(self, **parameters):
"""
Set all parameters.
"""
super(SklRpModel, self).set_params(**parameters)
super(RpTransformer, self).set_params(**parameters)
return self

def fit(self, X, y=None):
@@ -77,4 +77,4 @@ def transform(self, docs):
return np.reshape(np.array(X), (len(docs), self.num_topics))

def partial_fit(self, X):
raise NotImplementedError("'partial_fit' has not been implemented for SklRpModel")
raise NotImplementedError("'partial_fit' has not been implemented for RpTransformer")
6 changes: 3 additions & 3 deletions gensim/sklearn_integration/sklearn_wrapper_gensim_w2vmodel.py
@@ -18,7 +18,7 @@
from gensim.sklearn_integration import BaseSklearnWrapper


class SklW2VModel(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
class W2VTransformer(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
"""
Base Word2Vec module
"""
@@ -66,7 +66,7 @@ def set_params(self, **parameters):
"""
Set all parameters.
"""
super(SklW2VModel, self).set_params(**parameters)
super(W2VTransformer, self).set_params(**parameters)
return self

def fit(self, X, y=None):
@@ -101,4 +101,4 @@ def transform(self, words):
return np.reshape(np.array(X), (len(words), self.size))

def partial_fit(self, X):
raise NotImplementedError("'partial_fit' has not been implemented for SklW2VModel")
raise NotImplementedError("'partial_fit' has not been implemented for W2VTransformer")
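As the hunks above show, transformers that cannot train incrementally now fail fast instead of silently misbehaving. A caller probing for incremental support would guard accordingly (a sketch; the toy sentence is illustrative):

from gensim.sklearn_integration import W2VTransformer

model = W2VTransformer(size=10, min_count=1, seed=42)
try:
    model.partial_fit([['graph', 'trees', 'minors']])
except NotImplementedError as err:
    print(err)  # 'partial_fit' has not been implemented for W2VTransformer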
61 changes: 31 additions & 30 deletions gensim/test/test_sklearn_integration.py
@@ -15,12 +15,12 @@
except ImportError:
raise unittest.SkipTest("Test requires scikit-learn to be installed, which is not available")

from gensim.sklearn_integration.sklearn_wrapper_gensim_rpmodel import SklRpModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldamodel import SklLdaModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_lsimodel import SklLsiModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldaseqmodel import SklLdaSeqModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_w2vmodel import SklW2VModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_atmodel import SklATModel
from gensim.sklearn_integration.sklearn_wrapper_gensim_rpmodel import RpTransformer
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldamodel import LdaTransformer
from gensim.sklearn_integration.sklearn_wrapper_gensim_lsimodel import LsiTransformer
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldaseqmodel import LdaSeqTransformer
from gensim.sklearn_integration.sklearn_wrapper_gensim_w2vmodel import W2VTransformer
from gensim.sklearn_integration.sklearn_wrapper_gensim_atmodel import AuthorTopicTransformer
Collaborator:
As long as we're making breaking renames/refactors for clarity/concision, these package-names have a lot of redundancy, too. gensim.sklearn_integration.sklearn_wrapper_gensim_atmodel repeats both sklearn and gensim, and integration and wrapper have overlapping meanings.

So perhaps just gensim.sklearn_wrappers.atmodel? And that only if at is already easily recognized as an abbreviation from its use elsewhere; otherwise gensim.sklearn_wrappers.authortopicmodel or even gensim.sklearn_wrappers.authortopic?

Or, given that these wrappers are each pretty slight in size, they could go either (1) into the respective associated model file, so that W2VTransformer lands in gensim.models.word2vec (and probably loses its abbreviation, to match the unabbreviated names used in that file's classes); or (2) into a single sklearn_wrappers file, alongside BaseSklearnWrapper.

(And regarding BaseSklearnWrapper: I'm not sure it adds much over sklearn's own BaseEstimator and TransformerMixin. It forces more rigor, since subclasses must override its abstract methods, which sklearn itself never requires, but it offers less functionality in set_params() and other aspects of introspectability.)
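For comparison, the alternative mentioned in that parenthetical needs no custom base class at all. A sketch of a transformer built directly on sklearn's mixins (the class and parameter names here are hypothetical, not part of this PR):

from sklearn.base import BaseEstimator, TransformerMixin

class ToyTransformer(BaseEstimator, TransformerMixin):
    """Hypothetical stand-in for a gensim-backed transformer."""

    def __init__(self, num_topics=100):
        self.num_topics = num_topics  # plain attribute; sklearn introspects it

    def fit(self, X, y=None):
        return self  # a real wrapper would train the gensim model here

    def transform(self, docs):
        return docs  # a real wrapper would delegate to the fitted model

# BaseEstimator derives get_params()/set_params() from __init__'s signature,
# and TransformerMixin supplies fit_transform() -- no abstract base needed.
print(ToyTransformer(num_topics=2).get_params())  # {'num_topics': 2}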

Collaborator:
cc @piskvorky for overall library naming/organization priorities

Contributor:
I vote for from gensim.models.atmodel import AuthorTopicTransformer, but would agree with from gensim.sklearn_api import AuthorTopicTransformer too. It is neither a wrapper nor an integration, but just an API.

Owner (@piskvorky, Jul 7, 2017):
If we were to share the same module for gensim/sklearn classes, we'd have to tiptoe around the sklearn imports, because sklearn is not a dependency of gensim.

An extra subpackage sounds cleaner to me: one subpackage for sklearn / keras / spark / whatever API. Not imported automatically from gensim __init__, so that users need to import it explicitly, after installing sklearn/keras/spark/tensorflow/whatever.

And if the subpackage becomes too unwieldy or complex (not the case with sklearn now), a separate library would make sense to me too, to decouple the release and maintenance cycle.
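The "tiptoeing" would look like the guard the test module below already uses: attempt the sklearn import at module load, and fail loudly if it is absent. A hypothetical sketch of what such a subpackage would need at its top:

try:
    from sklearn.base import BaseEstimator, TransformerMixin  # noqa: F401
except ImportError:
    raise ImportError(
        "gensim's sklearn API requires scikit-learn; "
        "install it first, e.g. via `pip install scikit-learn`."
    )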

Contributor:
I vote for from gensim.sklearn_api import AuthorTopicTransformer, with the subpackage named sklearn_api. We would not take sklearn on as a dependency, but would still get a short and unique import path for the sklearn wrappers.

from gensim.corpora import mmcorpus, Dictionary
from gensim import matutils

@@ -97,10 +97,11 @@
['advances', 'in', 'the', 'understanding', 'of', 'electromagnetism', 'or', 'nuclear', 'physics', 'led', 'directly', 'to', 'the', 'development', 'of', 'new', 'products', 'that', 'have', 'dramatically', 'transformed', 'modern', 'day', 'society']
]

class TestSklLdaModelWrapper(unittest.TestCase):

class TestLdaWrapper(unittest.TestCase):
def setUp(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
self.model = SklLdaModel(id2word=dictionary, num_topics=2, passes=100, minimum_probability=0, random_state=numpy.random.seed(0))
self.model = LdaTransformer(id2word=dictionary, num_topics=2, passes=100, minimum_probability=0, random_state=numpy.random.seed(0))
self.model.fit(corpus)

def testTransform(self):
@@ -130,7 +131,7 @@ def testCSRMatrixConversion(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
arr = numpy.array([[1, 2, 0], [0, 0, 3], [1, 0, 0]])
sarr = sparse.csr_matrix(arr)
newmodel = SklLdaModel(num_topics=2, passes=100)
newmodel = LdaTransformer(num_topics=2, passes=100)
newmodel.fit(sarr)
bow = [(0, 1), (1, 2), (2, 0)]
transformed_vec = newmodel.transform(bow)
@@ -139,7 +140,7 @@ def testCSRMatrixConversion(self):
self.assertTrue(passed)

def testPipeline(self):
model = SklLdaModel(num_topics=2, passes=10, minimum_probability=0, random_state=numpy.random.seed(0))
model = LdaTransformer(num_topics=2, passes=10, minimum_probability=0, random_state=numpy.random.seed(0))
with open(datapath('mini_newsgroup'), 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
@@ -186,16 +187,16 @@ def testPersistence(self):
self.assertTrue(passed)

def testModelNotFitted(self):
lda_wrapper = SklLdaModel(id2word=dictionary, num_topics=2, passes=100, minimum_probability=0, random_state=numpy.random.seed(0))
lda_wrapper = LdaTransformer(id2word=dictionary, num_topics=2, passes=100, minimum_probability=0, random_state=numpy.random.seed(0))
texts_new = ['graph', 'eulerian']
bow = lda_wrapper.id2word.doc2bow(texts_new)
self.assertRaises(NotFittedError, lda_wrapper.transform, bow)


class TestSklLsiModelWrapper(unittest.TestCase):
class TestLsiWrapper(unittest.TestCase):
def setUp(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
self.model = SklLsiModel(id2word=dictionary, num_topics=2)
self.model = LsiTransformer(id2word=dictionary, num_topics=2)
self.model.fit(corpus)

def testTransform(self):
@@ -222,7 +223,7 @@ def testPartialFit(self):
self.assertTrue(passed)

def testPipeline(self):
model = SklLsiModel(num_topics=2)
model = LsiTransformer(num_topics=2)
with open(datapath('mini_newsgroup'), 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
@@ -269,15 +270,15 @@ def testPersistence(self):
self.assertTrue(passed)

def testModelNotFitted(self):
lsi_wrapper = SklLsiModel(id2word=dictionary, num_topics=2)
lsi_wrapper = LsiTransformer(id2word=dictionary, num_topics=2)
texts_new = ['graph', 'eulerian']
bow = lsi_wrapper.id2word.doc2bow(texts_new)
self.assertRaises(NotFittedError, lsi_wrapper.transform, bow)


class TestSklLdaSeqModelWrapper(unittest.TestCase):
class TestLdaSeqWrapper(unittest.TestCase):
def setUp(self):
self.model = SklLdaSeqModel(id2word=dictionary_ldaseq, num_topics=2, time_slice=[10, 10, 11], initialize='own', sstats=sstats_ldaseq)
self.model = LdaSeqTransformer(id2word=dictionary_ldaseq, num_topics=2, time_slice=[10, 10, 11], initialize='own', sstats=sstats_ldaseq)
self.model.fit(corpus_ldaseq)

def testTransform(self):
Expand Down Expand Up @@ -319,7 +320,7 @@ def testPipeline(self):
test_target = data.target[0:2]
id2word = Dictionary(map(lambda x: x.split(), test_data))
corpus = [id2word.doc2bow(i.split()) for i in test_data]
model = SklLdaSeqModel(id2word=id2word, num_topics=2, time_slice=[1, 1, 1], initialize='gensim')
model = LdaSeqTransformer(id2word=id2word, num_topics=2, time_slice=[1, 1, 1], initialize='gensim')
clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
text_ldaseq = Pipeline((('features', model,), ('classifier', clf)))
text_ldaseq.fit(corpus, test_target)
@@ -343,15 +344,15 @@ def testPersistence(self):
self.assertTrue(passed)

def testModelNotFitted(self):
ldaseq_wrapper = SklLdaSeqModel(num_topics=2)
ldaseq_wrapper = LdaSeqTransformer(num_topics=2)
doc = list(corpus_ldaseq)[0]
self.assertRaises(NotFittedError, ldaseq_wrapper.transform, doc)


class TestSklRpModelWrapper(unittest.TestCase):
class TestRpWrapper(unittest.TestCase):
def setUp(self):
numpy.random.seed(13)
self.model = SklRpModel(num_topics=2)
self.model = RpTransformer(num_topics=2)
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
self.model.fit(self.corpus)

@@ -378,7 +379,7 @@ def testSetGetParams(self):

def testPipeline(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
model = SklRpModel(num_topics=2)
model = RpTransformer(num_topics=2)
with open(datapath('mini_newsgroup'), 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
@@ -410,15 +411,15 @@ def testPersistence(self):
self.assertTrue(passed)

def testModelNotFitted(self):
rpmodel_wrapper = SklRpModel(num_topics=2)
rpmodel_wrapper = RpTransformer(num_topics=2)
doc = list(self.corpus)[0]
self.assertRaises(NotFittedError, rpmodel_wrapper.transform, doc)


class TestSklW2VModelWrapper(unittest.TestCase):
class TestWord2VecWrapper(unittest.TestCase):
def setUp(self):
numpy.random.seed(0)
self.model = SklW2VModel(size=10, min_count=0, seed=42)
self.model = W2VTransformer(size=10, min_count=0, seed=42)
self.model.fit(texts)

def testTransform(self):
Expand All @@ -443,7 +444,7 @@ def testSetGetParams(self):

def testPipeline(self):
numpy.random.seed(0) # set fixed seed to get similar values every time
model = SklW2VModel(size=10, min_count=1)
model = W2VTransformer(size=10, min_count=1)
model.fit(w2v_texts)

class_dict = {'mathematics': 1, 'physics': 0}
@@ -477,14 +478,14 @@ def testPersistence(self):
self.assertTrue(passed)

def testModelNotFitted(self):
w2vmodel_wrapper = SklW2VModel(size=10, min_count=0, seed=42)
w2vmodel_wrapper = W2VTransformer(size=10, min_count=0, seed=42)
word = texts[0][0]
self.assertRaises(NotFittedError, w2vmodel_wrapper.transform, word)


class TestSklATModelWrapper(unittest.TestCase):
class TestAuthorTopicWrapper(unittest.TestCase):
def setUp(self):
self.model = SklATModel(id2word=dictionary, author2doc=author2doc, num_topics=2, passes=100)
self.model = AuthorTopicTransformer(id2word=dictionary, author2doc=author2doc, num_topics=2, passes=100)
self.model.fit(corpus)

def testTransform(self):
@@ -522,7 +523,7 @@ def testSetGetParams(self):

def testPipeline(self):
# train the AuthorTopic model first
model = SklATModel(id2word=dictionary, author2doc=author2doc, num_topics=10, passes=100)
model = AuthorTopicTransformer(id2word=dictionary, author2doc=author2doc, num_topics=10, passes=100)
model.fit(corpus)

# create and train clustering model