Commit

feat: Added nomic-embed-text-v1 support

Anush008 committed Feb 13, 2024
1 parent 558a837 commit e32a903
Showing 9 changed files with 64 additions and 44 deletions.
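The change set registers nomic-ai/nomic-embed-text-v1 as a supported ONNX model and loosens a few assumptions (a hard-coded pad-token id, non-Optional type hints) that the new model would otherwise trip over. A minimal usage sketch, assuming the fastembed package as of this commit; the model name and 768-dimension output are taken from the registry entry added below:

```python
from fastembed import TextEmbedding

model = TextEmbedding(model_name="nomic-ai/nomic-embed-text-v1")
vectors = list(model.embed(["FastEmbed is a lightweight embedding library."]))
print(vectors[0].shape)  # (768,), matching the "dim" field of the new registry entry
```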
50 changes: 31 additions & 19 deletions docs/examples/Supported_Models.ipynb
@@ -110,30 +110,38 @@
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>nomic-ai/nomic-embed-text-v1</td>\n",
" <td>768</td>\n",
" <td>8192 context length english model</td>\n",
" <td>0.54</td>\n",
" <td>{'hf': 'xenova/nomic-embed-text-v1'}</td>\n",
" </tr>\n",
" <tr>\n",
" <th>9</th>\n",
" <td>intfloat/multilingual-e5-large</td>\n",
" <td>1024</td>\n",
" <td>Multilingual model, e5-large. Recommend using this model for non-English languages</td>\n",
" <td>2.24</td>\n",
" <td>{'url': 'https://storage.googleapis.com/qdrant-fastembed/fast-multilingual-e5-large.tar.gz', 'hf': 'qdrant/multilingual-e5-large-onnx'}</td>\n",
" </tr>\n",
" <tr>\n",
" <th>9</th>\n",
" <th>10</th>\n",
" <td>sentence-transformers/paraphrase-multilingual-mpnet-base-v2</td>\n",
" <td>768</td>\n",
" <td>Sentence-transformers model for tasks like clustering or semantic search</td>\n",
" <td>1.11</td>\n",
" <td>{'hf': 'xenova/paraphrase-multilingual-mpnet-base-v2'}</td>\n",
" </tr>\n",
" <tr>\n",
" <th>10</th>\n",
" <th>11</th>\n",
" <td>jinaai/jina-embeddings-v2-base-en</td>\n",
" <td>768</td>\n",
" <td>English embedding model supporting 8192 sequence length</td>\n",
" <td>0.55</td>\n",
" <td>{'hf': 'xenova/jina-embeddings-v2-base-en'}</td>\n",
" </tr>\n",
" <tr>\n",
" <th>11</th>\n",
" <th>12</th>\n",
" <td>jinaai/jina-embeddings-v2-small-en</td>\n",
" <td>512</td>\n",
" <td>English embedding model supporting 8192 sequence length</td>\n",
@@ -154,10 +162,11 @@
"5 BAAI/bge-small-en-v1.5 384 \n",
"6 BAAI/bge-small-zh-v1.5 512 \n",
"7 sentence-transformers/all-MiniLM-L6-v2 384 \n",
"8 intfloat/multilingual-e5-large 1024 \n",
"9 sentence-transformers/paraphrase-multilingual-mpnet-base-v2 768 \n",
"10 jinaai/jina-embeddings-v2-base-en 768 \n",
"11 jinaai/jina-embeddings-v2-small-en 512 \n",
"8 nomic-ai/nomic-embed-text-v1 768 \n",
"9 intfloat/multilingual-e5-large 1024 \n",
"10 sentence-transformers/paraphrase-multilingual-mpnet-base-v2 768 \n",
"11 jinaai/jina-embeddings-v2-base-en 768 \n",
"12 jinaai/jina-embeddings-v2-small-en 512 \n",
"\n",
" description \\\n",
"0 Base English model \n",
@@ -168,10 +177,11 @@
"5 Fast and Default English model \n",
"6 Fast and recommended Chinese model \n",
"7 Sentence Transformer model, MiniLM-L6-v2 \n",
"8 Multilingual model, e5-large. Recommend using this model for non-English languages \n",
"9 Sentence-transformers model for tasks like clustering or semantic search \n",
"10 English embedding model supporting 8192 sequence length \n",
"8 8192 context length english model \n",
"9 Multilingual model, e5-large. Recommend using this model for non-English languages \n",
"10 Sentence-transformers model for tasks like clustering or semantic search \n",
"11 English embedding model supporting 8192 sequence length \n",
"12 English embedding model supporting 8192 sequence length \n",
"\n",
" size_in_GB \\\n",
"0 0.50 \n",
@@ -182,10 +192,11 @@
"5 0.13 \n",
"6 0.10 \n",
"7 0.09 \n",
"8 2.24 \n",
"9 1.11 \n",
"10 0.55 \n",
"11 0.13 \n",
"8 0.54 \n",
"9 2.24 \n",
"10 1.11 \n",
"11 0.55 \n",
"12 0.13 \n",
"\n",
" sources \n",
"0 {'url': 'https://storage.googleapis.com/qdrant-fastembed/fast-bge-base-en.tar.gz'} \n",
@@ -196,10 +207,11 @@
"5 {'url': 'https://storage.googleapis.com/qdrant-fastembed/fast-bge-small-en-v1.5.tar.gz', 'hf': 'qdrant/bge-small-en-v1.5-onnx-q'} \n",
"6 {'url': 'https://storage.googleapis.com/qdrant-fastembed/fast-bge-small-zh-v1.5.tar.gz'} \n",
"7 {'url': 'https://storage.googleapis.com/qdrant-fastembed/sentence-transformers-all-MiniLM-L6-v2.tar.gz', 'hf': 'qdrant/all-MiniLM-L6-v2-onnx'} \n",
"8 {'url': 'https://storage.googleapis.com/qdrant-fastembed/fast-multilingual-e5-large.tar.gz', 'hf': 'qdrant/multilingual-e5-large-onnx'} \n",
"9 {'hf': 'xenova/paraphrase-multilingual-mpnet-base-v2'} \n",
"10 {'hf': 'xenova/jina-embeddings-v2-base-en'} \n",
"11 {'hf': 'xenova/jina-embeddings-v2-small-en'} "
"8 {'hf': 'xenova/nomic-embed-text-v1'} \n",
"9 {'url': 'https://storage.googleapis.com/qdrant-fastembed/fast-multilingual-e5-large.tar.gz', 'hf': 'qdrant/multilingual-e5-large-onnx'} \n",
"10 {'hf': 'xenova/paraphrase-multilingual-mpnet-base-v2'} \n",
"11 {'hf': 'xenova/jina-embeddings-v2-base-en'} \n",
"12 {'hf': 'xenova/jina-embeddings-v2-small-en'} "
]
},
"execution_count": 4,
@@ -232,7 +244,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.11.7"
},
"orig_nbformat": 4
},
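The table above is simply the rendered output of TextEmbedding.list_supported_models(); a sketch to regenerate it, assuming pandas is installed in the notebook environment:

```python
import pandas as pd

from fastembed import TextEmbedding

# list_supported_models() returns the registry dicts shown in onnx_embedding.py below.
df = pd.DataFrame(TextEmbedding.list_supported_models())
print(df[["model", "dim", "size_in_GB"]])
```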
4 changes: 2 additions & 2 deletions fastembed/__init__.py
@@ -1,3 +1,3 @@
from .embedding import TextEmbedding
from fastembed.text.text_embedding import TextEmbedding

__all__ = [TextEmbedding]
__all__ = ["TextEmbedding"]
2 changes: 1 addition & 1 deletion fastembed/common/models.py
@@ -33,7 +33,7 @@ def load_tokenizer(model_dir: Path, max_length: int = 512) -> Tokenizer:

tokenizer = Tokenizer.from_file(str(tokenizer_path))
tokenizer.enable_truncation(max_length=min(tokenizer_config["model_max_length"], max_length))
tokenizer.enable_padding(pad_id=config["pad_token_id"], pad_token=tokenizer_config["pad_token"])
tokenizer.enable_padding(pad_id=config.get("pad_token_id", 0), pad_token=tokenizer_config["pad_token"])

for token in tokens_map.values():
if isinstance(token, str):
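Switching from config["pad_token_id"] to config.get("pad_token_id", 0) makes the padding id optional; presumably nomic-embed-text-v1's config.json omits the key, which previously raised a KeyError during tokenizer setup. A minimal sketch of the difference, using a hypothetical parsed config dict:

```python
config = {"model_type": "nomic_bert"}  # hypothetical config.json contents without "pad_token_id"

# Before: config["pad_token_id"] would raise KeyError.
# After: fall back to 0, the conventional [PAD] id in BERT-style vocabularies.
pad_id = config.get("pad_token_id", 0)
```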
8 changes: 4 additions & 4 deletions fastembed/common/utils.py
@@ -24,10 +24,10 @@ def define_cache_dir(cache_dir: Optional[str] = None) -> Path:
"""
if cache_dir is None:
default_cache_dir = os.path.join(tempfile.gettempdir(), "fastembed_cache")
cache_dir = Path(os.getenv("FASTEMBED_CACHE_PATH", default_cache_dir))
cache_path = Path(os.getenv("FASTEMBED_CACHE_PATH", default_cache_dir))
else:
cache_dir = Path(cache_dir)
cache_path = Path(cache_dir)

cache_dir.mkdir(parents=True, exist_ok=True)
cache_path.mkdir(parents=True, exist_ok=True)

return cache_dir
return cache_path
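The rename stops define_cache_dir from rebinding its own str parameter to a Path, keeping the str-in/Path-out contract visible. The resolution order, as an illustrative sketch (FASTEMBED_CACHE_PATH is the environment variable named in the diff; the concrete paths are made up):

```python
import os

from fastembed.common.utils import define_cache_dir

os.environ["FASTEMBED_CACHE_PATH"] = "/tmp/my_fastembed_cache"  # illustrative path

print(define_cache_dir())               # /tmp/my_fastembed_cache — env var beats the tempdir default
print(define_cache_dir("/data/cache"))  # /data/cache — an explicit argument beats both
```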
10 changes: 5 additions & 5 deletions fastembed/text/e5_onnx_embedding.py
@@ -22,8 +22,8 @@
"size_in_GB": 1.11,
"sources": {
"hf": "xenova/paraphrase-multilingual-mpnet-base-v2",
}
}
},
},
]


@@ -51,8 +51,8 @@ def _preprocess_onnx_input(self, onnx_input: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:

class E5OnnxEmbeddingWorker(OnnxTextEmbeddingWorker):
def init_embedding(
self,
model_name: str,
cache_dir: str,
self,
model_name: str,
cache_dir: str,
) -> E5OnnxEmbedding:
return E5OnnxEmbedding(model_name=model_name, cache_dir=cache_dir, threads=1)
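The worker pins each E5OnnxEmbedding to a single onnxruntime thread, the usual arrangement when embed() fans out across processes: parallelism comes from the process count rather than intra-op threads. A usage sketch under that assumption:

```python
from fastembed import TextEmbedding

model = TextEmbedding("intfloat/multilingual-e5-large")
docs = ["query: how do I cache models?", "passage: FastEmbed caches locally."] * 100

# parallel=2 is illustrative: each worker process builds its own model copy
# with threads=1 (see init_embedding above), so roughly two cores are used.
vectors = list(model.embed(docs, batch_size=64, parallel=2))
```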
17 changes: 13 additions & 4 deletions fastembed/text/onnx_embedding.py
@@ -1,6 +1,6 @@
import os
from multiprocessing import get_all_start_methods
from typing import List, Dict, Any, Tuple, Union, Iterable, Type
from typing import List, Dict, Any, Optional, Tuple, Union, Iterable, Type

import numpy as np
import onnxruntime as ort
@@ -98,6 +98,15 @@
"hf": "qdrant/all-MiniLM-L6-v2-onnx",
},
},
{
"model": "nomic-ai/nomic-embed-text-v1",
"dim": 768,
"description": "8192 context length english model",
"size_in_GB": 0.54,
"sources": {
"hf": "xenova/nomic-embed-text-v1",
},
},
# {
# "model": "sentence-transformers/all-MiniLM-L6-v2",
# "dim": 384,
@@ -149,8 +158,8 @@ def _get_model_description(cls, model_name: str) -> Dict[str, Any]:
def __init__(
self,
model_name: str = "BAAI/bge-small-en-v1.5",
cache_dir: str = None,
threads: int = None,
cache_dir: Optional[str] = None,
threads: Optional[int] = None,
**kwargs,
):
"""
Expand Down Expand Up @@ -193,7 +202,7 @@ def embed(
self,
documents: Union[str, Iterable[str]],
batch_size: int = 256,
parallel: int = None,
parallel: Optional[int] = None,
**kwargs,
) -> Iterable[np.ndarray]:
"""
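Beyond the new registry entry, the signature edits replace the implicit cache_dir: str = None style with Optional[...], which PEP 484 requires for a None default and which strict type checkers flag. In sketch form:

```python
from typing import Optional

# Before (flagged by strict checkers): def embed(..., parallel: int = None): ...
# After:
def embed_stub(parallel: Optional[int] = None) -> None:  # illustrative stand-in
    ...
```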
14 changes: 6 additions & 8 deletions fastembed/text/text_embedding.py
@@ -53,24 +53,22 @@ def __init__(
):
super().__init__(model_name, cache_dir, threads, **kwargs)

self.model = None
for embedding in self.EMBEDDINGS_REGISTRY:
supported_models = embedding.list_supported_models()
if any(model_name == model["model"] for model in supported_models):
self.model = embedding(model_name, cache_dir, threads, **kwargs)
break
return

if self.model is None:
raise ValueError(
f"Model {model_name} is not supported in TextEmbedding."
"Please check the supported models using `TextEmbedding.list_supported_models()`"
)
raise ValueError(
f"Model {model_name} is not supported in TextEmbedding."
"Please check the supported models using `TextEmbedding.list_supported_models()`"
)

def embed(
self,
documents: Union[str, Iterable[str]],
batch_size: int = 256,
parallel: int = None,
parallel: Optional[int] = None,
**kwargs,
) -> Iterable[np.ndarray]:
"""
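The constructor now returns as soon as a registry class claims the model name and raises unconditionally at the end, dropping the self.model = None sentinel and its follow-up check. One small wart survives the refactor: the two string literals in the raise concatenate without a separating space, so the message reads "...TextEmbedding.Please check...". A sketch of the failure path, with a deliberately invalid, hypothetical model name:

```python
from fastembed import TextEmbedding

try:
    TextEmbedding(model_name="not-a-real/model")  # hypothetical unsupported name
except ValueError as e:
    print(e)
    # Model not-a-real/model is not supported in TextEmbedding.Please check the
    # supported models using `TextEmbedding.list_supported_models()`
```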
2 changes: 1 addition & 1 deletion fastembed/text/text_embedding_base.py
@@ -19,7 +19,7 @@ def embed(
self,
documents: Union[str, Iterable[str]],
batch_size: int = 256,
parallel: int = None,
parallel: Optional[int] = None,
**kwargs,
) -> Iterable[np.ndarray]:
raise NotImplementedError()
1 change: 1 addition & 0 deletions tests/test_onnx_embeddings.py
@@ -20,6 +20,7 @@
),
"jinaai/jina-embeddings-v2-small-en": np.array([-0.0455, -0.0428, -0.0122, 0.0613, 0.0015]),
"jinaai/jina-embeddings-v2-base-en": np.array([-0.0332, -0.0509, 0.0287, -0.0043, -0.0077]),
"nomic-ai/nomic-embed-text-v1": np.array([0.0061, 0.0103, -0.0296, -0.0242, -0.0170]),
}


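The new line extends a table of canonical vectors: the first five dimensions of a known input's embedding, pinned per model. A sketch of how such a regression check is typically asserted; the actual test body is outside this diff, so the input text and tolerance below are assumptions:

```python
import numpy as np

from fastembed import TextEmbedding

expected = np.array([0.0061, 0.0103, -0.0296, -0.0242, -0.0170])  # value from the diff

embedding = next(TextEmbedding("nomic-ai/nomic-embed-text-v1").embed(["hello world"]))  # input assumed
assert np.allclose(embedding[:5], expected, atol=1e-3)  # tolerance assumed
```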
