forked from run-llama/llama_index
-
Notifications
You must be signed in to change notification settings - Fork 0
/
setup_llama.py
41 lines (35 loc) · 1.04 KB
/
setup_llama.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
"""Set up the package."""
import sys
from pathlib import Path
from setuptools import find_packages, setup
# Read the release version string from the VERSION file shipped inside
# the llama_index package directory (next to this setup script).
_version_path = Path(__file__).absolute().parents[0] / "llama_index" / "VERSION"
with open(_version_path) as _version_file:
    __version__ = _version_file.read().strip()

# The project README becomes the long description rendered on PyPI.
with open("README.md", "r", encoding="utf-8") as _readme_file:
    long_description = _readme_file.read()
# Core runtime dependencies required on every supported Python version.
install_requires = [
    "langchain",
    "openai>=0.26.4",
    "dataclasses_json",
    "transformers",
    "nltk",
    "numpy",
    "tenacity<8.2.0",
    "pandas",
]

# tiktoken is only installed on Python >= 3.9 (per the original note in this
# file — presumably it does not support older interpreters; verify upstream).
if sys.version_info >= (3, 9):
    # append a single name rather than extend with a throwaway one-element list
    install_requires.append("tiktoken")
# Register/publish the package metadata under the name "llama_index".
setup(
    name="llama_index",
    version=__version__,
    # Ship only the llama_index package tree (anything matching "llama*"),
    # not stray top-level modules or test directories.
    packages=find_packages(include=["llama*"]),
    description="Interface between LLMs and your data.",
    install_requires=install_requires,
    long_description=long_description,
    license="MIT",
    url="https://github.com/jerryjliu/gpt_index",
    include_package_data=True,
    long_description_content_type="text/markdown",
)