forked from intel/neural-compressor
-
Notifications
You must be signed in to change notification settings - Fork 0
/
setup.py
157 lines (138 loc) · 5.42 KB
/
setup.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
import os
import re
import subprocess
import sys
from io import open
from setuptools import find_packages, setup
def fetch_requirements(path):
    """Read a requirements file and return its entries as a list of stripped lines."""
    with open(path, "r") as req_file:
        return [line.strip() for line in req_file]
def is_commit_on_tag():
    """Return the tag name if HEAD is exactly on a git tag, otherwise False.

    The return value is intended to be used in a boolean context: a
    non-empty tag string is truthy, the no-tag case is False.
    """
    try:
        result = subprocess.run(
            ["git", "describe", "--exact-match", "--tags"], capture_output=True, text=True, check=True
        )
        tag_name = result.stdout.strip()
        return tag_name
    except (subprocess.CalledProcessError, FileNotFoundError):
        # CalledProcessError: HEAD is not exactly on a tag (or not a git
        # checkout). FileNotFoundError: git itself is not installed —
        # e.g. building from an sdist tarball — which previously crashed
        # setup.py with an uncaught exception.
        return False
def get_build_version():
    """Return the version string for this build.

    On an exact tag checkout, returns the release version (``__version__``
    read from version.py at module level). Otherwise derives a dev version
    ``<version>.dev<distance>+<commit>`` from ``git describe --tags``.
    Falls back to the plain ``__version__`` when git metadata is
    unavailable or unparsable.
    """
    if is_commit_on_tag():
        return __version__
    try:
        result = subprocess.run(["git", "describe", "--tags"], capture_output=True, text=True, check=True)
        # BUGFIX: split("-") raised an uncaught ValueError whenever the tag
        # name itself contains a hyphen (e.g. "v3.0-rc1-5-gabc123" splits
        # into 4+ fields). rsplit from the right always isolates the
        # distance and commit fields regardless of tag naming.
        _, distance, commit = result.stdout.strip().rsplit("-", 2)
        return f"{__version__}.dev{distance}+{commit}"
    except (subprocess.CalledProcessError, FileNotFoundError, ValueError):
        # No git, no tags reachable, or unexpected describe output:
        # fall back to the release version.
        return __version__
# Read the canonical package version from neural_compressor/version.py so
# that setup.py and the installed package can never disagree.
filepath = "./neural_compressor/version.py"
try:
    with open(filepath) as version_file:
        (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
except Exception as error:
    # BUGFIX: this previously used `assert False, ...`, which is silently
    # stripped under `python -O`, letting a missing/malformed version file
    # slip through and crash later with a NameError. Raise explicitly.
    raise RuntimeError("Error: Could not open '%s' due %s\n" % (filepath, error)) from error
# Per-distribution build configuration. Each entry maps a config key
# (selected in __main__ from the "pt"/"tf" argv flags) to the setup()
# arguments for that binary: PyPI project name, packages to ship,
# package data, and pinned requirements files.
PKG_INSTALL_CFG = {
    # overall installation config, pip install neural-compressor
    "neural_compressor": {
        "project_name": "neural_compressor",
        "include_packages": find_packages(
            include=["neural_compressor", "neural_compressor.*"],
            exclude=[
                "neural_compressor.template",
            ],
        ),
        # Ship YAML config files from every included package directory.
        "package_data": {"": ["*.yaml"]},
        "install_requires": fetch_requirements("requirements.txt"),
        # Framework extras: pip install neural-compressor[pt] / [tf].
        "extras_require": {
            "pt": fetch_requirements("requirements_pt.txt"),
            "tf": fetch_requirements("requirements_tf.txt"),
        },
    },
    # 3.x pt binary build config, pip install neural-compressor-pt, install 3.x PyTorch API.
    "neural_compressor_3x_pt": {
        "project_name": "neural_compressor_3x_pt",
        # Only the framework-agnostic common layer plus the torch backend.
        "include_packages": find_packages(
            include=[
                "neural_compressor.common",
                "neural_compressor.common.*",
                "neural_compressor.torch",
                "neural_compressor.torch.*",
            ],
        ),
        "install_requires": fetch_requirements("requirements_pt.txt"),
    },
    # 3.x tf binary build config, pip install neural-compressor-tf, install 3.x TensorFlow API.
    "neural_compressor_3x_tf": {
        "project_name": "neural_compressor_3x_tf",
        # Only the framework-agnostic common layer plus the tensorflow backend.
        "include_packages": find_packages(
            include=[
                "neural_compressor.common",
                "neural_compressor.common.*",
                "neural_compressor.tensorflow",
                "neural_compressor.tensorflow.*",
            ],
        ),
        "package_data": {"": ["*.yaml"]},
        "install_requires": fetch_requirements("requirements_tf.txt"),
    },
}
if __name__ == "__main__":
cfg_key = "neural_compressor"
# Temporary implementation of fp8 tensor saving and loading
# Will remove after Habana torch applies below patch:
# https://github.com/pytorch/pytorch/pull/114662
ext_modules = []
cmdclass = {}
if "pt" in sys.argv:
sys.argv.remove("pt")
cfg_key = "neural_compressor_3x_pt"
if "tf" in sys.argv:
sys.argv.remove("tf")
cfg_key = "neural_compressor_3x_tf"
if bool(os.getenv("USE_FP8_CONVERT", False)):
from torch.utils.cpp_extension import BuildExtension, CppExtension
ext_modules = [
CppExtension(
"fp8_convert",
["neural_compressor/torch/algorithms/habana_fp8/tensor/convert.cpp"],
),
]
cmdclass = {"build_ext": BuildExtension}
project_name = PKG_INSTALL_CFG[cfg_key].get("project_name")
include_packages = PKG_INSTALL_CFG[cfg_key].get("include_packages") or {}
package_data = PKG_INSTALL_CFG[cfg_key].get("package_data") or {}
install_requires = PKG_INSTALL_CFG[cfg_key].get("install_requires") or []
extras_require = PKG_INSTALL_CFG[cfg_key].get("extras_require") or {}
entry_points = {
"console_scripts": [
"incbench = neural_compressor.common.benchmark:benchmark",
]
}
setup(
name=project_name,
author="Intel AIPT Team",
version=get_build_version(),
description="Repository of Intel® Neural Compressor",
long_description=open("README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords="quantization,auto-tuning,post-training static quantization,"
"post-training dynamic quantization,quantization-aware training",
license="Apache 2.0",
url="https://github.com/intel/neural-compressor",
packages=include_packages,
include_package_data=True,
package_data=package_data,
install_requires=install_requires,
ext_modules=ext_modules, # for fp8
cmdclass=cmdclass, # for fp8
entry_points=entry_points,
extras_require=extras_require,
python_requires=">=3.7.0",
classifiers=[
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: Apache Software License",
],
)