diff --git a/README.md b/README.md
index c234833b591..3c220c7b1dd 100644
--- a/README.md
+++ b/README.md
@@ -1,17 +1,17 @@
-Introduction to Intel® LPOT
+Introduction to Intel® Neural Compressor
===========================

-The Intel® Low Precision Optimization Tool (Intel® LPOT) is an open-source Python library that delivers a unified low-precision inference interface across multiple Intel-optimized Deep Learning (DL) frameworks on both CPUs and GPUs. It supports automatic accuracy-driven tuning strategies, along with additional objectives such as optimizing for performance, model size, and memory footprint. It also provides easy extension capability for new backends, tuning strategies, metrics, and objectives.
+Intel® Neural Compressor (formerly known as Intel® Low Precision Optimization Tool) is an open-source Python library running on Intel CPUs and GPUs that delivers unified interfaces across multiple deep learning frameworks for popular network compression technologies, such as quantization, pruning, and knowledge distillation. The tool supports automatic accuracy-driven tuning strategies to help users quickly find the best quantized model. It also implements different weight pruning algorithms to generate a pruned model with a predefined sparsity goal, and supports knowledge distillation to distill knowledge from a teacher model to a student model.

> **Note**
>
> GPU support is under development.

-**Visit the Intel® LPOT online document website at: .**
+**Visit the Intel® Neural Compressor online document website at: .**

## Architecture

-Intel® LPOT features an infrastructure and workflow that aids in increasing performance and faster deployments across architectures.
+Intel® Neural Compressor features an infrastructure and workflow that aid in increasing performance and speeding up deployments across architectures.

#### Infrastructure

@@ -32,17 +32,17 @@ Click the image to enlarge it.

#### Supported Frameworks

-Supported Intel-optimized DL frameworks are:
+Supported deep learning frameworks are:
* [TensorFlow\*](https://github.com/Intel-tensorflow/tensorflow), including [1.15.0 UP3](https://github.com/Intel-tensorflow/tensorflow/tree/v1.15.0up3), [1.15.0 UP2](https://github.com/Intel-tensorflow/tensorflow/tree/v1.15.0up2), [1.15.0 UP1](https://github.com/Intel-tensorflow/tensorflow/tree/v1.15.0up1), [2.1.0](https://github.com/Intel-tensorflow/tensorflow/tree/v2.1.0), [2.2.0](https://github.com/Intel-tensorflow/tensorflow/tree/v2.2.0), [2.3.0](https://github.com/Intel-tensorflow/tensorflow/tree/v2.3.0), [2.4.0](https://github.com/Intel-tensorflow/tensorflow/tree/v2.4.0), [2.5.0](https://github.com/Intel-tensorflow/tensorflow/tree/v2.5.0), [Official TensorFlow 2.6.0](https://github.com/tensorflow/tensorflow/tree/v2.6.0)

-> **Note**: Intel Optimized TensorFlow 2.5.0 requires to set environment variable TF_ENABLE_MKL_NATIVE_FORMAT=0 before running LPOT quantization or deploying the quantized model.
+> **Note**: Intel Optimized TensorFlow 2.5.0 requires setting the environment variable TF_ENABLE_MKL_NATIVE_FORMAT=0 before running Neural Compressor quantization or deploying the quantized model.

-> **Note**: From the official TensorFlow 2.6.0, oneDNN support has been upstreamed. Download the official TensorFlow 2.6.0 binary for the CPU device and set the environment variable TF_ENABLE_ONEDNN_OPTS=1 before running the LPOT quantization or deploying the quantized model.
+> **Note**: As of official TensorFlow 2.6.0, oneDNN support has been upstreamed.
Download the official TensorFlow 2.6.0 binary for the CPU device and set the environment variable TF_ENABLE_ONEDNN_OPTS=1 before running the quantization process or deploying the quantized model.
* [PyTorch\*](https://pytorch.org/), including [1.5.0+cpu](https://download.pytorch.org/whl/torch_stable.html), [1.6.0+cpu](https://download.pytorch.org/whl/torch_stable.html), [1.8.0+cpu](https://download.pytorch.org/whl/torch_stable.html)
* [Apache\* MXNet](https://mxnet.apache.org), including [1.6.0](https://github.com/apache/incubator-mxnet/tree/1.6.0), [1.7.0](https://github.com/apache/incubator-mxnet/tree/1.7.0), [1.8.0](https://github.com/apache/incubator-mxnet/tree/1.8.0)
* [ONNX\* Runtime](https://github.com/microsoft/onnxruntime), including [1.6.0](https://github.com/microsoft/onnxruntime/tree/v1.6.0), [1.7.0](https://github.com/microsoft/onnxruntime/tree/v1.7.0), [1.8.0](https://github.com/microsoft/onnxruntime/tree/v1.8.0)
-
+* [Engine](./docs/engine.md), which is a built-in bare metal [inference engine](./engine) in Intel® Neural Compressor.

## Installation

@@ -51,39 +51,39 @@ Select the installation based on your operating system.

### Linux Installation

-You can install LPOT using one of three options: Install just the LPOT library
+You can install Neural Compressor using one of three options: Install just the library
from binary or source, or get the Intel-optimized framework together with the
-LPOT library by installing the [Intel® oneAPI AI Analytics Toolkit](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html).
+library by installing the [Intel® oneAPI AI Analytics Toolkit](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html).

#### Option 1 Install from binary

```Shell
  # install stable version from pip
-  pip install lpot
+  pip install neural-compressor

  # install nightly version from pip
-  pip install -i https://test.pypi.org/simple/ lpot
+  pip install -i https://test.pypi.org/simple/ neural-compressor

  # install stable version from conda
-  conda install lpot -c conda-forge -c intel
+  conda install neural-compressor -c conda-forge -c intel
```

#### Option 2 Install from source

```Shell
-  git clone https://github.com/intel/lpot.git
-  cd lpot
+  git clone https://github.com/intel/neural-compressor.git
+  cd neural-compressor
  pip install -r requirements.txt
  python setup.py install
```

#### Option 3 Install from AI Kit

-The Intel® LPOT library is released as part of the
+The Intel® Neural Compressor library is released as part of the
[Intel® oneAPI AI Analytics Toolkit](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html) (AI Kit).
The AI Kit provides a consolidated package of Intel's latest deep learning and
-machine learning optimizations all in one place for ease of development. Along with
-LPOT, the AI Kit includes Intel-optimized versions of deep learning frameworks
+machine learning optimizations all in one place for ease of development. Along with
+Neural Compressor, the AI Kit includes Intel-optimized versions of deep learning frameworks
(such as TensorFlow and PyTorch) and high-performing Python libraries to
streamline end-to-end data science and AI workflows on Intel architectures.

@@ -107,12 +107,12 @@ The following prerequisites and requirements must be satisfied for a successful

- Download and install [anaconda](https://anaconda.org/).

-- Create a virtual environment named lpot in anaconda:
+- Create a virtual environment named nc in anaconda:

    ```shell
    # Here we install python 3.7 for instance. You can also choose python 3.6, 3.8, or 3.9.
-    conda create -n lpot python=3.7
-    conda activate lpot
+    conda create -n nc python=3.7
+    conda activate nc
    ```

**Installation options**

#### Option 1 Install from binary

```Shell
  # install stable version from pip
-  pip install lpot
+  pip install neural-compressor

  # install nightly version from pip
-  pip install -i https://test.pypi.org/simple/ lpot
+  pip install -i https://test.pypi.org/simple/ neural-compressor

  # install from conda
-  conda install lpot -c conda-forge -c intel
+  conda install neural-compressor -c conda-forge -c intel
```

#### Option 2 Install from source

```shell
-git clone https://github.com/intel/lpot.git
-cd lpot
+git clone https://github.com/intel/neural-compressor.git
+cd neural-compressor
pip install -r requirements.txt
python setup.py install
```

@@ -142,21 +142,23 @@ python setup.py install

**Get Started**

-* [APIs](docs/api-introduction.md) explains Intel® Low Precision Optimization Tool's API.
-* [Transform](docs/transform.md) introduces how to utilize LPOT's built-in data processing and how to develop a custom data processing method.
-* [Dataset](docs/dataset.md) introduces how to utilize LPOT's built-in dataset and how to develop a custom dataset.
-* [Metric](docs/metric.md) introduces how to utilize LPOT's built-in metrics and how to develop a custom metric.
-* [Tutorial](docs/tutorial.md) provides comprehensive instructions on how to utilize LPOT's features with examples.
-* [Examples](/examples) are provided to demonstrate the usage of LPOT in different frameworks: TensorFlow, PyTorch, MXNet, and ONNX Runtime.
+* [APIs](docs/api-introduction.md) explains Intel® Neural Compressor's API.
+* [Transform](docs/transform.md) introduces how to utilize Neural Compressor's built-in data processing and how to develop a custom data processing method.
+* [Dataset](docs/dataset.md) introduces how to utilize Neural Compressor's built-in datasets and how to develop a custom dataset.
+* [Metric](docs/metric.md) introduces how to utilize Neural Compressor's built-in metrics and how to develop a custom metric.
+* [Tutorial](docs/tutorial.md) provides comprehensive instructions on how to utilize Neural Compressor's features with examples.
+* [Examples](/examples) demonstrate the usage of Neural Compressor in different frameworks: TensorFlow, PyTorch, MXNet, and ONNX Runtime.
* [Intel® Neural Compressor Bench](docs/bench.md) is a web-based system used to simplify Intel® Neural Compressor usage.
* [Intel oneAPI AI Analytics Toolkit Get Started Guide](https://software.intel.com/content/www/us/en/develop/documentation/get-started-with-ai-linux/top.html) explains the AI Kit components, installation and configuration guides, and instructions for building and running sample apps.
* [AI and Analytics Samples](https://github.com/oneapi-src/oneAPI-samples/tree/master/AI-and-Analytics) includes code samples for Intel oneAPI libraries.

**Deep Dive**

-* [Quantization](docs/Quantization.md) are processes that enable inference and training by performing computations at low-precision data types, such as fixed-point integers. LPOT supports Post-Training Quantization ([PTQ](docs/PTQ.md)) with [different quantization capabilities](docs/backend_quant.md) and Quantization-Aware Training ([QAT](docs/QAT.md)). Note that ([Dynamic Quantization](docs/dynamic_quantization.md)) currently has limited support.
+* [Quantization](docs/Quantization.md) is a process that enables inference and training by performing computations at low-precision data types, such as fixed-point integers. Neural Compressor supports Post-Training Quantization ([PTQ](docs/PTQ.md)) with [different quantization capabilities](docs/backend_quant.md) and Quantization-Aware Training ([QAT](docs/QAT.md)). Note that [Dynamic Quantization](docs/dynamic_quantization.md) currently has limited support.
* [Pruning](docs/pruning.md) provides a common method for introducing sparsity in weights and activations.
-* [Benchmarking](docs/benchmark.md) introduces how to utilize the benchmark interface of LPOT.
+* [Knowledge Distillation](docs/distillation.md) provides a common method for distilling knowledge from a teacher model to a student model.
+* [Distributed Training](docs/distributed.md) introduces how to leverage Horovod for multi-node training in Intel® Neural Compressor to reduce training time.
+* [Benchmarking](docs/benchmark.md) introduces how to utilize the benchmark interface of Neural Compressor.
* [Mixed precision](docs/mixed_precision.md) introduces how to enable mixed precision, including BF16, INT8, and FP32, on Intel platforms during tuning.
* [Graph Optimization](docs/graph_optimization.md) introduces how to enable graph optimization for FP32 and auto-mixed precision.
* [Model Conversion](docs/model_conversion.md) introduces how to convert a TensorFlow QAT model to a quantized model running on Intel platforms.
@@ -164,7 +166,8 @@ python setup.py install

**Advanced Topics**

-* [Adaptor](docs/adaptor.md) is the interface between LPOT and framework. The method to develop adaptor extension is introduced with ONNX Runtime as example.
+* [Engine](docs/engine.md) is a new backend in Intel® Neural Compressor that provides domain-specific acceleration for NLP models.
+* [Adaptor](docs/adaptor.md) is the interface between the Neural Compressor components and the framework. The method to develop an adaptor extension is introduced with ONNX Runtime as an example.
* [Strategy](docs/tuning_strategies.md) can automatically optimize low-precision recipes for deep learning models to achieve optimal product objectives, such as inference performance and memory usage, with expected accuracy criteria. The method to develop a new strategy is introduced.

**Publications**

@@ -179,12 +182,14 @@ Full publication list please refers to [here](docs/publication_list.md)

## System Requirements

-Intel® Low Precision Optimization Tool supports systems based on [Intel 64 architecture or compatible processors](https://en.wikipedia.org/wiki/X86-64), specially optimized for the following CPUs:
+Intel® Neural Compressor supports systems based on [Intel 64 architecture or compatible processors](https://en.wikipedia.org/wiki/X86-64), specially optimized for the following CPUs:

* Intel Xeon Scalable processor (formerly Skylake, Cascade Lake, Cooper Lake, and Ice Lake)
* future Intel Xeon Scalable processor (code name Sapphire Rapids)

-Intel® Low Precision Optimization Tool requires installing the pertinent Intel-optimized framework version for TensorFlow, PyTorch, MXNet, and ONNX runtime.
+Intel® Neural Compressor requires installing the Intel-optimized framework version for the supported DL framework you use: TensorFlow, PyTorch, MXNet, or ONNX Runtime.
+
+Note: Intel Neural Compressor supports Intel-optimized and official frameworks for some TensorFlow versions. Refer to [Supported Frameworks](#Supported-Frameworks) for specifics.
### Validated Hardware/Software Environment

@@ -271,7 +276,7 @@ Intel® Low Precision Optimization Tool requires installing the pertinent Intel-

### Validated Models

-Intel® Low Precision Optimization Tool provides numerous examples to show promising accuracy loss with the best performance gain. A full quantized model list on various frameworks is available in the [Model List](docs/full_model_list.md).
+Intel® Neural Compressor provides numerous examples that demonstrate minimal accuracy loss with the best performance gain. A full quantized model list on various frameworks is available in the [Model List](docs/full_model_list.md).

#### Validated MLPerf Models

@@ -289,29 +294,29 @@ Intel® Low Precision Optimization Tool provides numerous examples to show promi
   ResNet50 v1.5
   TensorFlow
   Yes
-  Link
+  Link
   PyTorch
   Yes
-  Link
+  Link

   DLRM
   PyTorch
   Yes
-  Link
+  Link

   BERT-large
   TensorFlow
   Yes
-  Link
+  Link
   PyTorch
   Yes
-  Link
+  Link

   SSD-ResNet34
@@ -322,7 +327,7 @@ Intel® Low Precision Optimization Tool provides numerous examples to show promi
   PyTorch
   Yes
-  Link
+  Link

   RNN-T
@@ -339,7 +344,7 @@ Intel® Low Precision Optimization Tool provides numerous examples to show promi
   PyTorch
   Yes
-  Link
+  Link

@@ -847,4 +852,4 @@ Intel® Low Precision Optimization Tool provides numerous examples to show promi
* [Contribution Guidelines](contributions.md)
* [Legal](legal_information.md)
* [Security Policy](security_policy.md)
-* [Intel® LPOT Website](https://intel.github.io/lpot)
+* [Intel® Neural Compressor Website](https://intel.github.io/neural-compressor)
diff --git a/api-documentation/apis.rst b/api-documentation/apis.rst
index c78c7cef76d..4041a07d3be 100644
--- a/api-documentation/apis.rst
+++ b/api-documentation/apis.rst
@@ -1,20 +1,20 @@
APIs
####

-.. automodule:: lpot.benchmark
+.. automodule:: neural_compressor.benchmark
   :members:

-.. autoclass:: lpot.benchmark.Benchmark
+.. autoclass:: neural_compressor.benchmark.Benchmark
   :members:

-.. automodule:: lpot.objective
+.. automodule:: neural_compressor.objective
   :members:

-.. automodule:: lpot.pruning
+.. automodule:: neural_compressor.pruning
   :members:

-.. automodule:: lpot.quantization
+.. automodule:: neural_compressor.quantization
   :members:

-.. automodule:: lpot.version
+.. automodule:: neural_compressor.version
   :members:
\ No newline at end of file
diff --git a/conf.py b/conf.py
index 8c28a7a3494..1fe8034391b 100644
--- a/conf.py
+++ b/conf.py
@@ -17,20 +17,20 @@ sys.path.insert(0, os.path.abspath('.'))

import importlib.util
moduleName = 'version'
-modulePath = os.getcwd() + '/lpot/version.py'
+modulePath = os.getcwd() + '/neural_compressor/version.py'
spec = importlib.util.spec_from_file_location(moduleName,modulePath)
-LPOTversion = importlib.util.module_from_spec(spec)
-spec.loader.exec_module(LPOTversion)
+NCversion = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(NCversion)

# -- Project information -----------------------------------------------------

-project = 'Intel® Low Precision Optimization Tool'
-copyright = '2021, Intel® Low Precision Optimization Tool'
-author = 'Intel® LPOT developers'
+project = 'Intel® Neural Compressor'
+copyright = '2021, Intel® Neural Compressor'
+author = 'Intel® Neural Compressor developers'

# The short X.Y version
-version = LPOTversion.__version__
+version = NCversion.__version__
# The full version, including alpha/beta/rc tags
release = ''

@@ -137,7 +137,7 @@
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
-    (master_doc, 'ProjectnameIntelLowPrecisionOptimizationTool.tex', '\\textgreater{} Project name: Intel® Low Precision Optimization Tool Documentation',
+    (master_doc, 'ProjectnameIntelLowPrecisionOptimizationTool.tex', '\\textgreater{} Project name: Intel® Neural Compressor Documentation',
     'Various', 'manual'),
]

@@ -147,7 +147,7 @@
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
-    (master_doc, 'projectnameintellowprecisionoptimizationtool', '> Project name: Intel® Low Precision Optimization Tool Documentation',
+    (master_doc, 'projectnameintellowprecisionoptimizationtool', '> Project name: Intel® Neural Compressor Documentation',
     [author], 1)
]

@@ -158,7 +158,7 @@
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
-    (master_doc, 'ProjectnameIntelLowPrecisionOptimizationTool', '> Project name: Intel® Low Precision Optimization Tool Documentation',
+    (master_doc, 'ProjectnameIntelLowPrecisionOptimizationTool', '> Project name: Intel® Neural Compressor Documentation',
     author, 'ProjectnameIntelLowPrecisionOptimizationTool', 'One line description of project.', 'Miscellaneous'),
]

@@ -171,7 +171,7 @@ def setup(app):
sphinx_md_useGitHubURL = True
baseBranch = "master"
commitSHA = getenv('GITHUB_SHA')
-githubBaseURL = 'https://github.com/' + (getenv('GITHUB_REPOSITORY') or 'intel/lpot') + '/'
+githubBaseURL = 'https://github.com/' + (getenv('GITHUB_REPOSITORY') or 'intel/neural-compressor') + '/'
githubFileURL = githubBaseURL + "blob/"
githubDirURL = githubBaseURL + "tree/"
if commitSHA:
diff --git a/contributions.md b/contributions.md
index 4755b652cc3..761a82e6319 100644
--- a/contributions.md
+++ b/contributions.md
@@ -1,8 +1,8 @@
Contribution Guidelines
=======================

-If you have improvements to Intel® Low Precision Optimization Tool, send your pull requests for
-[review](https://github.com/intel/lpot/pulls). If you are new to Github, view the pull request
+If you have improvements to Intel® Neural Compressor, send your pull requests for
+[review](https://github.com/intel/neural-compressor/pulls). If you are new to GitHub, view the pull request
[How To](https://help.github.com/articles/using-pull-requests/).

@@ -12,8 +12,8 @@ Before sending your pull requests, follow the information below:

- Changes are consistent with the Python [Coding Style](https://github.com/google/styleguide/blob/gh-pages/pyguide.md).
- Use pylint to check your Python code.
- Use flake8 and autopep8 to make Python code clean.
-- Add unit tests in [Unit Tests](https://github.com/intel/lpot/tree/master/test) to cover the code you would like to contribute.
-- Run [Unit Tests](https://github.com/intel/lpot/tree/master/test).
+- Add unit tests in [Unit Tests](https://github.com/intel/neural-compressor/tree/master/test) to cover the code you would like to contribute.
+- Run [Unit Tests](https://github.com/intel/neural-compressor/tree/master/test).

## Pull Request Template

@@ -43,7 +43,7 @@ Provide the development or test environment info.

## Support

Submit your questions, feature requests, and bug reports to the
-[GitHub issues](https://github.com/intel/lpot/issues) page. You may also reach out to lpot.maintainers@intel.com.
+[GitHub issues](https://github.com/intel/neural-compressor/issues) page. You may also reach out to [Maintainers](mailto:neural_compressor.maintainers@intel.com).
## Contributor Covenant Code of Conduct
diff --git a/docs/QAT.md b/docs/QAT.md
index 114f08c2da5..0b4fefc22eb 100644
--- a/docs/QAT.md
+++ b/docs/QAT.md
@@ -96,8 +96,8 @@ More on quantization-aware training:

* We can simulate the accuracy of a quantized model in floating points since we are using fake-quantization to model the numerics of actual quantized arithmetic.
* We can easily mimic post-training quantization.

-Intel® Low Precision Optimization Tool can support QAT calibration for
-PyTorch models. Refer to the [QAT model](https://github.com/intel/lpot/tree/master/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/README.md) for step-by-step tuning.
+Intel® Neural Compressor supports QAT calibration for
+PyTorch models. Refer to the [QAT model](https://github.com/intel/neural-compressor/tree/master/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/README.md) for step-by-step tuning.

### Example
View a [QAT example of PyTorch resnet50](/examples/pytorch/eager/image_recognition/imagenet/cpu/qat).
diff --git a/docs/adaptor.md b/docs/adaptor.md
index e6c266dc1ca..653ccb680be 100644
--- a/docs/adaptor.md
+++ b/docs/adaptor.md
@@ -3,15 +3,15 @@ Adaptor

## Introduction

-Intel® Low Precision Optimization Tool (LPOT) built the low-precision inference
-solution on popular Deep Learning frameworks such as TensorFlow, PyTorch,
-MXNet, and ONNX Runtime. The adaptor layer is the bridge between the LPOT
+Intel® Neural Compressor builds the low-precision inference
+solution on popular deep learning frameworks such as TensorFlow, PyTorch,
+MXNet, and ONNX Runtime. The adaptor layer is the bridge between the
tuning strategy and vanilla framework quantization APIs.

## Adaptor Design

-LPOT supports a new adaptor extension by
-implementing a subclass `Adaptor` class in the lpot.adaptor package
+Neural Compressor supports a new adaptor extension by
+implementing a subclass of the `Adaptor` class in the neural_compressor.adaptor package
and registering it with the `adaptor_registry` decorator.

For example, a user can implement an `Abc` adaptor like below:

@@ -46,7 +46,7 @@ class AbcAdaptor(Adaptor):
#### Background

Besides the adaptor API, we also introduced the Query API which describes the
-behavior of a specific framework. With this API, LPOT can easily query the
+behavior of a specific framework. With this API, Neural Compressor can easily query the
following information on the current runtime framework.

* The runtime version information.
@@ -54,13 +54,13 @@ following information on the current runtime framework.
* The supported sequence of each quantizable op.
* The instance of each sequence.

-In the past, the above information was generally defined and hidden in every corner of the code which made effective maintenance difficult. With the Query API, we only need to create one unified yaml file and call the corresponding API to get the information. For example, the [tensorflow.yaml](../lpot/adaptor/tensorflow.yaml) keeps the current Tensorflow framework ability. We recommend that the end user not make modifications if requirements are not clear.
+In the past, the above information was generally defined and hidden in every corner of the code, which made effective maintenance difficult. With the Query API, we only need to create one unified yaml file and call the corresponding API to get the information. For example, the [tensorflow.yaml](../neural_compressor/adaptor/tensorflow.yaml) captures the current TensorFlow framework capability.
We recommend that end users not make modifications if the requirements are not clear.

#### Unify Config Introduction

Below is a fragment of the TensorFlow configuration file.

-* **precisions** field defines the supported precision for LPOT.
+* **precisions** field defines the supported precision for Neural Compressor.
  - valid_mixed_precision enumerates all supported precision combinations for a specific scenario. For example, if the hardware doesn't support bf16, it should be `int8 + fp32`.
* **ops** field defines the valid OP type list for each precision.
* **capabilities** field focuses on the quantization ability of specific ops, such as granularity, scheme, and algorithm. The activation assumes the same data type for both input and output activation by default, based on op semantics defined by frameworks.

@@ -193,13 +193,13 @@ Below is a fragment of the Tensorflow configuration file.
```

#### Query API Introduction

-The abstract class `QueryBackendCapability` is defined in [query.py](../lpot/adaptor/query.py#L21). Each framework should inherit it and implement the member function if needed. Refer to Tensorflow implementation [TensorflowQuery](../lpot/adaptor/tensorflow.py#L628).
+The abstract class `QueryBackendCapability` is defined in [query.py](../neural_compressor/adaptor/query.py#L21). Each framework should inherit it and implement the member functions if needed. Refer to the TensorFlow implementation [TensorflowQuery](../neural_compressor/adaptor/tensorflow.py#L628).

## Customize a New Framework Backend

Look at onnxruntime as an example. ONNX Runtime is a backend proposed by Microsoft, and is based on the MLAS kernel by default.
-Onnxruntime already has [quantization tools](https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/quantization), so the question becomes how to integrate onnxruntime quantization tools into LPOT.
+Onnxruntime already has [quantization tools](https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/quantization), so the question becomes how to integrate the onnxruntime quantization tools into Neural Compressor.

1. Capability

   * &1.8 nodes_to_quantize, nodes_to_exclude
   * op_types_to_quantize

-   We can pass a tune capability to LPOT such as:
+   We can pass a tune capability to Neural Compressor such as:

   ```yaml
   {'optypewise': {'conv':
@@ -243,7 +243,7 @@ Onnxruntime already has [quantization tools](https://github.com/microsoft/onnxru

2. Parse tune config

-   LPOT can generate a tune config from your tune capability such as the
+   Neural Compressor can generate a tune config from your tune capability such as the
   following:

   ```yaml
@@ -286,4 +286,4 @@ Onnxruntime already has [quantization tools](https://github.com/microsoft/onnxru

4. Do quantization

-   This part depends on your backend implementations. Refer to [onnxruntime](../lpot/adaptor/onnxrt.py) as an example.
+   This part depends on your backend implementations. Refer to [onnxruntime](../neural_compressor/adaptor/onnxrt.py) as an example.
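As a rough sketch of the extension pattern this adaptor document describes, a new-backend skeleton might look like the following. It follows the `Abc` example and `adaptor_registry` mechanism above; the import path and method signatures are assumptions inferred from that description, and the bodies are stubs rather than a working backend.

```python
# Hypothetical skeleton of a custom adaptor, following the Abc example above.
# The import path and signatures are assumptions, not a verified API surface.
from neural_compressor.adaptor.adaptor import Adaptor, adaptor_registry

@adaptor_registry
class AbcAdaptor(Adaptor):
    def __init__(self, framework_specific_info):
        super().__init__(framework_specific_info)

    def quantize(self, tune_cfg, model, dataloader, q_func=None):
        # Translate the tuning config into backend quantization calls
        # and return the quantized model.
        ...

    def evaluate(self, model, dataloader, postprocess=None, metric=None,
                 measurer=None, iteration=-1, tensorboard=False):
        # Run inference over the dataloader and return the accuracy scalar
        # consumed by the tuning strategy.
        ...

    def query_fw_capability(self, model):
        # Report quantizable ops and their granularity/scheme/algorithm
        # capabilities (see the Query API above).
        ...
```

Once registered this way, the tuning strategy can dispatch to the new backend exactly as it does for the built-in TensorFlow, PyTorch, MXNet, and ONNX Runtime adaptors.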
diff --git a/docs/api-introduction.md b/docs/api-introduction.md
index 7e389b6f262..f7093ff42b4 100644
--- a/docs/api-introduction.md
+++ b/docs/api-introduction.md
@@ -3,7 +3,7 @@ API Documentation

## Introduction

-Intel® Low Precision Optimization Tool is an open-source Python library designed to help users quickly deploy low-precision inference solutions on popular deep learning (DL) frameworks such as TensorFlow*, PyTorch*, MXNet, and ONNX Runtime. It automatically optimizes low-precision recipes for deep learning models in order to achieve optimal product objectives, such as inference performance and memory usage, with expected accuracy criteria.
+Intel® Neural Compressor is an open-source Python library designed to help users quickly deploy low-precision inference solutions on popular deep learning (DL) frameworks such as TensorFlow*, PyTorch*, MXNet, and ONNX Runtime. It automatically optimizes low-precision recipes for deep learning models in order to achieve optimal product objectives, such as inference performance and memory usage, with expected accuracy criteria.

## User-facing APIs

@@ -12,18 +12,18 @@ These APIs are intended to unify low-precision quantization interfaces cross mul

> **Note**
>
-> LPOT is continuously improving user-facing APIs to create a better user experience.
+> Neural Compressor is continuously improving user-facing APIs to create a better user experience.

-> Two sets of user-facing APIs exist. One is the default one supported from LPOT v1.0 for backwards compatibility. The other set consists of new APIs in
-the `lpot.experimental` package.
+> Two sets of user-facing APIs exist. One is the default one supported since Neural Compressor v1.0 for backwards compatibility. The other set consists of new APIs in
+the `neural_compressor.experimental` package.

-> We recommend that you use the APIs located in lpot.experimental. All examples have been updated to use the experimental APIs.
+> We recommend that you use the APIs located in neural_compressor.experimental. All examples have been updated to use the experimental APIs.

The major differences between the default user-facing APIs and the experimental APIs are:

-1. The experimental APIs abstract the `lpot.experimental.common.Model` concept to cover those cases whose weight and graph files are stored separately.
+1. The experimental APIs abstract the `neural_compressor.experimental.common.Model` concept to cover those cases whose weight and graph files are stored separately.
2. The experimental APIs unify the calling style of the `Quantization`, `Pruning`, and `Benchmark` classes by setting model, calibration dataloader, evaluation dataloader, and metric through class attributes rather than passing them as function inputs.
-3. The experimental APIs refine LPOT built-in transforms/datasets/metrics by unifying the APIs cross different framework backends.
+3. The experimental APIs refine Neural Compressor built-in transforms/datasets/metrics by unifying the APIs across different framework backends.

## Experimental user-facing APIs

Experimental user-facing APIs consist of the following components:

### Quantization-related APIs

```python
-# lpot.experimental.Quantization
+# neural_compressor.experimental.Quantization
class Quantization(object):
    def __init__(self, conf_fname_or_obj):
        ...

@@ -71,17 +71,17 @@ class Quantization(object):
```
The `conf_fname_or_obj` parameter used in the class initialization is the path to the user yaml configuration file or a Quantization_Conf class.
This yaml file is used to control the entire tuning behavior on the model.

-**LPOT User YAML Syntax**
+**Neural Compressor User YAML Syntax**

-> Intel® Low Precision Optimization Tool provides template yaml files for [Post-Training Quantization](../lpot/template/ptq.yaml), [Quantization-Aware Training](../lpot/template/qat.yaml), and [Pruning](../lpot/template/pruning.yaml) scenarios. Refer to these template files to understand the meaning of each field.
+> Intel® Neural Compressor provides template yaml files for [Post-Training Quantization](../neural_compressor/template/ptq.yaml), [Quantization-Aware Training](../neural_compressor/template/qat.yaml), and [Pruning](../neural_compressor/template/pruning.yaml) scenarios. Refer to these template files to understand the meaning of each field.

> Note that most fields in the yaml templates are optional. View the [HelloWorld Yaml](../examples/helloworld/tf_example2/conf.yaml) example for reference.

```python
# Typical Launcher code
-from lpot.experimental import Quantization, common
+from neural_compressor.experimental import Quantization, common

-# optional if LPOT built-in dataset could be used as model input in yaml
+# optional if Neural Compressor built-in dataset could be used as model input in yaml
class dataset(object):
  def __init__(self, *args):
      ...

  def __getitem__(self, idx):
      # return single sample and label tuple without batch
      ...

  def __len__(self):
      ...

-# optional if LPOT built-in metric could be used to do accuracy evaluation on model output in yaml
+# optional if Neural Compressor built-in metric could be used to do accuracy evaluation on model output in yaml
class custom_metric(object):
    def __init__(self):
        ...

@@ -104,31 +104,31 @@ class custom_metric(object):
    def result(self):
        # final metric calculation invoked only once after all mini-batch are evaluated
-       # return a scalar to lpot for accuracy-driven tuning.
+       # return a scalar to neural_compressor for accuracy-driven tuning.
        # by default the scalar is higher-is-better. if not, set tuning.accuracy_criterion.higher_is_better to false in yaml.
        ...

quantizer = Quantization('conf.yaml')
quantizer.model = common.Model('/path/to/model')
-# below two lines are optional if LPOT built-in dataset is used as model calibration input in yaml
+# below two lines are optional if Neural Compressor built-in dataset is used as model calibration input in yaml
cal_dl = dataset('/path/to/calibration/dataset')
quantizer.calib_dataloader = common.DataLoader(cal_dl, batch_size=32)
-# below two lines are optional if LPOT built-in dataset is used as model evaluation input in yaml
+# below two lines are optional if Neural Compressor built-in dataset is used as model evaluation input in yaml
dl = dataset('/path/to/evaluation/dataset')
quantizer.eval_dataloader = common.DataLoader(dl, batch_size=32)
-# optional if LPOT built-in metric could be used to do accuracy evaluation in yaml
+# optional if Neural Compressor built-in metric could be used to do accuracy evaluation in yaml
quantizer.metric = common.Metric(custom_metric)
q_model = quantizer()
q_model.save('/path/to/output/dir')
```

-`model` attribute in `Quantization` class is an abstraction of model formats across different frameworks. LPOT supports passing the path of `keras model`, `frozen pb`, `checkpoint`, `saved model`, `torch.nn.model`, `mxnet.symbol.Symbol`, `gluon.HybirdBlock`, and `onnx model` to instantiate a `lpot.experimental.common.Model()` class and set to `quantizer.model`.
+`model` attribute in `Quantization` class is an abstraction of model formats across different frameworks.
Neural Compressor supports passing the path of `keras model`, `frozen pb`, `checkpoint`, `saved model`, `torch.nn.model`, `mxnet.symbol.Symbol`, `gluon.HybridBlock`, and `onnx model` to instantiate a `neural_compressor.experimental.common.Model()` class and set it to `quantizer.model`.

`calib_dataloader` and `eval_dataloader` attributes in `Quantization` class are used to set up calibration and evaluation dataloaders by code. They are optional if the user sets the corresponding fields in yaml.

-`metric` attribute in `Quantization` class is used to set up a custom metric by code. It is optional to set if user finds LPOT built-in metric could be used with their model and sets corresponding fields in yaml.
+`metric` attribute in `Quantization` class is used to set up a custom metric by code. It is optional if a Neural Compressor built-in metric can be used with the model and the corresponding fields are set in yaml.

-`postprocess` attribute in `Quantization` class is not necessary in most of the use cases. It is only needed when the user wants to use the LPOT built-in metric but the model output can not directly be handled by LPOT built-in metrics. In this case, the user can register a transform to convert the model output to the expected one required by the LPOT built-in metric.
+`postprocess` attribute in `Quantization` class is not necessary in most of the use cases. It is only needed when the user wants to use a built-in metric but the model output cannot directly be handled by Neural Compressor built-in metrics. In this case, the user can register a transform to convert the model output to the expected form required by the built-in metric.

`q_func` attribute in `Quantization` class is only for the `Quantization Aware Training` case, in which the user needs to register a function that takes `model` as the input parameter and executes the entire training process with self-contained training hyper-parameters (see the sketch at the end of this section).

@@ -167,7 +167,7 @@ class Pruning(object):
```

-This API is used to do sparsity pruning. Currently, it is a Proof of Concept; LPOT only supports `magnitude pruning` on PyTorch.
+This API is used to do sparsity pruning. Currently, it is a Proof of Concept; Neural Compressor only supports `magnitude pruning` on PyTorch.

To learn how to use this API, refer to the [pruning document](../docs/pruning.md).

@@ -203,8 +203,8 @@ To learn how to use this API, refer to the [benchmarking document](../docs/bench

## Default user-facing APIs

-The default user-facing APIs exist for backwards compatibility from the v1.0 release. Refer to [v1.1 API](https://github.com/intel/lpot/blob/v1.1/docs/introduction.md) to understand how the default user-facing APIs work.
+The default user-facing APIs exist for backwards compatibility from the v1.0 release. Refer to [v1.1 API](https://github.com/intel/neural-compressor/blob/v1.1/docs/introduction.md) to understand how the default user-facing APIs work.

View the [HelloWorld example](/examples/helloworld/tf_example6) that uses default user-facing APIs for user reference.

-Full examples using default user-facing APIs can be found [here](https://github.com/intel/lpot/tree/v1.1/examples).
+Full examples using default user-facing APIs can be found [here](https://github.com/intel/neural-compressor/tree/v1.1/examples).
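For the `q_func` contract described earlier, here is a minimal sketch of a self-contained training function for the Quantization Aware Training case. The synthetic dataset, the 10-class classifier assumption, and the hyper-parameters are all illustrative; only the idea of registering such a function on the quantizer comes from the text above.

```python
# Illustrative sketch only: a self-contained training function for QAT.
# The synthetic data and hyper-parameters below are assumptions; a real
# q_func would run the user's own fine-tuning loop on the user's own data.
import torch
from torch.utils.data import DataLoader, TensorDataset

def training_func_for_quantization(model):
    # Assumes an image classifier with 10 output classes.
    data = TensorDataset(torch.randn(64, 3, 224, 224),
                         torch.randint(0, 10, (64,)))
    train_loader = DataLoader(data, batch_size=8)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
    criterion = torch.nn.CrossEntropyLoss()
    model.train()
    for _ in range(2):  # self-contained number of fine-tuning epochs
        for images, labels in train_loader:
            optimizer.zero_grad()
            criterion(model(images), labels).backward()
            optimizer.step()
    return model

# Registered on the quantizer in place of a calibration dataloader:
# quantizer.q_func = training_func_for_quantization
```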
diff --git a/docs/benchmark.md b/docs/benchmark.md
index 32bfd231ed7..5706a3ad013 100644
--- a/docs/benchmark.md
+++ b/docs/benchmark.md
@@ -1,15 +1,15 @@
Benchmarking
============

-The benchmarking feature of LPOT is used to measure the model performance with the objective settings; the user can get the performance of the models between the float32 model and the quantized low precision model in the same scenarios that they configured in Yaml. Benchmarking is always used after a quantization process.
+The benchmarking feature of Neural Compressor is used to measure model performance with the objective settings; users can compare the performance of the float32 model and the quantized low-precision model under the same scenarios configured in yaml. Benchmarking is always used after a quantization process.

The following examples show how to use benchmarking.

## Config evaluation field in a yaml file

```yaml
-evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization.
-  accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization.
+evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
+  accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
   metric:
     topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric.
   dataloader:

@@ -49,11 +49,11 @@ The above example config two sub-fields named 'accuracy' and 'performance' which

## Use a user-specific dataloader to run benchmark

-In this case, configure your dataloader and LPOT will construct an evaluation function to run the benchmarking. The user can also register the postprocess transform and metric to get the accuracy.
+In this case, configure your dataloader and Neural Compressor will construct an evaluation function to run the benchmarking. The user can also register the postprocess transform and metric to get the accuracy.

```python
dataset = Dataset() # dataset class that implements __getitem__ or __iter__ method
-from lpot.experimental import Benchmark, common
+from neural_compressor.experimental import Benchmark, common
evaluator = Benchmark('config.yaml')
evaluator.dataloader = common.DataLoader(dataset, batch_size=batch_size)
# user can also register postprocess and metric, this is optional
diff --git a/docs/dataloader.md b/docs/dataloader.md
index 559a3f3a180..633684dbb63 100644
--- a/docs/dataloader.md
+++ b/docs/dataloader.md
@@ -3,7 +3,7 @@ DataLoader

Deep Learning often encounters large datasets that are memory-consuming. Previously, working with large datasets required loading them into memory all at once. The constant lack of memory resulted in the need for an efficient data generation scheme. This is not only about handling the lack of memory in large datasets, but also about making the process of loading data faster using a multi-processing thread. We call the data generation object a DataLoader.

-With the importance of a dataloader, different frameworks can have their own DataLoadermodule. As for LPOT, it needs to calibrate the inputs/outputs of each layer of the model; the framework-specific dataloader has different features and APIs that will make it hard to use them same way in the tool. Another request is that the tool also treat batch size as a tuning parameter which means the tool can dynamically change the batch size to get the accuracy target.
The third reason is for ease of use; a unified DataLoader API can make it easy to config dataloader in a yaml file without any code modification. Considering about all these advantages, the tool has implemented an internal dataloader.
+Given the importance of a dataloader, different frameworks can have their own DataLoader module. As for Neural Compressor, it needs to calibrate the inputs/outputs of each layer of the model, and the framework-specific dataloaders have different features and APIs, which makes it hard to use them in the same way within the tool. Another requirement is that the tool treats batch size as a tuning parameter, which means it can dynamically change the batch size to reach the accuracy target. The third reason is ease of use: a unified DataLoader API makes it easy to configure the dataloader in a yaml file without any code modification. Considering all these advantages, the tool implements an internal dataloader.

The dataloader takes a dataset as the input parameter and loads data from the dataset when needed.

@@ -19,7 +19,7 @@ A dataset uses transform as its data process component. Transform contains three

A general transform can be used in both preprocessing and postprocessing; one can also implement a specific transform by inheriting from the Transform class and implementing the `__call__` method. Usually, a dataloader uses the transform for preprocessing, and the postprocessing transform gives the processed data to the metric to update. Transforms can also be composed together into one transform and applied serially.

-Transform for preprocessing will be launched in the dataset `__getitem__` or `__next__` method; that means the transform will be used after the dataloader has loaded batched data and before the data given to the model for inference. That helps reduce the memory compared with load and process all data at once. Transform for postprocessing is used in evaluation function of the internal LPOT to process the inference data and the processed data used by metric.
+The preprocessing transform is launched in the dataset `__getitem__` or `__next__` method, which means it runs after the dataloader has loaded batched data and before the data is given to the model for inference. This reduces memory usage compared with loading and processing all data at once. The postprocessing transform is used in the internal evaluation function of Neural Compressor to process the inference output; the processed data is then used by the metric.

# How to use it

@@ -44,8 +44,8 @@ quantization: # optional. tuning constrai
        mean: [0.485, 0.456, 0.406]
        std: [0.229, 0.224, 0.225]

-evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization.
-  accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization.
+evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
+  accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
   metric:
     topk: 1
   dataloader:

@@ -100,7 +100,7 @@ calib_data = mx.io.ImageRecordIter(path_imgrec=dataset,
                                   ctx=args.ctx,
                                   **combine_mean_std)

-from lpot import Quantization, common
+from neural_compressor import Quantization, common
quantizer = Quantization('conf.yaml')
quantizer.model = common.Model(fp32_model)
quantizer.calib_dataloader = calib_data
diff --git a/docs/dataset.md b/docs/dataset.md
index 4f9d47036db..755570cde9b 100644
--- a/docs/dataset.md
+++ b/docs/dataset.md
@@ -1,76 +1,76 @@
Dataset
=======

-Users can use LPOT built-in dataset objects as well as register their own datasets.
+Users can use Neural Compressor built-in dataset objects as well as register their own datasets.

## Built-in dataset support list

-LPOT supports built-in dataloaders on popular industry datasets. Refer to this [HelloWorld example](/examples/helloworld/tf_example6) to learn how to configure a built-in dataloader.
+Neural Compressor supports built-in dataloaders on popular industry datasets. Refer to this [HelloWorld example](/examples/helloworld/tf_example6) to learn how to configure a built-in dataloader.

#### TensorFlow

| Dataset | Parameters | Comments | Usage |
| :------ | :------ | :------ | :------ |
-| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset<br> **train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions<br> **download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/MNIST/, otherwise user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**<br> dataset:<br>    MNIST:<br>      root: /path/to/root<br>      train: False<br>      download: True<br> (transform and filter are not set in the range of dataset)<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) |
-| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset<br> **train**(bool, default=False): If True, creates dataset from train subset, otherwise from validation subset<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions<br> **download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/FashionMNIST/, otherwise user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually.| **In yaml file:**<br> dataset:<br>    FashionMNIST:<br>      root: /path/to/root<br>      train: False<br>      download: True<br> (transform and filter are not set in the range of dataset)<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) |
-| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset<br> **train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions<br> **download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz manually to root/ and extract it. | **In yaml file:**<br> dataset:<br>    CIFAR10:<br>      root: /path/to/root<br>      train: False<br>      download: True<br> (transform and filter are not set in the range of dataset)<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) |
-| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset<br> **train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions<br> **download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz manually to root/ and extract it. | **In yaml file:**<br> dataset:<br>    CIFAR100:<br>      root: /path/to/root<br>      train: False<br>      download: True<br> (transform and filter are not set in the range of dataset)<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) |
-| ImageRecord(root, transform, filter) | **root** (str): Root directory of dataset<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:<br> root/validation-000-of-100<br> root/validation-001-of-100<br> ...<br> root/validation-099-of-100<br> The file name needs to follow this pattern: '* - * -of- *' | **In yaml file:**<br> dataset:<br>    ImageRecord:<br>      root: /path/to/root<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['ImageRecord'] (root=root, transform=transform, filter=None) |
-| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:<br> root/class_1/xxx.png<br> root/class_1/xxy.png<br> root/class_1/xxz.png<br> ...<br> root/class_n/123.png<br> root/class_n/nsdf3.png<br> root/class_n/asd932_.png<br> Please put images of different categories into different folders. | **In yaml file:**<br> dataset:<br>    ImageFolder:<br>      root: /path/to/root<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['ImageFolder'] (root=root,transform=transform, filter=None) |
-| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset<br> **image_list** (str): data file, record image_names and their labels<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:<br> data_path/img1.jpg<br> data_path/img2.jpg<br> ...<br> data_path/imgx.jpg<br> dataset will read name and label of each image from image_list file, if user set image_list to None, it will read from data_path/val_map.txt automatically. | **In yaml file:**<br> dataset:<br>    ImagenetRaw:<br>      data_path: /path/to/image<br>      image_list: /path/to/label<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) |
-| COCORecord(root, num_cores, transform, filter) | **root** (str): Root directory of dataset<br> **num_cores** (int, default=28):The number of input Datasets to interleave from in parallel<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions | Root is a full path to tfrecord file, which contains the file name.<br> **Please use Resize transform when batch_size > 1** | **In yaml file:**<br> dataset:<br>    COCORecord:<br>      root: /path/to/tfrecord<br>      num_cores: 28<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['COCORecord'] (root, num_cores=28, transform=transform, filter=None) |
-| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset<br> **img_dir** (str, default='val2017'): image file directory<br> **anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:<br> /root/img_dir/1.jpg<br> /root/img_dir/2.jpg<br> ...<br> /root/img_dir/n.jpg<br> /root/anno_dir<br> **Please use Resize transform when batch_size > 1** | **In yaml file:**<br> dataset:<br>    COCORaw:<br>      root: /path/to/root<br>      img_dir: /path/to/image<br>      anno_dir: /path/to/annotation<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)<br> If anno_dir is not set, the dataset will use default label map |
-| COCONpy(root, npy_dir, anno_dir) | **root** (str): Root directory of dataset<br> **npy_dir** (str, default='val2017'): npy file directory<br> **anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory | Please arrange data in this way:<br> /root/npy_dir/1.jpg.npy<br> /root/npy_dir/2.jpg.npy<br> ...<br> /root/npy_dir/n.jpg.npy<br> /root/anno_dir<br> **Please use Resize transform when batch_size > 1** | **In yaml file:**<br> dataset:<br>    COCORaw:<br>      root: /path/to/root<br>      npy_dir: /path/to/npy<br>      anno_dir: /path/to/annotation<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['COCONpy'] (root, npy_dir, anno_dir)<br> If anno_dir is not set, the dataset will use default label map |
-| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple):shape of total samples, the first dimension should be the sample count of the dataset. support create multi shape tensors, use list of tuples for each tuple in the list, will create a such size tensor.<br> **low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.<br> **high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list<br> **dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'<br> **label** (bool, default=False):whether to return 0 as label<br> **transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**<br> dataset:<br>    dummy:<br>      shape: [3, 224, 224, 3]<br>      low: 0.0<br>      high: 127.0<br>      dtype: float32<br>      label: False<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) |
-| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple):create single or multi input tensors list represent the sample shape of the dataset, eg and image size should be represented as (224, 224, 3), tuple contains multiple list and represent multi input tensors.<br> **label_shape** (list or tuple):create single or multi label tensors list represent the sample shape of the label, eg and label size should be represented as (1,), tuple contains multiple list and represent multi label tensors.<br> **low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.<br> **high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list<br> **dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'<br> **transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**<br> dataset:<br>    dummy_v2:<br>      input_shape: [224, 224, 3]<br>      low: 0.0<br>      high: 127.0<br>      dtype: float32<br> <br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) |
-| style_transfer(content_folder, style_folder, crop_ratio, resize_shape, image_format, transform, filter) | **content_folder** (str):Root directory of content images<br> **style_folder** (str):Root directory of style images<br> **crop_ratio** (float, default=0.1):cropped ratio to each side<br> **resize_shape** (tuple, default=(256, 256)):target size of image<br> **image_format** (str, default='jpg'): target image format<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions | Dataset used for style transfer task. This Dataset is to construct a dataset from two specific image holders representing content image folder and style image folder. | **In yaml file:**<br> dataset:<br>    style_transfer:<br>      content_folder: /path/to/content_folder<br>      style_folder: /path/to/style_folder<br>      crop_ratio: 0.1<br>      resize_shape: [256, 256]<br>      image_format: 'jpg'<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['style_transfer'] (content_folder, style_folder, crop_ratio, resize_shape, image_format, transform=transform, filter=None) |
-| TFRecordDataset(root, transform, filter) | **root** (str): filename of dataset<br> **transform** (transform object, default=None): transform to process input data<br> **filter** (Filter objects, default=None): filter out examples according to specific conditions |Root is a full path to tfrecord file, which contains the file name. | **In yaml file:**<br> dataset:<br>    TFRecordDataset:<br>      root: /path/to/tfrecord<br> **In user code:**<br> from lpot.experimental.data import DATASETS<br> datasets = DATASETS(framework)<br> dataset = datasets['TFRecordDataset'] (root, transform=transform) |
-| bert(root, label_file, task, transform, filter) | **root** (str): path of dataset<br> **label_file** (str): path of label file<br> **task** (str, default='squad'): task type of model<br> **model_type** (str, default='bert'): model type, support 'bert'.<br> **transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset supports tfrecord data, please refer to [Guide](../examples/tensorflow/nlp/bert_large_squad/README.md) to create tfrecord file first. | **In yaml file:**
dataset:
   bert:
     root: /path/to/root
     label_file: /path/to/label_file
     task: squad
     model_type: bert
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['bert'] (root, label_file, transform=transform) | +| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/MNIST/; otherwise, the user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | +| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train**(bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/FashionMNIST/; otherwise, the user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually. | **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | +| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/ and extract it automatically; otherwise, the user can manually download the file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | +| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/ and extract it automatically; otherwise, the user can manually download the file from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | +| ImageRecord(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/validation-000-of-100
root/validation-001-of-100
...
root/validation-099-of-100
The file name needs to follow this pattern: '*-*-of-*' | **In yaml file:**
dataset:
   ImageRecord:
     root: /path/to/root
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImageRecord'] (root=root, transform=transform, filter=None)
| +| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImageFolder'] (root=root, transform=transform, filter=None) | +| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file, record image_names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
The dataset reads the name and label of each image from the image_list file; if image_list is set to None, it reads from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | +| COCORecord(root, num_cores, transform, filter) | **root** (str): Root directory of dataset
**num_cores** (int, default=28): the number of input Datasets to interleave from in parallel
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Root is the full path to the tfrecord file, including the file name.
**Please use Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORecord:
     root: /path/to/tfrecord
     num_cores: 28
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['COCORecord'] (root, num_cores=28, transform=transform, filter=None) | +| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
**Please use Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use the default label map | +| COCONpy(root, npy_dir, anno_dir) | **root** (str): Root directory of dataset
**npy_dir** (str, default='val2017'): npy file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory | Please arrange data in this way:
/root/npy_dir/1.jpg.npy
/root/npy_dir/2.jpg.npy
...
/root/npy_dir/n.jpg.npy
/root/anno_dir
**Please use Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCONpy:
     root: /path/to/root
     npy_dir: /path/to/npy
     anno_dir: /path/to/annotation
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['COCONpy'] (root, npy_dir, anno_dir)
If anno_dir is not set, the dataset will use the default label map | +| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple): shape of the total samples; the first dimension should be the sample count of the dataset. Creating tensors of multiple shapes is supported: pass a list of tuples, and a tensor of each given shape will be created.
**low** (list or float, default=-128.): lower bound of the generated values; scales the value range from [0, 1] to [0, low], or [low, 0] if low < 0. If a float, the same low value is applied to all tensors.
**high** (list or float, default=127.): shifts the generated values by adding high to every tensor element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): supports setting the dtype per tensor. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**label** (bool, default=False): whether to return 0 as the label
**transform** (transform object, default=None): the dummy dataset does not need a transform; if transform is not None, it is ignored.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset constructs tensors of the specified shape; the values are calculated as low * standard_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: False
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | +| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple): creates single or multiple input tensors; a list represents the sample shape of the dataset, e.g. an image should be represented as (224, 224, 3), while a tuple of multiple lists represents multiple input tensors.
**label_shape** (list or tuple): creates single or multiple label tensors; a list represents the sample shape of the label, e.g. a label should be represented as (1,), while a tuple of multiple lists represents multiple label tensors.
**low** (list or float, default=-128.): lower bound of the generated values; scales the value range from [0, 1] to [0, low], or [low, 0] if low < 0. If a float, the same low value is applied to all tensors.
**high** (list or float, default=127.): shifts the generated values by adding high to every tensor element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): supports setting the dtype per tensor. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**transform** (transform object, default=None): the dummy dataset does not need a transform; if transform is not None, it is ignored.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset constructs tensors of the specified shape; the values are calculated as low * standard_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | +| style_transfer(content_folder, style_folder, crop_ratio, resize_shape, image_format, transform, filter) | **content_folder** (str): Root directory of content images
**style_folder** (str): Root directory of style images
**crop_ratio** (float, default=0.1): ratio cropped from each side
**resize_shape** (tuple, default=(256, 256)): target size of image
**image_format** (str, default='jpg'): target image format
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Dataset used for the style transfer task. It constructs a dataset from two image folders: a content image folder and a style image folder. | **In yaml file:**
dataset:
   style_transfer:
     content_folder: /path/to/content_folder
     style_folder: /path/to/style_folder
     crop_ratio: 0.1
     resize_shape: [256, 256]
     image_format: 'jpg'
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['style_transfer'] (content_folder, style_folder, crop_ratio, resize_shape, image_format, transform=transform, filter=None) | +| TFRecordDataset(root, transform, filter) | **root** (str): filename of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Root is the full path to the tfrecord file, including the file name. | **In yaml file:**
dataset:
   TFRecordDataset:
     root: /path/to/tfrecord
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['TFRecordDataset'] (root, transform=transform) | +| bert(root, label_file, task, transform, filter) | **root** (str): path of dataset
**label_file** (str): path of label file
**task** (str, default='squad'): task type of model
**model_type** (str, default='bert'): model type; supports 'bert'.
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset supports tfrecord data; please refer to the [Guide](../examples/tensorflow/nlp/bert_large_squad/README.md) to create the tfrecord file first. | **In yaml file:**
dataset:
   bert:
     root: /path/to/root
     label_file: /path/to/label_file
     task: squad
     model_type: bert
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['bert'] (root, label_file, transform=transform) | #### PyTorch | Dataset | Parameters | Comments | Usage | | :------ | :------ | :------ | :------ | -| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/MNIST/, otherwise user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train**(bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/FashionMNIST/, otherwise user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually.| **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | -| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImageFolder'] (root=root,transform=transform, filter=None) | -| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file, record image_names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
dataset will read name and label of each image from image_list file, if user set image_list to None, it will read from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | -| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
**Please use Resize transform when batch_size>1**| **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use default label map | -| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple):shape of total samples, the first dimension should be the sample count of the dataset. support create multi shape tensors, use list of tuples for each tuple in the list, will create a such size tensor.
**low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.
**high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list
**dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'
**label** (bool, default=False):whether to return 0 as label
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: False
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | -| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple):create single or multi input tensors list represent the sample shape of the dataset, eg and image size should be represented as (224, 224, 3), tuple contains multiple list and represent multi input tensors.
**label_shape** (list or tuple):create single or multi label tensors list represent the sample shape of the label, eg and label size should be represented as (1,), tuple contains multiple list and represent multi label tensors.
**low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.
**high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list
**dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | -| bert(dataset, task, model_type, transform, filter) | **dataset** (list): list of data
**task** (str): the task of the model, support "classifier", "squad"
**model_type** (str, default='bert'): model type, support 'distilbert', 'bert', 'xlnet', 'xlm'
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This Dataset is to construct from the Bert TensorDataset and not a full implementation from yaml config. The original repo link is: https://github.com/huggingface/transformers. When you want use this Dataset, you should add it before you initialize your DataLoader. | **In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['bert'] (dataset, task, model_type, transform=transform, filter=None)
Now not support yaml implementation | +| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/MNIST/; otherwise, the user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | +| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train**(bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/FashionMNIST/; otherwise, the user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually. | **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | +| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/ and extract it automatically; otherwise, the user can manually download the file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | +| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/ and extract it automatically; otherwise, the user can manually download the file from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | +| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImageFolder'] (root=root, transform=transform, filter=None) | +| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file, record image_names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
The dataset reads the name and label of each image from the image_list file; if image_list is set to None, it reads from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | +| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
**Please use Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use the default label map | +| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple): shape of the total samples; the first dimension should be the sample count of the dataset. Creating tensors of multiple shapes is supported: pass a list of tuples, and a tensor of each given shape will be created.
**low** (list or float, default=-128.): lower bound of the generated values; scales the value range from [0, 1] to [0, low], or [low, 0] if low < 0. If a float, the same low value is applied to all tensors.
**high** (list or float, default=127.): shifts the generated values by adding high to every tensor element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): supports setting the dtype per tensor. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**label** (bool, default=False): whether to return 0 as the label
**transform** (transform object, default=None): the dummy dataset does not need a transform; if transform is not None, it is ignored.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset constructs tensors of the specified shape; the values are calculated as low * standard_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: False
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | +| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple): creates single or multiple input tensors; a list represents the sample shape of the dataset, e.g. an image should be represented as (224, 224, 3), while a tuple of multiple lists represents multiple input tensors.
**label_shape** (list or tuple): creates single or multiple label tensors; a list represents the sample shape of the label, e.g. a label should be represented as (1,), while a tuple of multiple lists represents multiple label tensors.
**low** (list or float, default=-128.): lower bound of the generated values; scales the value range from [0, 1] to [0, low], or [low, 0] if low < 0. If a float, the same low value is applied to all tensors.
**high** (list or float, default=127.): shifts the generated values by adding high to every tensor element. If a list, its length should match the shape list.
**dtype** (list or str, default='float32'): supports setting the dtype per tensor. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'.
**transform** (transform object, default=None): the dummy dataset does not need a transform; if transform is not None, it is ignored.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset constructs tensors of the specified shape; the values are calculated as low * standard_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | +| bert(dataset, task, model_type, transform, filter) | **dataset** (list): list of data
**task** (str): the task of the model; supports "classifier" and "squad"
**model_type** (str, default='bert'): model type; supports 'distilbert', 'bert', 'xlnet', 'xlm'
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is constructed from the BERT TensorDataset and is not fully configurable from yaml. The original repo link is: https://github.com/huggingface/transformers. To use this dataset, create it before initializing your DataLoader. | **In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['bert'] (dataset, task, model_type, transform=transform, filter=None)
Yaml configuration is not supported yet |

#### MXNet

| Dataset | Parameters | Comments | Usage |
| :------ | :------ | :------ | :------ |
-| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/MNIST/, otherwise user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train**(bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/FashionMNIST/, otherwise user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually.| **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | -| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImageFolder'] (root=root,transform=transform, filter=None) | -| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file, record image_names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
dataset will read name and label of each image from image_list file, if user set image_list to None, it will read from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | -| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
**Please use Resize transform when batch_size > 1**| **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use default label map | -| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple):shape of total samples, the first dimension should be the sample count of the dataset. support create multi shape tensors, use list of tuples for each tuple in the list, will create a such size tensor.
**low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.
**high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list
**dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'
**label** (bool, default=False):whether to return 0 as label
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: False
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | -| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple):create single or multi input tensors list represent the sample shape of the dataset, eg and image size should be represented as (224, 224, 3), tuple contains multiple list and represent multi input tensors.
**label_shape** (list or tuple):create single or multi label tensors list represent the sample shape of the label, eg and label size should be represented as (1,), tuple contains multiple list and represent multi label tensors.
**low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.
**high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list
**dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | +| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/MNIST/; otherwise, the user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | +| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train**(bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/FashionMNIST/; otherwise, the user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually. | **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | +| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it is not downloaded again. | If download is True, it will download the dataset to root/ and extract it automatically; otherwise, the user can manually download the file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set within the dataset section)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | +| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates the dataset from the train subset; otherwise, from the validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory; if the dataset is already downloaded, it is not downloaded again. | If download is True, the dataset is downloaded to root/ and extracted automatically; otherwise the user can download https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not configured within the dataset field)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | +| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImageFolder'] (root=root, transform=transform, filter=None) | +| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file that records image names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
The dataset reads the name and label of each image from the image_list file; if the user sets image_list to None, it reads from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | +| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
**Please use the Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use the default label map | +| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple): shape of the total samples; the first dimension should be the sample count of the dataset. Multiple tensors with different shapes are supported: pass a list of tuples, and one tensor is created for each tuple in the list.
**low** (list or float, default=-128.): scale applied to the generated tensor values, mapping the base range [0, 1] to [0, low] (or [low, 0] if low < 0); if a float, the same value is applied to all tensors.
**high** (list or float, default=127.): offset added to every element of the generated tensors; if a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', and 'bool'.
**label** (bool, default=False): whether to return 0 as the label
**transform** (transform object, default=None): the dummy dataset does not need a transform; if transform is not None, it is ignored.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset constructs samples with the specified shape; values are calculated as low * standard_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: False
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | +| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple): sample shape of the input tensor(s); a list represents the shape of a single input, e.g., an image would be (224, 224, 3), while a tuple of lists represents multiple input tensors.
**label_shape** (list or tuple): sample shape of the label tensor(s); a list represents the shape of a single label, e.g., a scalar label would be (1,), while a tuple of lists represents multiple label tensors.
**low** (list or float, default=-128.): scale applied to the generated tensor values, mapping the base range [0, 1] to [0, low] (or [low, 0] if low < 0); if a float, the same value is applied to all tensors.
**high** (list or float, default=127.): offset added to every element of the generated tensors; if a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', and 'bool'.
**transform** (transform object, default=None): the dummy dataset does not need a transform; if transform is not None, it is ignored.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset constructs samples with the specified shape; values are calculated as low * standard_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | #### ONNXRT | Dataset | Parameters | Comments | Usage | | :------ | :------ | :------ | :------ | -| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/MNIST/, otherwise user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train**(bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/FashionMNIST/, otherwise user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually.| **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | -| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates dataset from train subset, otherwise from validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. | If download is True, it will download dataset to root/ and extract it automatically, otherwise user can download file from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | -| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImageFolder'] (root=root,transform=transform, filter=None) | -| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file, record image_names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
dataset will read name and label of each image from image_list file, if user set image_list to None, it will read from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | -| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
***Please use Resize transform when batch_size > 1**| **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use default label map | -| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple):shape of total samples, the first dimension should be the sample count of the dataset. support create multi shape tensors, use list of tuples for each tuple in the list, will create a such size tensor.
**low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.
**high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list
**dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'
**label** (bool, default=False):whether to return 0 as label
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: False
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | -| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple):create single or multi input tensors list represent the sample shape of the dataset, eg and image size should be represented as (224, 224, 3), tuple contains multiple list and represent multi input tensors.
**label_shape** (list or tuple):create single or multi label tensors list represent the sample shape of the label, eg and label size should be represented as (1,), tuple contains multiple list and represent multi label tensors.
**low** (list or float, default=-128.):low out the tensor value range from[0, 1] to [0, low] or [low, 0] if low < 0, if float, will implement all tensors with same low value.
**high** (list or float, default=127.):high the tensor value by add all tensor element value high. If list, length of list should be same with shape list
**dtype** (list or str, default='float32'):support multi tensor dtype setting. If list, length of list should be same with shape list, if str, all tensors will use same dtype. dtype support 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', 'bool'
**transform** (transform object, default=None): dummy dataset does not need transform. If transform is not None, it will ignore it.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset is to construct a dataset from a specific shape, the value range is calculated from: low * stand_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | -| GLUE(data_dir, model_name_or_path, max_seq_length, do_lower_case, task, model_type, dynamic_length, evaluate, transform, filter) | **data_dir** (str): The input data dir
**model_name_or_path** (str): Path to pre-trained student model or shortcut name,
**max_seq_length** (int, default=128): The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.
**do_lower_case** (bool, default=True): Whether or not to lowercase the input.
**task** (bool, default=True): The name of the task to fine-tune. Choices include mrpc, qqp, qnli, rte, sts-b, cola, mnli, wnli.
**model_type** (str, default='bert'): model type, support 'distilbert', 'bert', 'mobilebert', 'roberta'.
**dynamic_length** (bool, default=False): Whether to use fixed sequence length.
**evaluate** (bool, default=True): Whether do evaluation or training.
**transform** (bool, default=True): If true,
**filter** (bool, default=True): If true, | Refer to [this example](/examples/onnxrt/language_translation/bert) on how to prepare dataset | **In yaml file:**
dataset:
   bert:
     data_dir: False
     model_name_or_path: True
(transform and filter are not set in the range of dataset)
**In user code:**
from lpot.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['bert'] (data_dir='/path/to/data/', model_name_or_path='bert-base-uncased', max_seq_length=128, task='mrpc', model_type='bert', dynamic_length=True, transform=None, filter=None) | +| MNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates the dataset from the train subset; otherwise, from the validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory; if the dataset is already downloaded, it is not downloaded again. | If download is True, the dataset is downloaded to root/MNIST/; otherwise the user should put mnist.npz under root/MNIST/ manually. | **In yaml file:**
dataset:
   MNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not configured within the dataset field)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['MNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | +| FashionMNIST(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates the dataset from the train subset; otherwise, from the validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory; if the dataset is already downloaded, it is not downloaded again. | If download is True, the dataset is downloaded to root/FashionMNIST/; otherwise the user should put train-labels-idx1-ubyte.gz, train-images-idx3-ubyte.gz, t10k-labels-idx1-ubyte.gz, and t10k-images-idx3-ubyte.gz under root/FashionMNIST/ manually. | **In yaml file:**
dataset:
   FashionMNIST:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not configured within the dataset field)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['FashionMNIST'] (root=root, train=False, transform=transform, filter=None, download=True) | +| CIFAR10(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates the dataset from the train subset; otherwise, from the validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory; if the dataset is already downloaded, it is not downloaded again. | If download is True, the dataset is downloaded to root/ and extracted automatically; otherwise the user can download https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR10:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not configured within the dataset field)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR10'] (root=root, train=False, transform=transform, filter=None, download=True) | +| CIFAR100(root, train, transform, filter, download) | **root** (str): Root directory of dataset
**train** (bool, default=False): If True, creates the dataset from the train subset; otherwise, from the validation subset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions
**download** (bool, default=True): If True, downloads the dataset from the internet and puts it in the root directory; if the dataset is already downloaded, it is not downloaded again. | If download is True, the dataset is downloaded to root/ and extracted automatically; otherwise the user can download https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz manually to root/ and extract it. | **In yaml file:**
dataset:
   CIFAR100:
     root: /path/to/root
     train: False
     download: True
(transform and filter are not configured within the dataset field)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['CIFAR100'] (root=root, train=False, transform=transform, filter=None, download=True) | +| ImageFolder(root, transform, filter) | **root** (str): Root directory of dataset
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
root/class_1/xxx.png
root/class_1/xxy.png
root/class_1/xxz.png
...
root/class_n/123.png
root/class_n/nsdf3.png
root/class_n/asd932_.png
Please put images of different categories into different folders. | **In yaml file:**
dataset:
   ImageFolder:
     root: /path/to/root
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImageFolder'] (root=root, transform=transform, filter=None) | +| ImagenetRaw(data_path, image_list, transform, filter) | **data_path** (str): Root directory of dataset
**image_list** (str): data file that records image names and their labels
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
data_path/img1.jpg
data_path/img2.jpg
...
data_path/imgx.jpg
The dataset reads the name and label of each image from the image_list file; if the user sets image_list to None, it reads from data_path/val_map.txt automatically. | **In yaml file:**
dataset:
   ImagenetRaw:
     data_path: /path/to/image
     image_list: /path/to/label
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['ImagenetRaw'] (data_path, image_list, transform=transform, filter=None) | +| COCORaw(root, img_dir, anno_dir, transform, filter) | **root** (str): Root directory of dataset
**img_dir** (str, default='val2017'): image file directory
**anno_dir** (str, default='annotations/instances_val2017.json'): annotation file directory
**transform** (transform object, default=None): transform to process input data
**filter** (Filter objects, default=None): filter out examples according to specific conditions | Please arrange data in this way:
/root/img_dir/1.jpg
/root/img_dir/2.jpg
...
/root/img_dir/n.jpg
/root/anno_dir
**Please use the Resize transform when batch_size > 1** | **In yaml file:**
dataset:
   COCORaw:
     root: /path/to/root
     img_dir: /path/to/image
     anno_dir: /path/to/annotation
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['COCORaw'] (root, img_dir, anno_dir, transform=transform, filter=None)
If anno_dir is not set, the dataset will use the default label map | +| dummy(shape, low, high, dtype, label, transform, filter) | **shape** (list or tuple): shape of the total samples; the first dimension should be the sample count of the dataset. Multiple tensors with different shapes are supported: pass a list of tuples, and one tensor is created for each tuple in the list.
**low** (list or float, default=-128.): scale applied to the generated tensor values, mapping the base range [0, 1] to [0, low] (or [low, 0] if low < 0); if a float, the same value is applied to all tensors.
**high** (list or float, default=127.): offset added to every element of the generated tensors; if a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', and 'bool'.
**label** (bool, default=False): whether to return 0 as the label
**transform** (transform object, default=None): the dummy dataset does not need a transform; if transform is not None, it is ignored.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset constructs samples with the specified shape; values are calculated as low * standard_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy:
     shape: [3, 224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32
     label: False
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy'] (shape, low, high, dtype, label, transform=None, filter=None) | +| dummy_v2(input_shape, label_shape, low, high, dtype, transform, filter) | **input_shape** (list or tuple): sample shape of the input tensor(s); a list represents the shape of a single input, e.g., an image would be (224, 224, 3), while a tuple of lists represents multiple input tensors.
**label_shape** (list or tuple): sample shape of the label tensor(s); a list represents the shape of a single label, e.g., a scalar label would be (1,), while a tuple of lists represents multiple label tensors.
**low** (list or float, default=-128.): scale applied to the generated tensor values, mapping the base range [0, 1] to [0, low] (or [low, 0] if low < 0); if a float, the same value is applied to all tensors.
**high** (list or float, default=127.): offset added to every element of the generated tensors; if a list, its length should match the shape list.
**dtype** (list or str, default='float32'): data type(s) of the generated tensors. If a list, its length should match the shape list; if a str, all tensors use the same dtype. Supported dtypes are 'float32', 'float16', 'uint8', 'int8', 'int32', 'int64', and 'bool'.
**transform** (transform object, default=None): the dummy dataset does not need a transform; if transform is not None, it is ignored.
**filter** (Filter objects, default=None): filter out examples according to specific conditions | This dataset constructs samples with the specified shape; values are calculated as low * standard_normal(0, 1) + high. | **In yaml file:**
dataset:
   dummy_v2:
     input_shape: [224, 224, 3]
     low: 0.0
     high: 127.0
     dtype: float32

**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['dummy_v2'] (input_shape, low, high, dtype, transform=None, filter=None) | +| GLUE(data_dir, model_name_or_path, max_seq_length, do_lower_case, task, model_type, dynamic_length, evaluate, transform, filter) | **data_dir** (str): The input data dir
**model_name_or_path** (str): Path to the pre-trained model or its shortcut name.
**max_seq_length** (int, default=128): The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.
**do_lower_case** (bool, default=True): Whether or not to lowercase the input.
**task** (str): The name of the task to fine-tune. Choices include mrpc, qqp, qnli, rte, sts-b, cola, mnli, wnli.
**model_type** (str, default='bert'): model type; supported values are 'distilbert', 'bert', 'mobilebert', and 'roberta'.
**dynamic_length** (bool, default=False): Whether to use dynamic sequence length.
**evaluate** (bool, default=True): Whether to run evaluation rather than training.
**transform** (bool, default=True): If true,
**filter** (bool, default=True): If true, | Refer to [this example](/examples/onnxrt/language_translation/bert) on how to prepare dataset | **In yaml file:**
dataset:
   bert:
     data_dir: /path/to/data
     model_name_or_path: bert-base-uncased
(transform and filter are not configured within the dataset field)
**In user code:**
from neural_compressor.experimental.data import DATASETS
datasets = DATASETS(framework)
dataset = datasets['bert'] (data_dir='/path/to/data/', model_name_or_path='bert-base-uncased', max_seq_length=128, task='mrpc', model_type='bert', dynamic_length=True, transform=None, filter=None) | ## User-specific dataset @@ -93,7 +93,7 @@ class Dataset(object): After defining the dataset class, pass it to the quantizer: ```python -from lpot import Quantization, common +from neural_compressor import Quantization, common quantizer = Quantization(yaml_file) quantizer.calib_dataloader = common.DataLoader(dataset) # user can pass more optional args to dataloader such as batch_size and collate_fn quantizer.model = common.Model(graph) diff --git a/docs/distillation.md b/docs/distillation.md index f2423ad897d..84fec61d5d0 100644 --- a/docs/distillation.md +++ b/docs/distillation.md @@ -11,10 +11,10 @@ Knowledge distillation is one of popular approaches of network compression, whic ### User facing API -LPOT distillation API is defined under `lpot.experimental.Distillation`, which takes a user defined yaml file as input. The user defined yaml defines distillation and evaluation behaviors. +Neural Compressor distillation API is defined under `neural_compressor.experimental.Distillation`, which takes a user defined yaml file as input. The user defined yaml defines distillation and evaluation behaviors. ```python -# distillation.py in lpot/experimental +# distillation.py in neural_compressor/experimental class Distillation(): def __init__(self, conf_fname_or_obj): # The initialization function of distillation, taking the path or Distillation_Conf class to user-defined yaml as input @@ -40,13 +40,13 @@ class Distillation(): def train_func(self, user_train_func) # The training function provided by user. This function takes framework runtime model object as input parameter, # and executes entire training process with self contained training hyper-parameters. - # It is optional if training could be configured by lpot built-in dataloader/optimizer/criterion. + # It is optional if training could be configured by neural_compressor built-in dataloader/optimizer/criterion. ... @eval_func.setter def eval_func(self, user_eval_func) # The evaluation function provided by user. This function takes framework runtime model object as input parameter and executes evaluation process. - # It is optional if evaluation could be configured by lpot built-in dataloader/optimizer/criterion. + # It is optional if evaluation could be configured by neural_compressor built-in dataloader/optimizer/criterion. ... @train_dataloader.setter @@ -88,7 +88,7 @@ class Distillation(): Simplest launcher code if training behavior is defined in user-defined yaml. ```python -from lpot.experimental import Distillation, common +from neural_compressor.experimental import Distillation, common distiller = Distillation('/path/to/user/yaml') distiller.student_model = common.Model(student_model) distiller.teacher_model = common.Model(teacher_model) @@ -141,8 +141,8 @@ distillation: momentum: 0.9 weight_decay: 0.0004 nesterov: False -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. 
dataloader: @@ -162,10 +162,10 @@ evaluation: # optional. required if user doesn't pr #### `train` -The `train` section defines the training behavior, including what training hyper-parameter would be used and which dataloader is used during training. For criterion, we provided a built-in knowledge distillation loss class for distillation loss calculation. It is defined under `lpot.experimental.common.criterion` with following structure. +The `train` section defines the training behavior, including what training hyper-parameter would be used and which dataloader is used during training. For criterion, we provided a built-in knowledge distillation loss class for distillation loss calculation. It is defined under `neural_compressor.experimental.common.criterion` with following structure. ```python -# criterion.py in lpot/experimental/common +# criterion.py in neural_compressor/experimental/common class KnowledgeDistillationLoss(): def __init__(self, temperature=1.0, loss_types=['CE', 'CE'], @@ -189,9 +189,9 @@ class KnowledgeDistillationLoss(): ### Distillation with user-defined train_func() -User can pass the customized training/evaluation functions to `Distillation` for flexible scenarios. In this case, distillation process can be done by pre-defined hooks in LPOT. User needs to put those hooks inside the training function. +User can pass the customized training/evaluation functions to `Distillation` for flexible scenarios. In this case, distillation process can be done by pre-defined hooks in Neural Compressor. User needs to put those hooks inside the training function. -LPOT defines several hooks for user pass +Neural Compressor defines several hooks for user pass ``` pre_epoch_begin() : Hook executed before training begins @@ -231,8 +231,8 @@ def train_func(model): In this case, the launcher code is like the following: ```python -from lpot.experimental import Distillation, common -from lpot.experimental.common.criterion import PyTorchKnowledgeDistillationLoss +from neural_compressor.experimental import Distillation, common +from neural_compressor.experimental.common.criterion import PyTorchKnowledgeDistillationLoss distiller = Distillation(args.config) distiller.student_model = common.Model(model) distiller.teacher_model = common.Model(teacher) @@ -243,8 +243,8 @@ model = distiller() ## Examples -### Examples in LPOT -Following examples are supported in LPOT: +### Examples in Neural Compressor +Following examples are supported in Neural Compressor: - CNN Examples: - [ResNet example](../examples/pytorch/eager/image_recognition/imagenet/cpu/distillation/README.md): distillation of ResNet50 to ResNet18 on ImageNet dataset. diff --git a/docs/distributed.md b/docs/distributed.md index c887237410a..935c2678334 100644 --- a/docs/distributed.md +++ b/docs/distributed.md @@ -3,7 +3,7 @@ Distributed Training ## Introduction -LPOT uses [horovod](https://github.com/horovod/horovod) for distributed training. +Neural Compressor uses [horovod](https://github.com/horovod/horovod) for distributed training. ## horovod installation @@ -17,13 +17,13 @@ pip install horovod Distributed training is supported in PyTorch currently, TensorFlow support is working in progress. To enable distributed training, the steps are: 1. Setting up distributed training scripts. We have 2 options here: - - Option 1: Enable distributed training with pure yaml configuration. In this case, LPOT builtin training function is used. - - Option 2: Pass the user defined training function to LPOT. 
In this case, please follow the horovod documentation and below example to know how to write such training function with horovod on different frameworks. + - Option 1: Enable distributed training with pure yaml configuration. In this case, Neural Compressor builtin training function is used. + - Option 2: Pass the user defined training function to Neural Compressor. In this case, please follow the horovod documentation and below example to know how to write such training function with horovod on different frameworks. 2. use horovodrun to execute your program. ### Option 1: pure yaml configuration -To enable distributed training in LPOT, user only need to add a field: `Distributed: True` in dataloader configuration: +To enable distributed training in Neural Compressor, user only need to add a field: `Distributed: True` in dataloader configuration: ``` dataloader: @@ -34,10 +34,10 @@ dataloader: root: /path/to/dataset ``` -In user's code, pass the yaml file to LPOT, and LPOT internally wrap the dataloader for the distributed training. The example codes are as following: +In user's code, pass the yaml file to Neural Compressor components, in which it constructs the real dataloader for the distributed training. The example codes are as following: ``` -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization(yaml_file) quantizer.model = common.Model(model) q_model = quantizer() @@ -45,7 +45,7 @@ q_model = quantizer() ### Option2: user defined training function -LPOT supports User defined training function for distributed training which requires user to modify training script following horovod requirements. We provide a MNIST example to show how to do that and following are the steps for PyTorch. +Neural Compressor supports User defined training function for distributed training which requires user to modify training script following horovod requirements. We provide a MNIST example to show how to do that and following are the steps for PyTorch. - Partition dataset via DistributedSampler: @@ -93,10 +93,10 @@ def train_func(model): return train(args, model, train_loader, optimizer) ``` -- Use user defined training function in LPOT: +- Use user defined training function in Neural Compressor: ``` -from lpot.experimental import Component, common +from neural_compressor.experimental import Component, common component = Component(yaml_file) component.model = common.Model(model) component.train_func = train_func @@ -116,7 +116,7 @@ horovodrun -np -H python train.py ## security -horovodrun requires user set up SSH on all hosts without any prompts. To do distributed training with LPOT, user needs to ensure the SSH setting on all hosts. +horovodrun requires user set up SSH on all hosts without any prompts. To do distributed training with Neural Compressor, user needs to ensure the SSH setting on all hosts. ## Examples Following PyTorch examples are supported: diff --git a/docs/doclist.rst b/docs/doclist.rst index 6279f5f96f2..27a9555a2eb 100644 --- a/docs/doclist.rst +++ b/docs/doclist.rst @@ -1,15 +1,15 @@ Developer Documentation ####################### -Read the following material as you learn how to use LPOT. +Read the following material as you learn how to use Neural Compressor. Get Started =========== -* `Transform `__ introduces how to utilize LPOT's built-in data processing and how to develop a custom data processing method. 
-* `Dataset `__ introduces how to utilize LPOT's built-in dataset and how to develop a custom dataset. -* `Metrics `__ introduces how to utilize LPOT's built-in metrics and how to develop a custom metric. -* `UX `__ is a web-based system used to simplify LPOT usage. +* `Transform `__ introduces how to utilize Neural Compressor's built-in data processing and how to develop a custom data processing method. +* `Dataset `__ introduces how to utilize Neural Compressor's built-in dataset and how to develop a custom dataset. +* `Metrics `__ introduces how to utilize Neural Compressor's built-in metrics and how to develop a custom metric. +* `UX `__ is a web-based system used to simplify Neural Compressor usage. * `Intel oneAPI AI Analytics Toolkit Get Started Guide `__ explains the AI Kit components, installation and configuration guides, and instructions for building and running sample apps. * `AI and Analytics Samples `__ includes code samples for Intel oneAPI libraries. @@ -28,9 +28,9 @@ Get Started Deep Dive ========= -* `Quantization `__ are processes that enable inference and training by performing computations at low-precision data types, such as fixed-point integers. LPOT supports Post-Training Quantization (`PTQ `__) and Quantization-Aware Training (`QAT `__). Note that `Dynamic Quantization `__ currently has limited support. +* `Quantization `__ are processes that enable inference and training by performing computations at low-precision data types, such as fixed-point integers. Neural Compressor supports Post-Training Quantization (`PTQ `__) and Quantization-Aware Training (`QAT `__). Note that `Dynamic Quantization `__ currently has limited support. * `Pruning `__ provides a common method for introducing sparsity in weights and activations. -* `Benchmarking `__ introduces how to utilize the benchmark interface of LPOT. +* `Benchmarking `__ introduces how to utilize the benchmark interface of Neural Compressor. * `Mixed precision `__ introduces how to enable mixed precision, including BFP16 and int8 and FP32, on Intel platforms during tuning. * `Graph Optimization `__ introduces how to enable graph optimization for FP32 and auto-mixed precision. * `Model Conversion ` introduces how to convert TensorFlow QAT model to quantized model running on Intel platforms. @@ -56,7 +56,7 @@ Deep Dive Advanced Topics =============== -* `Adaptor `__ is the interface between LPOT and framework. The method to develop adaptor extension is introduced with ONNX Runtime as example. +* `Adaptor `__ is the interface between Neural Compressor and framework. The method to develop adaptor extension is introduced with ONNX Runtime as example. * `Tuning strategies `__ can automatically optimized low-precision recipes for deep learning models to achieve optimal product objectives like inference performance and memory usage with expected accuracy criteria. The method to develop a new strategy is introduced. diff --git a/docs/engine.md b/docs/engine.md index 893ef89f61a..c867d796435 100644 --- a/docs/engine.md +++ b/docs/engine.md @@ -23,19 +23,19 @@ conda install absl-py --yes ``` -### 1. install lpot +### 1. install neural-compressor -As engine is part of lpot, just install lpot will build the binary and engine interface +As engine is part of neural_compressor, just install neural-compressor will build the binary and engine interface ``` -pip install lpot +pip install neural-compressor ``` ### 2. 
install C++ binary by deploy bare metal engine ``` -cd /engine/executor +cd /engine/executor mkdir build cd build cmake .. @@ -153,12 +153,12 @@ Engine python api support input numpy array and output numpy array. if you have The `input_ids`, `segment_ids` and `input_mask` are the input numpy array data of a bert model, which have size (batch_size, seq_len). Note that the `out` is a list contains the bert model output numpy data (`out=[output numpy data]`). -## Get a low precision model using lpot tool +## Get a low precision model using neural_compressor tool You may have a tensorflow or onnx model and want to have an high performance int8 engine ir, that will be easy to have ``` -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common ds = TF_BERTDataSet(args.data_dir, args.vocab_file, args.do_lower_case) quantizer = Quantization(args.config) quantizer.model = common.Model(args.input_model) @@ -171,7 +171,7 @@ q_model.save(args.output_model) The output_model is the generated int8 ir of engine. you can also test the benchmark of the engine model ``` -from lpot.experimental import Benchmark, common +from neural_compressor.experimental import Benchmark, common ds = TF_BERTDataSet(args.data_dir, args.vocab_file, args.do_lower_case) evaluator = Benchmark(args.config) evaluator.model = common.Model(args.input_model) @@ -180,5 +180,5 @@ evaluator(args.mode) ``` -Reference examples can be found at /examples/engine/nlp +Reference examples can be found at /examples/engine/nlp diff --git a/docs/full_model_list.md b/docs/full_model_list.md index 030252bddd6..e37e832770a 100644 --- a/docs/full_model_list.md +++ b/docs/full_model_list.md @@ -1,7 +1,7 @@ Full Validated Models ===================== -The below tables are models enabled by the Intel® Low Precision Optimization Tool. +The below tables are models enabled by the Intel® Neural Compressor. ### TensorFlow 2.x models diff --git a/docs/graph_optimization.md b/docs/graph_optimization.md index 1895cd7d2bc..d55fa95b2ec 100644 --- a/docs/graph_optimization.md +++ b/docs/graph_optimization.md @@ -5,9 +5,9 @@ Graph Optimization Graph optimization is primarily focused on two scenarios, shown below: -1. **FP32 optimization**. This is similar to the TensorFlow optimization tool [optimize_for_inference](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py) while LPOT enables more optimizations (such as common subexpression elimination). +1. **FP32 optimization**. This is similar to the TensorFlow optimization tool [optimize_for_inference](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py) while Neural Compressor enables more optimizations (such as common subexpression elimination). -2. **Auto-mixed precision optimization**. LPOT generates the optimal model with auto-mixed precision ([bfloat16](https://cloud.google.com/tpu/docs/bfloat16) and FP32) and allows for additional auto-tuning per accuracy requirements. +2. **Auto-mixed precision optimization**. Neural Compressor generates the optimal model with auto-mixed precision ([bfloat16](https://cloud.google.com/tpu/docs/bfloat16) and FP32) and allows for additional auto-tuning per accuracy requirements. ## How to use it @@ -16,10 +16,10 @@ See the following three examples which demonstrate graph optimization API usage. ### FP32 Optimization -LPOT runs the graph optimization under FP32 Optimization by default. 
In other words, the **precisions** field is explicitly set to **fp32**: +Neural Compressor runs the graph optimization under FP32 Optimization by default. In other words, the **precisions** field is explicitly set to **fp32**: ```python - from lpot.experimental import Graph_Optimization + from neural_compressor.experimental import Graph_Optimization graph_optimizer = Graph_Optimization() graph_optimizer.precisions = 'fp32' #Optional, default is 'fp32' graph_optimizer.input = 'input' # Optional @@ -35,7 +35,7 @@ LPOT runs the graph optimization under FP32 Optimization by default. In other wo The only difference between this and the default mode (FP32 optimization) is that **bf16** must be added to the **precisions** field. ```python - from lpot.experimental import Graph_Optimization + from neural_compressor.experimental import Graph_Optimization graph_optimizer = Graph_Optimization() graph_optimizer.precisions = 'bf16, fp32' graph_optimizer.input = 'input' # Optional @@ -45,19 +45,19 @@ The only difference between this and the default mode (FP32 optimization) is tha ``` Note the **fp32** is optional when the **bf16** is set to precisions field. The below example has the identical action under the hardware platform supports bf16, e.g, the CPX platform. ```python - from lpot.experimental import Graph_Optimization + from neural_compressor.experimental import Graph_Optimization graph_optimizer = Graph_Optimization() graph_optimizer.precisions = 'bf16' graph_optimizer.model = '/path/to/model' optimized_model = graph_optimizer() ``` -For those platforms without bf16 enabling, like CLX. LPOT also could leverage the graph optimization feature to generate the model under bf16 precision.The usage is just adding the `FORCE_BF16=1` before the cmd. -e.g, `FORCE_BF16=1 /path/to/executable_lpot_wrapper`. If we don't add such prefix `FORCE_BF16=1`, the LPOT would exit consequently. +For those platforms without bf16 enabling, like CLX. Neural Compressor also could leverage the graph optimization feature to generate the model under bf16 precision.The usage is just adding the `FORCE_BF16=1` before the cmd. +e.g, `FORCE_BF16=1 /path/to/executable_nc_wrapper`. If we do not add such prefix `FORCE_BF16=1`, the program would exit consequently. #### Auto-mixed precision with auto-tuning -LPOT also supports tuning the model in graph optimization mode. The end user must replace the quantization field with graph_optimization parts such as shown below. The **precisions** field only supports **bf16** and **fp32**. +Neural Compressor also supports tuning the model in graph optimization mode. The end user must replace the quantization field with graph_optimization parts such as shown below. The **precisions** field only supports **bf16** and **fp32**. ```yaml graph_optimization: @@ -65,12 +65,12 @@ LPOT also supports tuning the model in graph optimization mode. The end user mus ``` Note that if we remove the evaluation field from the yaml file, the graph optimization will only convert the model depending on the precisions setting. -When the graph_optimization field is set and the evaluation field exists in the yaml file, LPOT executes the similar process like quantization. It means the LPOT converts op into bf16 as much as possible and checks the metric later. If the metric meets the criterion, LPOT exits or it fallbacks one op to fp32 and re-runs the above process until it meets the exit policy setting. 
+When the graph_optimization field is set and the evaluation field exists in the yaml file, Neural Compressor executes the similar process like quantization. It converts op into bf16 as much as possible and checks the metric later. If the metric meets the criterion, Neural Compressor exits or it fallbacks one op to fp32 and re-runs the above process until it meets the exit policy setting. Below is an example of using yaml to trigger graph optimization. ```python - from lpot.experimental import Graph_Optimization + from neural_compressor.experimental import Graph_Optimization graph_optimizer = Graph_Optimization('/path/to/config.yaml') graph_optimizer.model = '/path/to/model' optimized_model = graph_optimizer() @@ -98,7 +98,7 @@ Graph_Optimization class also support Graph_Optimization_Conf class as it's argu ``` 2. Measure the performance on original FP32 model. - First of all, we create the **resnet50_measurement.yaml** with below settings for leveraging LPOT Benchmark API. + First of all, we create the **resnet50_measurement.yaml** with below settings for leveraging Neural Compressor Benchmark API. ```yaml model: @@ -119,7 +119,7 @@ Graph_Optimization class also support Graph_Optimization_Conf class as it's argu Then, we can leverage the Benchmark API to measure the performance. ```python - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark('/path/to/resnet50_measurement.yaml') evaluator.model = '/path/to/resnet50_fp32_pretrained_model.pb' evaluator('performance') @@ -135,16 +135,16 @@ Graph_Optimization class also support Graph_Optimization_Conf class as it's argu ``` 3. Re-Measure the performance on optimized FP32 model. ```python - from lpot.experimental import Graph_Optimization + from neural_compressor.experimental import Graph_Optimization graph_optimizer = Graph_Optimization() graph_optimizer.model = '/path/to/resnet50_fp32_pretrained_model.pb' output_graph = graph_optimizer() output_graph.save('/path/to/fp32_optimized_model') ``` -Then, We measure the optimized performance via LPOT Benchmark API again. +Then, We measure the optimized performance via Neural Compressor Benchmark API again. ```python - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark('/path/to/resnet50_measurement.yaml') evaluator.model = '/path/to/fp32_optimized_model' evaluator('performance') diff --git a/docs/imgs/infrastructure.png b/docs/imgs/infrastructure.png index 729aba080e7..3a63184784f 100644 Binary files a/docs/imgs/infrastructure.png and b/docs/imgs/infrastructure.png differ diff --git a/docs/incompatible_changes.md b/docs/incompatible_changes.md index 98f8500932b..89fcdd7540c 100644 --- a/docs/incompatible_changes.md +++ b/docs/incompatible_changes.md @@ -4,7 +4,7 @@ The user-facing APIs are changed between v1.2 and v1.1. The major changes are: -1. v1.2 abstracts `lpot.common.Model` concept to cover those cases whose weight and graph files are stored separately. +1. v1.2 abstracts `neural_compressor.common.Model` concept to cover those cases whose weight and graph files are stored separately. 2. v1.2 unifies the calling style by setting model, calibration dataloader, evaluation dataloader, and metric through `quantizer` attributes rather than passing as function inputs. 
@@ -36,6 +36,6 @@ q_model.save('/path/to/output/dir') # explicitly call to save q_model ## Built-in transform/dataset/metric APIs -v1.2 refines LPOT built-in transform/dataset/metric to unify APIs cross different framework backends. +v1.2 refines Neural Compressor built-in transform/dataset/metric to unify APIs cross different framework backends. Refer to [dataset](./dataset.md), [transform](./transform.md), and [metric](./metric.md) to learn how to use them in yaml or code. diff --git a/docs/metric.md b/docs/metric.md index 9c2e4257c1a..7f99ab2b3b9 100644 --- a/docs/metric.md +++ b/docs/metric.md @@ -1,13 +1,13 @@ Metrics ======= -In terms of evaluating the performance of a specific model, we should have general metrics to measure the performance of different models. Different frameworks always have their own Metric module but with different features and APIs. LPOT Metrics supports code-free configuration through a yaml file, with built-in metrics, so that LPOT can achieve performance and accuracy without code changes from the user. In special cases, users can also register their own metric classes through the LPOT method. +In terms of evaluating the performance of a specific model, we should have general metrics to measure the performance of different models. Different frameworks always have their own Metric module but with different features and APIs. Neural Compressor Metrics supports code-free configuration through a yaml file, with built-in metrics, so that Neural Compressor can achieve performance and accuracy without code changes from the user. In special cases, users can also register their own metric classes through below method. ## How to use Metrics ### Config built-in metric in a yaml file -Users can specify an LPOT built-in metric such as shown below: +Users can specify an Neural Compressor built-in metric such as shown below: ```yaml evaluation: @@ -44,7 +44,7 @@ After defining the metric class, users need to register it with a user-defined m ```python -from lpot.quantization import Quantization, common +from neural_compressor.quantization import Quantization, common quantizer = Quantization(yaml_file) quantizer.model = common.Model(graph) quantizer.metric = common.Metric(NewMetric, 'metric_name') @@ -55,7 +55,7 @@ q_model = quantizer() ## Built-in metric support list -LPOT supports some built-in metrics that are popularly used in industry. +Neural Compressor supports some built-in metrics that are popularly used in industry. Refer to [this HelloWorld example](/examples/helloworld/tf_example1) on how to config a built-in metric. diff --git a/docs/mixed_precision.md b/docs/mixed_precision.md index f73fc79707f..2d298700373 100644 --- a/docs/mixed_precision.md +++ b/docs/mixed_precision.md @@ -9,7 +9,7 @@ The recently launched 3rd Gen Intel® Xeon® Scalable processor (codenamed Coope Intel has worked with the TensorFlow development team to enhance TensorFlow to include bfloat16 data support for CPUs. For more information about BF16 in TensorFlow, please read [Accelerating AI performance on 3rd Gen Intel® Xeon® Scalable processors with TensorFlow and Bfloat16](https://blog.tensorflow.org/2020/06/accelerating-ai-performance-on-3rd-gen-processors-with-tensorflow-bfloat16.html). -Intel® Low Precision Optimization Tool can support op-wise BF16 precision for TensorFlow now. With BF16 support, it can get a mixed precision model with acceptable accuracy and performance or others objective goals. 
This document will give a simple introduction of TensorFlow BF16 convert transformation and how to use the BF16. +Intel® Neural Compressor now supports op-wise BF16 precision for TensorFlow. With BF16 support, it can produce a mixed precision model with acceptable accuracy and performance or other objective goals. This document gives a brief introduction to the TensorFlow BF16 convert transformation and how to use BF16. ## BF16 Convert Transformation in TensorFlow diff --git a/docs/model.md b/docs/model.md index 54f4579fc1e..a956c9e429c 100644 --- a/docs/model.md +++ b/docs/model.md @@ -1,12 +1,12 @@ Model ===== -The LPOT Model feature is used to encapsulate the behavior of model building and saving. By simply providing information such as different model formats and framework_specific_info, LPOT performs optimizations and quantization on this model object and returns an LPOT Model object for further model persisting or benchmarking. An LPOT Model helps users to maintain necessary model information which is needed during optimization and quantization such as the input/output names, workspace path, and other model format knowledge. This helps unify the features gap brought by different model formats and frameworks. +The Neural Compressor Model feature is used to encapsulate the behavior of model building and saving. By simply providing information such as different model formats and framework_specific_info, Neural Compressor performs optimizations and quantization on this model object and returns a Neural Compressor Model object for further model persisting or benchmarking. A Neural Compressor Model helps users maintain the necessary model information needed during optimization and quantization, such as the input/output names, workspace path, and other model format knowledge. This helps bridge the feature gaps brought by different model formats and frameworks. Users can create, use, and save models in the following manner: ```python -from lpot import Quantization, common +from neural_compressor import Quantization, common quantizer = Quantization('./conf.yaml') quantizer.model = common.Model('/path/to/model') q_model = quantizer() @@ -20,16 +20,16 @@ q_model.save(save_path) | Model format | Parameters | Comments | Usage | | ------ | ------ |------|------| -| frozen pb | **model**(str): path to frozen pb&#13;
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, input_tensor_names, workspace_path and name
**kwargs**(dict): other required parameters | **Examples**:
[../examples/tensorflow/image_recognition](../examples/tensorflow/image_recognition)
[../examples/tensorflow/oob_models](../examples/tensorflow/oob_models)
**Save format**:
frozen pb | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/frozen.pb** | -| Graph object | **model**(tf.compat.v1.Graph): tf.compat.v1.Graph object
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, input_tensor_names, workspace_path and name
**kwargs**(dict): other required parameters | **Examples**:
[../examples/tensorflow/style_transfer](../examples/tensorflow/style_transfer)
[../examples/tensorflow/recommendation/wide_deep_large_ds](../examples/tensorflow/recommendation/wide_deep_large_ds)
**Save format**:
frozen pb | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the object of tf.compat.v1.Graph** | -| Graph object | **model**(tf.compat.v1.GraphDef) tf.compat.v1.GraphDef object
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, input_tensor_names, workspace_path and name
**kwargs**(dict): other required parameters | **Save format**:
frozen pb | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the object of tf.compat.v1.GraphDef** | -| tf1.x checkpoint | **model**(str): path to checkpoint
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, input_tensor_names, workspace_path and name
**kwargs**(dict): other required parameters | **Examples**:
[../examples/helloworld/tf_example4](../examples/helloworld/tf_example4)
[../examples/tensorflow/object_detection](../examples/tensorflow/object_detection)
**Save format**:
frozen pb | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/ckpt/** | -| keras.Model object | **model**(tf.keras.Model): tf.keras.Model object
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, input_tensor_names, workspace_path and name
**kwargs**(dict): other required parameters | **Save format**:
keras saved model | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the object of tf.keras.Model** | -| keras saved model | **model**(str): path to keras saved model
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, input_tensor_names, workspace_path and name
**kwargs**(dict): other required parameters | **Examples**:
[../examples/helloworld/tf_example2](../examples/helloworld/tf_example2)
**Save format**:
keras saved model | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/saved_model/** | -| tf2.x saved model | **model**(str): path to saved model
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, input_tensor_names, workspace_path and name
**kwargs**(dict): other required parameters | **Save format**:
saved model | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/saved_model/** | +| frozen pb | **model**(str): path to frozen pb
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, output_tensor_names, workspace_path and name&#13;
**kwargs**(dict): other required parameters | **Examples**:
[../examples/tensorflow/image_recognition](../examples/tensorflow/image_recognition)
[../examples/tensorflow/oob_models](../examples/tensorflow/oob_models)
**Save format**:
frozen pb | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/frozen.pb** | +| Graph object | **model**(tf.compat.v1.Graph): tf.compat.v1.Graph object
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, output_tensor_names, workspace_path and name&#13;
**kwargs**(dict): other required parameters | **Examples**:
[../examples/tensorflow/style_transfer](../examples/tensorflow/style_transfer)
[../examples/tensorflow/recommendation/wide_deep_large_ds](../examples/tensorflow/recommendation/wide_deep_large_ds)
**Save format**:
frozen pb | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the object of tf.compat.v1.Graph** | +| Graph object | **model**(tf.compat.v1.GraphDef) tf.compat.v1.GraphDef object
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, output_tensor_names, workspace_path and name&#13;
**kwargs**(dict): other required parameters | **Save format**:
frozen pb | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the object of tf.compat.v1.GraphDef** | +| tf1.x checkpoint | **model**(str): path to checkpoint
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, output_tensor_names, workspace_path and name&#13;
**kwargs**(dict): other required parameters | **Examples**:
[../examples/helloworld/tf_example4](../examples/helloworld/tf_example4)
[../examples/tensorflow/object_detection](../examples/tensorflow/object_detection)
**Save format**:
frozen pb | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/ckpt/** | +| keras.Model object | **model**(tf.keras.Model): tf.keras.Model object
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, output_tensor_names, workspace_path and name&#13;
**kwargs**(dict): other required parameters | **Save format**:
keras saved model | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the object of tf.keras.Model** | +| keras saved model | **model**(str): path to keras saved model
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, output_tensor_names, workspace_path and name&#13;
**kwargs**(dict): other required parameters | **Examples**:
[../examples/helloworld/tf_example2](../examples/helloworld/tf_example2)
**Save format**:
keras saved model | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/saved_model/** | +| tf2.x saved model | **model**(str): path to saved model
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, output_tensor_names, workspace_path and name&#13;
**kwargs**(dict): other required parameters | **Save format**:
saved model | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/saved_model/** | | tf2.x h5 format model | | TBD | | -| slim checkpoint | **model**(str): path to slim checkpoint
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, input_tensor_names, workspace_path and name
**kwargs**(dict): other required parameters | **Examples**:
[../examples/helloworld/tf_example3](../examples/helloworld/tf_example3)
**Save format**:
frozen pb | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is thepath of model, like ./path/to/model.ckpt**| -| tf1.x saved model | **model**(str): path to saved model, **framework_specific_info**(dict): information about model and framework, such as input_tensor_names, input_tensor_names, workspace_path and name
**kwargs**(dict): other required parameters | **Save format**:
saved model | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/saved_model/** | +| slim checkpoint | **model**(str): path to slim checkpoint
**framework_specific_info**(dict): information about model and framework, such as input_tensor_names, output_tensor_names, workspace_path and name&#13;
**kwargs**(dict): other required parameters | **Examples**:
[../examples/helloworld/tf_example3](../examples/helloworld/tf_example3)
**Save format**:
frozen pb | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/model.ckpt**| +| tf1.x saved model | **model**(str): path to saved model, **framework_specific_info**(dict): information about model and framework, such as input_tensor_names, output_tensor_names, workspace_path and name&#13;
**kwargs**(dict): other required parameters | **Save format**:
saved model | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the path of model, like ./path/to/saved_model/** | | tf2.x checkpoint | | Not supported yet. As a tf2.x checkpoint only contains weights and no description of the computation, please use a different tf2.x model format for quantization | | The following methods can be used in the TensorFlow model: @@ -50,8 +50,8 @@ output_tensor = model.output_tensor | Model format | Parameters | Comments | Usage | | ------ | ------ |------|------| -| mxnet.gluon.HybridBlock | **model**(mxnet.gluon.HybridBlock): mxnet.gluon.HybridBlock object&#13;
**framework_specific_info**(dict): information about model and framework
**kwargs**(dict): other required parameters | **Save format**:
save_path.json | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is mxnet.gluon.HybridBlock object** | -| mxnet.symbol.Symbol | **model**(tuple): tuple of symbol, arg_params, aux_params
**framework_specific_info**(dict): information about model and framework
**kwargs**(dict): other required parameters | **Save format**:
save_path-symbol.json and save_path-0000.params | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the tuple of symbol, arg_params, aux_params** | +| mxnet.gluon.HybridBlock | **model**(mxnet.gluon.HybridBlock): mxnet.gluon.HybridBlock object
**framework_specific_info**(dict): information about model and framework
**kwargs**(dict): other required parameters | **Save format**:
save_path.json | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is mxnet.gluon.HybridBlock object** | +| mxnet.symbol.Symbol | **model**(tuple): tuple of symbol, arg_params, aux_params
**framework_specific_info**(dict): information about model and framework
**kwargs**(dict): other required parameters | **Save format**:
save_path-symbol.json and save_path-0000.params | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is the tuple of symbol, arg_params, aux_params** | * Get symbol, arg_params, aux_params from symbol and param files. @@ -75,13 +75,13 @@ for k, v in save_dict.items(): | Model format | Parameters | Comments | Usage | | ------ | ------ |------|------| -| torch.nn.model | **model**(torch.nn.model): torch.nn.model object
**framework_specific_info**(dict): information about model and framework
**kwargs**(dict): other required parameters | **Save format**:
Without Intel PyTorch Extension(IPEX): /save_path/best_configure.yaml and /save_path/best_model_weights.pt
With IPEX: /save_path/best_configure.json | from lpot.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is torch.nn.model object** | +| torch.nn.model | **model**(torch.nn.model): torch.nn.model object
**framework_specific_info**(dict): information about model and framework
**kwargs**(dict): other required parameters | **Save format**:
Without Intel PyTorch Extension(IPEX): /save_path/best_configure.yaml and /save_path/best_model_weights.pt
With IPEX: /save_path/best_configure.json | from neural_compressor.experimental import Quantization, common
quantizer = Quantization(args.config)
quantizer.model = common.Model(model)
q_model = quantizer()
**model is torch.nn.model object** | * Loading model: ```python # Without IPEX -from lpot.utils.pytorch import load +from neural_compressor.utils.pytorch import load quantized_model = load( os.path.abspath(os.path.expanduser(Path)), model) # model is a fp32 model diff --git a/docs/model_conversion.md b/docs/model_conversion.md index be9d436363b..cab97769f43 100644 --- a/docs/model_conversion.md +++ b/docs/model_conversion.md @@ -12,7 +12,7 @@ Now it supports QAT(quantization aware training) model to default(quantized) mod See the following example which demonstrate model conversion API usage. ```python - from lpot.experimental import ModelConversion, common + from neural_compressor.experimental import ModelConversion, common conversion = ModelConversion() conversion.source = 'QAT' conversion.destination = 'default' @@ -23,7 +23,7 @@ See the following example which demonstrate model conversion API usage. After this conversion is done, user could measure the accuracy or performance on quantized model. ```python - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark('/path/to/yaml') evaluator.model = common.Model('/path/to/quantized/saved_model') evaluator.b_dataloader = ... # create benchmark dataloader like examples/tensorflow/qat/benchmark.py diff --git a/docs/pruning.md b/docs/pruning.md index 07438f79354..0086139df38 100755 --- a/docs/pruning.md +++ b/docs/pruning.md @@ -17,7 +17,7 @@ Unstructured pruning means pruning unstructured sparsity (aka random sparsity) p Filter/Channel pruning means pruning a larger part of the network, such as filters or layers, according to some rules. -## Pruning Algorithms supported by LPOT +## Pruning Algorithms supported by Neural Compressor | Pruning Type | Algorithm | PyTorch | Tensorflow | |------------------------|---------------------------------------------|---------|------------| @@ -26,7 +26,7 @@ Filter/Channel pruning means pruning a larger part of the network, such as filte | structured pruning | pattern_lock | Yes | N/A | | filter/channel pruning | gradient_sensitivity | Yes | N/A | -LPOT also supports the two-shot execution of unstructured pruning and post-training quantization. +Neural Compressor also supports the two-shot execution of unstructured pruning and post-training quantization. - basic_magnitude: @@ -52,10 +52,10 @@ LPOT also supports the two-shot execution of unstructured pruning and post-train ### User facing API -LPOT pruning API is defined under `lpot.experimental.Pruning`, which takes a user defined yaml file as input. The user defined yaml defines training, pruning and evaluation behaviors. +Neural Compressor pruning API is defined under `neural_compressor.experimental.Pruning`, which takes a user defined yaml file as input. The user defined yaml defines training, pruning and evaluation behaviors. ``` -# pruning.py in lpot/experimental +# pruning.py in neural_compressor/experimental class Pruning(): def __init__(self, conf_fname_or_obj): # The initialization function of pruning, taking the path or Pruning_Conf class to user-defined yaml as input @@ -76,13 +76,13 @@ class Pruning(): def pruning_func(self, user_pruning_func) # The training function provided by user. This function takes framework runtime model object as input parameter, # and executes entire training process with self contained training hyper-parameters. - # It is optional if training could be configured by lpot built-in dataloader/optimizer/criterion. 
+ # It is optional if training can be configured by the neural_compressor built-in dataloader/optimizer/criterion. ... @eval_func.setter def eval_func(self, user_eval_func) # The evaluation function provided by user. This function takes framework runtime model object as input parameter and executes evaluation process. - # It is optional if evaluation could be configured by lpot built-in dataloader/optimizer/criterion. + # It is optional if evaluation can be configured by the neural_compressor built-in dataloader/optimizer/criterion. ... @train_dataloader.setter @@ -122,7 +122,7 @@ class Pruning(): Simplest launcher code if training behavior is defined in user-defined yaml. ``` -from lpot.experimental import Pruning, common +from neural_compressor.experimental import Pruning, common prune = Pruning('/path/to/user/pruning/yaml') prune.model = common.Model(model) model = prune() @@ -244,9 +244,9 @@ The `approach` section defines which pruning algorithm is used and how to apply ### Pruning with user-defined pruning_func() -User can pass the customized training/evaluation functions to `Pruning` for flexible scenarios. `Pruning` In this case, pruning process can be done by pre-defined hooks in LPOT. User needs to put those hooks inside the training function. +Users can pass customized training/evaluation functions to `Pruning` for flexible scenarios. In this case, the pruning process is driven by the pre-defined hooks in Neural Compressor. Users need to put those hooks inside the training function; a sketch of such a training loop appears after the examples list below. -LPOT defines several hooks for user pass +Neural Compressor defines several hooks for users to use: ``` on_epoch_begin(epoch) : Hook executed at each epoch beginning @@ -294,7 +294,7 @@ def pruning_func(model): In this case, the launcher code is like the following: ```python -from lpot.experimental import Pruning, common +from neural_compressor.experimental import Pruning, common prune = Pruning(args.config) prune.model = common.Model(model) prune.pruning_func = pruning_func @@ -303,10 +303,10 @@ model = prune() ### Scheduler for Pruning and Quantization -LPOT defined Scheduler to automatically pipeline execute prune and post-training quantization. After appending separate component into scheduler pipeline, scheduler executes them one by one. In following example it executes the pruning and then post-training quantization. +Neural Compressor defines a Scheduler to automatically pipeline the execution of pruning and post-training quantization. After the separate components are appended to the scheduler pipeline, the scheduler executes them one by one. In the following example it executes pruning first and then post-training quantization. ```python -from lpot.experimental import Quantization, common, Pruning, Scheduler +from neural_compressor.experimental import Quantization, common, Pruning, Scheduler prune = Pruning(prune_conf) quantizer = Quantization(post_training_quantization_conf) scheduler = Scheduler() @@ -318,8 +318,8 @@ opt_model = scheduler() ## Examples -### Examples in LPOT -Following examples are supported in LPOT: +### Examples in Neural Compressor +The following examples are supported in Neural Compressor: - CNN Examples: - [resnet example](../examples/pytorch/eager/image_recognition/imagenet/cpu/prune/README.md): magnitude pruning on resnet. &#13;
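As promised above, here is a minimal sketch of a hook-driven `pruning_func`; it assumes the companion hooks `on_batch_begin`, `on_batch_end`, and `on_epoch_end` exist alongside the `on_epoch_begin` listed earlier, and `prune`, `train_loader`, `criterion`, `optimizer`, and `epochs` are placeholders taken from the launcher-code pattern in this section.

```python
# Sketch of a user-defined training function that drives pruning via the hooks;
# all names below are placeholders, and the comments describe the typical
# responsibility of each hook rather than a guaranteed implementation detail.
def pruning_func(model):
    for epoch in range(epochs):
        model.train()
        prune.on_epoch_begin(epoch)      # e.g. update the sparsity target for this epoch
        for i, (inputs, labels) in enumerate(train_loader):
            prune.on_batch_begin(i)      # e.g. apply pruning masks before the forward pass
            loss = criterion(model(inputs), labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            prune.on_batch_end()         # e.g. re-apply masks after the weight update
        prune.on_epoch_end()             # e.g. log sparsity statistics for the epoch

prune.pruning_func = pruning_func        # registered exactly as in the launcher code above
```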
diff --git a/docs/publication_list.md b/docs/publication_list.md index 11b3b553306..e5e4ce039bf 100644 --- a/docs/publication_list.md +++ b/docs/publication_list.md @@ -8,4 +8,5 @@ Full Publications * [Using Low-Precision Optimizations for High-Performance DL Inference Applications](https://techdecoded.intel.io/essentials/using-low-precision-optimizations-for-high-performance-dl-inference-applications/#gs.z20k91) (Apr 2021) * [Quantization support for ONNX using LPOT (Low precision optimization tool)](https://wiki.lfaidata.foundation/pages/viewpage.action?pageId=35160391) (Mar 2021) * [DL Boost Quantization with CERN's 3D-GANs model](https://www.nextplatform.com/2021/02/01/cern-uses-dlboost-oneapi-to-juice-inference-without-accuracy-loss/) (Feb 2021) -* [Intel Low Precision Optimization Tool](https://www.intel.com/content/www/us/en/artificial-intelligence/posts/intel-low-precision-optimization-tool.html) (Sep 2020) +* [Reduced Precision Strategies for Deep Learning: 3DGAN Use Case](https://indico.cern.ch/event/852553/contributions/4059283/attachments/2126838/3581708/Rehm_Florian-IML-Reduced_Precision.pdf) - [presentation](https://indico.cern.ch/event/852553/contributions/4059283/attachments/2126838/3588271/IML2020_wedam_rehm.mp4) on [4th IML Machine Learning Workshop](https://indico.cern.ch/event/852553/contributions/4059283/) (Oct 2020) +* [Intel Neural Compressor](https://www.intel.com/content/www/us/en/artificial-intelligence/posts/intel-low-precision-optimization-tool.html) (Sep 2020) diff --git a/docs/sigopt_strategy.md b/docs/sigopt_strategy.md index 90aad0609c8..2cf5643388a 100644 --- a/docs/sigopt_strategy.md +++ b/docs/sigopt_strategy.md @@ -7,7 +7,7 @@ Before using `SigOpt` strategy, a SigOpt account is necessary. - Each account has its own api token. Find your api token and then fill in the configure item `sigopt_api_token`. - Create a new project and write the corresponding name into the configure item `sigopt_project_id`. -- Set the name for this experiment in configure item `sigopt_experiment_id`, the default is lpot-tune. +- Set the name for this experiment in the configuration item `sigopt_experiment_name`; the default is nc-tune. ### SigOpt introduction @@ -21,9 +21,9 @@ SigOpt has two concepts: [project](https://app.sigopt.com/projects) and [experim - Evaluate your metric - Report an Observation to SigOpt -In LPOT sigopt strategy, the metrics add accuracy as constraint and optimize for latency. +In the Neural Compressor sigopt strategy, the metric adds accuracy as a constraint and optimizes for latency. -### LPOT configuration +### Neural Compressor configuration Compared to the `Basic` strategy, `sigopt_api_token` is necessary for the `SigOpt` strategy. Create the corresponding project name `sigopt_project_id` in the account before using the strategy. @@ -33,7 +33,7 @@ tuning: name: sigopt sigopt_api_token: YOUR-ACCOUNT-API-TOKEN sigopt_project_id: PROJECT-ID - sigopt_experiment_name: lpot-tune + sigopt_experiment_name: nc-tune accuracy_criterion: relative: 0.01 exit_policy: diff --git a/docs/tensorboard.md b/docs/tensorboard.md index 4718bb12077..ad8965032fc 100644 --- a/docs/tensorboard.md +++ b/docs/tensorboard.md @@ -3,7 +3,7 @@ TensorBoard ## Introduction -TensorBoard is a suite of web applications that provide measurements and visualizations used to inspect and understand your machine learning workflow for [TensorFlow TensorBoard](https://github.com/tensorflow/tensorboard) and [PyTorch TensorBoard](https://github.com/pytorch/pytorch/tree/master/torch/utils/tensorboard). &#13;
Intel® Low Precision Optimization Tool performs accuracy-driven quantization; the tuning process quantizes the tensor and performs graph transformation and optimization to achieve optimal performance under accuracy requirement. If you want to observe the behaviors of the optimizations, or if you want to discover why an accuracy target cannot be met, TensorBoard can provide you with some valuable information. You can inspect the graph and tensor after each tuning run. If a model cannot meet accuracy requirements, you can analyze the comparison of FP32 and the INT8 tensor histogram. +TensorBoard is a suite of web applications that provide measurements and visualizations used to inspect and understand your machine learning workflow for [TensorFlow TensorBoard](https://github.com/tensorflow/tensorboard) and [PyTorch TensorBoard](https://github.com/pytorch/pytorch/tree/master/torch/utils/tensorboard). Intel® Neural Compressor performs accuracy-driven quantization; the tuning process quantizes the tensor and performs graph transformation and optimization to achieve optimal performance under the accuracy requirement. If you want to observe the behaviors of the optimizations, or if you want to discover why an accuracy target cannot be met, TensorBoard can provide you with some valuable information. You can inspect the graph and tensor after each tuning run. If a model cannot meet accuracy requirements, you can analyze the comparison of FP32 and the INT8 tensor histogram. We collect the TensorBoard event summary during evaluation: the first collection is on the baseline FP32 model, and later collections happen at the end of each tuning run on the quantized model. The TensorBoard log directory is named to indicate the stage and accuracy of the generated data: baseline_acc_* for the FP32 baseline and tune_*_acc_* for each tuning run. Users can select their data of interest to observe with TensorBoard. @@ -136,7 +136,7 @@ TensorFlow TensorBoard implementation includes four steps: 3. Run session.run() to predict and get the inference result of the output tensor list collected in the previous step. 4. Enumerate the output tensor and write the histogram. -See the [tensorflow.py](https://github.com/intel/lpot/tree/master/lpot/adaptor/tensorflow.py) evaluate() function for details. +See the [tensorflow.py](https://github.com/intel/neural-compressor/tree/master/neural_compressor/adaptor/tensorflow.py) evaluate() function for details. ### Usage @@ -172,7 +172,7 @@ See the [tensorflow.py](https://github.com/intel/lpot/tree/master/lpot/adaptor/t ```shell bash run_tuning.sh --topology=inception_v3 --dataset_location= \ - --input_model=./inceptionv3_fp32_pretrained_model.pb --output_model=./lpot_inceptionv3.pb --config=./inceptionv3_dump_tensor.yaml + --input_model=./inceptionv3_fp32_pretrained_model.pb --output_model=./nc_inceptionv3.pb --config=./inceptionv3_dump_tensor.yaml ``` 3. Start TensorBoard diff --git a/docs/transform.md b/docs/transform.md index dc1f104b03f..d9c43074dc9 100644 --- a/docs/transform.md +++ b/docs/transform.md @@ -1,7 +1,7 @@ Transform ========= -LPOT supports built-in preprocessing methods on different framework backends. Refer to [this HelloWorld example](/examples/helloworld/tf_example1) on how to configure a transform in a dataloader. +Neural Compressor supports built-in preprocessing methods on different framework backends. Refer to [this HelloWorld example](/examples/helloworld/tf_example1) on how to configure a transform in a dataloader. &#13;
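As a quick illustration of the in-code pattern that the usage column of the tables below refers to, here is a minimal sketch; the framework string, the transform arguments, and the dummy sample are illustrative assumptions rather than required values.

```python
# Compose built-in transforms in code (sketch; argument values are illustrative)
import numpy as np
from neural_compressor.experimental.data import TRANSFORMS

preprocess = TRANSFORMS('tensorflow', 'preprocess')   # pick a framework backend
resize = preprocess['Resize'](size=[224, 224])
normalize = preprocess['Normalize'](mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0])
compose = preprocess['Compose']([resize, normalize])  # yaml configs trigger this grouping automatically

sample = (np.random.rand(256, 256, 3).astype('float32'), 0)  # placeholder (image, label) pair
image, label = compose(sample)                        # sample: image, label
```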
## Transform support list @@ -14,7 +14,7 @@ LPOT supports built-in preprocessing methods on different framework backends. Re | RandomResizedCrop(size, scale, ratio, interpolation) | **size** (list or int): Size of the result
**scale** (tuple or list, default=(0.08, 1.0)):range of size of the origin size cropped
**ratio** (tuple or list, default=(3. / 4., 4. / 3.)): range of aspect ratio of the origin aspect ratio cropped
**interpolation** (str, default='bilinear'):Desired interpolation type, support 'bilinear', 'nearest' | Crop the given image to random size and aspect ratio | RandomResizedCrop:
   size: [10, 10] # or size: 10
   scale: [0.08, 1.0]
   ratio: [3. / 4., 4. / 3.]
   interpolation: bilinear | | Normalize(mean, std) | **mean** (list, default=[0.0]):means for each channel, if len(mean)=1, mean will be broadcasted to each channel, otherwise its length should be same with the length of image shape
**std** (list, default=[1.0]):stds for each channel, if len(std)=1, std will be broadcasted to each channel, otherwise its length should be same with the length of image shape | Normalize an image with mean and standard deviation | Normalize:&#13;
   mean: [0.0, 0.0, 0.0]
   std: [1.0, 1.0, 1.0] | | RandomCrop(size) | **size** (list or int): Size of the result | Crop the image at a random location to the given size | RandomCrop:
   size: [10, 10] # size: 10 | -| Compose(transform_list) | **transform_list** (list of Transform objects): list of transforms to compose | Composes several transforms together | If user uses yaml file to configure transforms, LPOT will automatic call Compose to group other transforms.
**In user code:**
from lpot.experimental.data import TRANSFORMS
preprocess = TRANSFORMS(framework, 'preprocess')
resize = preprocess["Resize"] (\**args)
normalize = preprocess["Normalize"] (\**args)
compose = preprocess["Compose"] ([resize, normalize])
sample = compose(sample)
# sample: image, label | +| Compose(transform_list) | **transform_list** (list of Transform objects): list of transforms to compose | Composes several transforms together | If the user uses a yaml file to configure transforms, Neural Compressor will automatically call Compose to group the other transforms.&#13;
**In user code:**
from neural_compressor.experimental.data import TRANSFORMS
preprocess = TRANSFORMS(framework, 'preprocess')
resize = preprocess["Resize"] (\**args)
normalize = preprocess["Normalize"] (\**args)
compose = preprocess["Compose"] ([resize, normalize])
sample = compose(sample)
# sample: image, label | | CropResize(x, y, width, height, size, interpolation) | **x** (int):Left boundary of the cropping area
**y** (int):Top boundary of the cropping area
**width** (int):Width of the cropping area
**height** (int):Height of the cropping area
**size** (list or int): resize to new size after cropping
**interpolation** (str, default='bilinear'):Desired interpolation type, support 'bilinear', 'nearest' and 'bicubic' | Crop the input image with given location and resize it| CropResize:
   x: 0
   y: 5
   width: 224
   height: 224
   size: [100, 100] # or size: 100
   interpolation: bilinear | | RandomHorizontalFlip() | None | Horizontally flip the given image randomly | RandomHorizontalFlip: {} | | RandomVerticalFlip() | None | Vertically flip the given image randomly | RandomVerticalFlip: {} | @@ -43,7 +43,7 @@ LPOT supports built-in preprocessing methods on different framework backends. Re | RandomResizedCrop(size, scale, ratio, interpolation) | **size** (list or int): Size of the result
**scale** (tuple or list, default=(0.08, 1.0)):range of size of the origin size cropped
**ratio** (tuple or list, default=(3. / 4., 4. / 3.)): range of aspect ratio of the origin aspect ratio cropped
**interpolation** (str, default='bilinear'):Desired interpolation type, support 'bilinear', 'nearest', 'bicubic' | Crop the given image to random size and aspect ratio | RandomResizedCrop:
   size: [10, 10] # or size: 10
   scale: [0.08, 1.0]
   ratio: [3. / 4., 4. / 3.]
   interpolation: bilinear | | Normalize(mean, std) | **mean** (list, default=[0.0]):means for each channel, if len(mean)=1, mean will be broadcasted to each channel, otherwise its length should be same with the length of image shape
**std** (list, default=[1.0]):stds for each channel, if len(std)=1, std will be broadcasted to each channel, otherwise its length should be same with the length of image shape | Normalize an image with mean and standard deviation | Normalize:&#13;
   mean: [0.0, 0.0, 0.0]
   std: [1.0, 1.0, 1.0] | | RandomCrop(size) | **size** (list or int): Size of the result | Crop the image at a random location to the given size | RandomCrop:
   size: [10, 10] # size: 10 | -| Compose(transform_list) | **transform_list** (list of Transform objects): list of transforms to compose | Composes several transforms together | If user uses yaml file to configure transforms, LPOT will automatic call Compose to group other transforms.
**In user code:**
from lpot.experimental.data import TRANSFORMS
preprocess = TRANSFORMS(framework, 'preprocess')
resize = preprocess["Resize"] (\**args)
normalize = preprocess["Normalize"] (\**args)
compose = preprocess["Compose"] ([resize, normalize])
sample = compose(sample)
# sample: image, label| +| Compose(transform_list) | **transform_list** (list of Transform objects): list of transforms to compose | Composes several transforms together | If the user uses a yaml file to configure transforms, Neural Compressor will automatically call Compose to group the other transforms.&#13;
**In user code:**
from neural_compressor.experimental.data import TRANSFORMS
preprocess = TRANSFORMS(framework, 'preprocess')
resize = preprocess["Resize"] (\**args)
normalize = preprocess["Normalize"] (\**args)
compose = preprocess["Compose"] ([resize, normalize])
sample = compose(sample)
# sample: image, label| | RandomHorizontalFlip() | None | Horizontally flip the given image randomly | RandomHorizontalFlip: {} | | RandomVerticalFlip() | None | Vertically flip the given image randomly | RandomVerticalFlip: {} | | Transpose(perm) | **perm** (list): A permutation of the dimensions of input image | Transpose image according to perm | Transpose:&#13;
   perm: [1, 2, 0] | @@ -67,7 +67,7 @@ LPOT supports built-in preprocessing methods on different framework backends. Re | RandomResizedCrop(size, scale, ratio, interpolation) | **size** (list or int): Size of the result
**scale** (tuple or list, default=(0.08, 1.0)):range of size of the origin size cropped
**ratio** (tuple or list, default=(3. / 4., 4. / 3.)): range of aspect ratio of the origin aspect ratio cropped
**interpolation** (str, default='bilinear'):Desired interpolation type, support 'bilinear', 'nearest', 'bicubic' | Crop the given image to random size and aspect ratio | RandomResizedCrop:
   size: [10, 10] # or size: 10
   scale: [0.08, 1.0]
   ratio: [3. / 4., 4. / 3.]
   interpolation: bilinear | | Normalize(mean, std) | **mean** (list, default=[0.0]):means for each channel, if len(mean)=1, mean will be broadcasted to each channel, otherwise its length should be same with the length of image shape
**std** (list, default=[1.0]):stds for each channel, if len(std)=1, std will be broadcasted to each channel, otherwise its length should be same with the length of image shape | Normalize an image with mean and standard deviation | Normalize:&#13;
   mean: [0.0, 0.0, 0.0]
   std: [1.0, 1.0, 1.0] | | RandomCrop(size) | **size** (list or int): Size of the result | Crop the image at a random location to the given size | RandomCrop:
   size: [10, 10] # size: 10 | -| Compose(transform_list) | **transform_list** (list of Transform objects): list of transforms to compose | Composes several transforms together | If user uses yaml file to configure transforms, LPOT will automatic call Compose to group other transforms.
**In user code:**
from lpot.experimental.data import TRANSFORMS
preprocess = TRANSFORMS(framework, 'preprocess')
resize = preprocess["Resize"] (\**args)
normalize = preprocess["Normalize"] (\**args)
compose = preprocess["Compose"] ([resize, normalize])
sample = compose(sample)
# sample: image, label | +| Compose(transform_list) | **transform_list** (list of Transform objects): list of transforms to compose | Composes several transforms together | If the user uses a yaml file to configure transforms, Neural Compressor will automatically call Compose to group the other transforms.&#13;
**In user code:**
from neural_compressor.experimental.data import TRANSFORMS
preprocess = TRANSFORMS(framework, 'preprocess')
resize = preprocess["Resize"] (\**args)
normalize = preprocess["Normalize"] (\**args)
compose = preprocess["Compose"] ([resize, normalize])
sample = compose(sample)
# sample: image, label | | CropResize(x, y, width, height, size, interpolation) | **x** (int):Left boundary of the cropping area
**y** (int):Top boundary of the cropping area
**width** (int):Width of the cropping area
**height** (int):Height of the cropping area
**size** (list or int): resize to new size after cropping
**interpolation** (str, default='bilinear'):Desired interpolation type, support 'bilinear', 'nearest', 'bicubic' | Crop the input image with given location and resize it | CropResize:
   x: 0
   y: 5
   width: 224
   height: 224
   size: [100, 100] # or size: 100
   interpolation: bilinear | | RandomHorizontalFlip() | None | Horizontally flip the given image randomly | RandomHorizontalFlip: {} | | RandomVerticalFlip() | None | Vertically flip the given image randomly | RandomVerticalFlip: {} | @@ -89,7 +89,7 @@ LPOT supports built-in preprocessing methods on different framework backends. Re | RandomResizedCrop(size, scale, ratio, interpolation) | **size** (list or int): Size of the result
**scale** (tuple or list, default=(0.08, 1.0)):range of size of the origin size cropped
**ratio** (tuple or list, default=(3. / 4., 4. / 3.)): range of aspect ratio of the origin aspect ratio cropped
**interpolation** (str, default='bilinear'):Desired interpolation type, support 'bilinear', 'nearest' | Crop the given image to random size and aspect ratio | RandomResizedCrop:
   size: [10, 10] # or size: 10
   scale: [0.08, 1.0]
   ratio: [3. / 4., 4. / 3.]
   interpolation: bilinear | | Normalize(mean, std) | **mean** (list, default=[0.0]):means for each channel, if len(mean)=1, mean will be broadcasted to each channel, otherwise its length should be same with the length of image shape
**std** (list, default=[1.0]):stds for each channel, if len(std)=1, std will be broadcasted to each channel, otherwise its length should be same with the length of image shape | Normalize an image with mean and standard deviation | Normalize:&#13;
   mean: [0.0, 0.0, 0.0]
   std: [1.0, 1.0, 1.0] | | RandomCrop(size) | **size** (list or int): Size of the result | Crop the image at a random location to the given size | RandomCrop:
   size: [10, 10] # size: 10 | -| Compose(transform_list) | **transform_list** (list of Transform objects): list of transforms to compose | Composes several transforms together | If user uses yaml file to configure transforms, LPOT will automatic call Compose to group other transforms.
**In user code:**
from lpot.experimental.data import TRANSFORMS
preprocess = TRANSFORMS(framework, 'preprocess')
resize = preprocess["Resize"] (\**args)
normalize = preprocess["Normalize"] (\**args)
compose = preprocess["Compose"] ([resize, normalize])
sample = compose(sample)
# sample: image, label | +| Compose(transform_list) | **transform_list** (list of Transform objects): list of transforms to compose | Composes several transforms together | If the user uses a yaml file to configure transforms, Neural Compressor will automatically call Compose to group the other transforms.&#13;
**In user code:**
from neural_compressor.experimental.data import TRANSFORMS
preprocess = TRANSFORMS(framework, 'preprocess')
resize = preprocess["Resize"] (\**args)
normalize = preprocess["Normalize"] (\**args)
compose = preprocess["Compose"] ([resize, normalize])
sample = compose(sample)
# sample: image, label | | CropResize(x, y, width, height, size, interpolation) | **x** (int):Left boundary of the cropping area
**y** (int):Top boundary of the cropping area
**width** (int):Width of the cropping area
**height** (int):Height of the cropping area
**size** (list or int): resize to new size after cropping
**interpolation** (str, default='bilinear'):Desired interpolation type, support 'bilinear', 'nearest' | Crop the input image with given location and resize it| CropResize:
   x: 0
   y: 5
   width: 224
   height: 224
   size: [100, 100] # or size: 100
   interpolation: bilinear | | RandomHorizontalFlip() | None | Horizontally flip the given image randomly | RandomHorizontalFlip: {} | | RandomVerticalFlip() | None | Vertically flip the given image randomly | RandomVerticalFlip: {} | diff --git a/docs/tuning_strategies.md b/docs/tuning_strategies.md index 50b454b4ee4..a8664dfadd7 100644 --- a/docs/tuning_strategies.md +++ b/docs/tuning_strategies.md @@ -3,7 +3,7 @@ Tuning Strategies ## Introduction -Intel® Low Precision Optimization Tool aims to help users quickly deploy +Intel® Neural Compressor aims to help users quickly deploy the low-precision inference solution on popular Deep Learning frameworks such as TensorFlow, PyTorch, and MxNet. Using built-in strategies, it automatically optimizes low-precision recipes for deep learning models to @@ -21,7 +21,7 @@ below: Strategies begin with an adaptor layer (Framework Adaptor) where the user passes a framework-specific model to initialize an instance of the -`lpot.Quantization() class`; strategies call the `self.adaptor.query_fw_capability(model)` to get the framework and +`neural_compressor.Quantization()` class; strategies call the `self.adaptor.query_fw_capability(model)` to get the framework and model-specific quantization capabilities. From there, each strategy merges model-specific configurations in a `yaml` configuration file to filter some capability from the first step in order to generate the tuning space. Each @@ -32,7 +32,7 @@ tuning phase stops when the `accuracy` criteria is met. ## Configurations -Detailed configuration templates can be found [here](../lpot/template). +Detailed configuration templates can be found [here](../neural_compressor/template). ### Model-specific configurations @@ -49,7 +49,7 @@ quantization: # optional. tuning constrai first_conv_or_matmul_quantization: True # optional. default value is True. calibration: sampling_size: 1000, 2000 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, the user needs to construct a q_dataloader in code for neural_compressor.Quantization. dataset: TFRecordDataset: root: /path/to/tf_record @@ -144,7 +144,7 @@ tuning: optimization of black-box functions. This strategy comes from the [Bayesian optimization](https://github.com/fmfn/BayesianOptimization) package and changed it to a discrete version that complied with the strategy standard of -Intel® Low Precision Optimization Tool. It uses [Gaussian processes](https://en.wikipedia.org/wiki/Neural_network_Gaussian_process) to define +Intel® Neural Compressor. It uses [Gaussian processes](https://en.wikipedia.org/wiki/Neural_network_Gaussian_process) to define the prior/posterior distribution over the black-box function with the tuning history, and then finds the tuning configuration that maximizes the expected improvement. For now, `Bayesian` just focus on op-wise quantize configs tuning @@ -313,7 +313,7 @@ tuning: #### Usage -Compare to `Basic`, `sigopt_api_token` and `sigopt_project_id` is necessary for `SigOpt`.`sigopt_experiment_name` is optional, the default name is `lpot-tune`. +Compared to `Basic`, `sigopt_api_token` and `sigopt_project_id` are necessary for `SigOpt`. `sigopt_experiment_name` is optional; the default name is `nc-tune`. &#13;
```yaml tuning: @@ -321,7 +321,7 @@ tuning: name: sigopt sigopt_api_token: YOUR-ACCOUNT-API-TOKEN sigopt_project_id: PROJECT-ID - sigopt_experiment_name: lpot-tune + sigopt_experiment_name: nc-tune accuracy_criterion: relative: 0.01 exit_policy: @@ -330,11 +330,11 @@ tuning: ``` -For details, [how to use sigopt strategy in lpot](./sigopt_strategy.md) is available. +For details, see [how to use the sigopt strategy in neural_compressor](./sigopt_strategy.md). ## Customize a New Tuning Strategy -Intel® Low Precision Optimization Tool supports new strategy extension by implementing a subclass of `TuneStrategy` class in lpot.strategy package +Intel® Neural Compressor supports new strategy extensions by implementing a subclass of the `TuneStrategy` class in the neural_compressor.strategy package and registering this strategy by `strategy_registry` decorator. for example, user can implement a `Abc` strategy like below: @@ -355,4 +355,4 @@ The `next_tune_cfg` function is used to yield the next tune configuration accord all the tuning space till a quantization configuration meets pre-defined accuracy criterion. If the traverse behavior of `TuneStrategy` base class does not meet new strategy requirement, it could re-implement `traverse` function with self own logic. -An example like this is under [TPE Strategy](../lpot/strategy/strategy.py). +An example like this is under [TPE Strategy](../neural_compressor/strategy/strategy.py). diff --git a/docs/tutorial.md b/docs/tutorial.md index 0cb44c048fb..83789b5b3ae 100644 --- a/docs/tutorial.md +++ b/docs/tutorial.md @@ -1,9 +1,9 @@ Tutorial ======== -This tutorial provides instructions (with examples) on how to integrate models with Intel® Low Precision Optimization Tool (LPOT). +This tutorial provides instructions (with examples) on how to integrate models with Intel® Neural Compressor. -The following diagram shows steps for enabling model with LPOT: +The following diagram shows the steps for enabling a model with Neural Compressor: ![Tutorial](imgs/tutorial.png "Tutorial") @@ -16,21 +16,21 @@ To write launcher code, a user needs to prepare four components: * `Postprocess` *optional* * `Metric` -LPOT constructs the whole quantization/pruning process using these four components. +Neural Compressor constructs the whole quantization/pruning process using these four components. -LPOT has added built-in support for popular dataloaders/datasets and metrics to ease the preparation. Refer to [dataset](./dataset.md) and [metric](./metric.md) to learn how to use them in yaml. +Neural Compressor has added built-in support for popular dataloaders/datasets and metrics to ease the preparation. Refer to [dataset](./dataset.md) and [metric](./metric.md) to learn how to use them in yaml. -LPOT also supports registering custom datasets and custom metrics by code. +Neural Compressor also supports registering custom datasets and custom metrics by code. -As for model, LPOT abstract a common API, named [lpot.experimental.common.Model](../lpot/experimental/common/model.py), to cover the case in which model, weight, and other necessary info are separately stored. Refer to [model](./model.md) to learn how to use it. +As for the model, Neural Compressor abstracts a common API, named [neural_compressor.experimental.common.Model](../neural_compressor/experimental/common/model.py), to cover the case in which model, weight, and other necessary info are separately stored. Refer to [model](./model.md) to learn how to use it. &#13;
-Postprocess is treated as a special transform by LPOT which is only needed when a model output is mismatching with the expected input of LPOT built-in metrics. If a user is using a custom metric, the postprocess is not needed as the custom metric implementation needed ensures it can handle the model output correctly. On the other hand, the postprocess logic becomes part of the custom metric implementation. +Postprocess is treated as a special transform by Neural Compressor and is only needed when the model output mismatches the expected input of the Neural Compressor built-in metrics. If a user is using a custom metric, the postprocess is not needed, as the custom metric implementation needs to ensure it can handle the model output correctly; in that case, the postprocess logic becomes part of the custom metric implementation. -The example below shows how to enable LPOT on TensorFlow mobilenet_v1 with a built-in dataloader, dataset, and metric. +The example below shows how to enable Neural Compressor on TensorFlow mobilenet_v1 with a built-in dataloader, dataset, and metric. ```python # main.py -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization('./conf.yaml') quantizer.model = common.Model("./mobilenet_v1_1.0_224_frozen.pb") quantized_model = quantizer() @@ -82,13 +82,13 @@ evaluation: ``` -In this example, we use an LPOT built-in `ImageRecord` dataset and a `topk` metric. +In this example, we use a Neural Compressor built-in `ImageRecord` dataset and a `topk` metric. -If the user wants to use a dataset or metric that is not supported by the LPOT built-in, the user can register a custom one as demonstrated in the below helloworld example. +If the user wants to use a dataset or metric that is not supported by the built-ins, the user can register a custom one as demonstrated in the helloworld example below. ```python # main.py -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common class Dataset(object): def __init__(self): @@ -133,7 +133,7 @@ q_model = quantizer() > > In the customized dataset, the `__getitem__()` interface must be implemented and return a single sample and label. In this example, it returns the (image, label) pair. The user can return (image, 0) for a label-free case. -In the customized metric, the update() function records the predicted result of each mini-batch. The result() function is invoked by LPOT at the end of the evaluation to return a scalar to reflect model accuracy. By default, this scalar is higher-is-better. If this scalar returned from the customized metric is a lower-is-better value, `tuning.accuracy_criterion.higher_is_better` in yaml should be set to `False`. +In the customized metric, the update() function records the predicted result of each mini-batch. The result() function is invoked by Neural Compressor at the end of the evaluation to return a scalar reflecting model accuracy. By default, this scalar is higher-is-better. If the scalar returned from the customized metric is a lower-is-better value, `tuning.accuracy_criterion.higher_is_better` in yaml should be set to `False`. &#13;
```yaml # conf.yaml diff --git a/engine/converter/extractors/extractor.py b/engine/converter/extractors/extractor.py index 1076c4b4038..f4fb35e941d 100644 --- a/engine/converter/extractors/extractor.py +++ b/engine/converter/extractors/extractor.py @@ -17,7 +17,7 @@ from .tf_extractor import TensorflowExtractor from .onnx_extractor import ONNXExtractor -from lpot.utils import logger +from neural_compressor.utils import logger EXTRACTORS = { diff --git a/engine/converter/extractors/onnx_extractor.py b/engine/converter/extractors/onnx_extractor.py index c55d8163439..6c23418a5a6 100644 --- a/engine/converter/extractors/onnx_extractor.py +++ b/engine/converter/extractors/onnx_extractor.py @@ -17,7 +17,7 @@ import onnx from onnx.numpy_helper import to_array -from lpot.utils import logger +from neural_compressor.utils import logger import numpy as np from ..graph.graph import Graph from ..ops.op import OPERATORS @@ -31,7 +31,7 @@ class ONNXExtractor(object): and output_tensors, these tensors record the source/dest op name. All of these nodes (in a list) will compose a graph, which is Graph class, as the return object. Args: - model: lpot TensorflowBaseModel + model: neural_compressor TensorflowBaseModel Return: Graph: Graph class, the new graph object diff --git a/engine/converter/extractors/tf_extractor.py b/engine/converter/extractors/tf_extractor.py index 9dc450531c5..4034bd22000 100644 --- a/engine/converter/extractors/tf_extractor.py +++ b/engine/converter/extractors/tf_extractor.py @@ -17,7 +17,7 @@ from tensorflow.python.framework import tensor_util import tensorflow as tf -from lpot.utils import logger +from neural_compressor.utils import logger import numpy as np from ..graph.graph import Graph from ..ops.op import OPERATORS @@ -31,7 +31,7 @@ class TensorflowExtractor(object): and output_tensors, these tensors record the source/dest op name. All of these nodes (in a list) will compose a graph, which is Graph class, as the return object. Args: - model: lpot TensorflowBaseModel + model: neural_compressor TensorflowBaseModel Return: Graph: Graph class, the new graph object diff --git a/engine/converter/graph/graph.py b/engine/converter/graph/graph.py index baf59a1a602..159aebf2460 100644 --- a/engine/converter/graph/graph.py +++ b/engine/converter/graph/graph.py @@ -17,7 +17,7 @@ import re from collections import OrderedDict -from lpot.utils import logger +from neural_compressor.utils import logger import numpy as np import yaml import os diff --git a/engine/converter/graph_utils.py b/engine/converter/graph_utils.py index 603a625c902..a52f5c68896 100644 --- a/engine/converter/graph_utils.py +++ b/engine/converter/graph_utils.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from lpot.utils import logger +from neural_compressor.utils import logger import copy import re import numpy as np diff --git a/engine/converter/loaders/loader.py b/engine/converter/loaders/loader.py index 1f44ee7e732..5db24ded05f 100644 --- a/engine/converter/loaders/loader.py +++ b/engine/converter/loaders/loader.py @@ -15,8 +15,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from lpot.model.model import MODELS, get_model_fwk_name, get_model_type -from lpot.utils.utility import LazyImport +from neural_compressor.model.model import MODELS, get_model_fwk_name, get_model_type +from neural_compressor.utils.utility import LazyImport onnx = LazyImport('onnx') class Loader(object): diff --git a/engine/converter/onnx_utils.py b/engine/converter/onnx_utils.py index 82a5f56ef46..deffb03c963 100644 --- a/engine/converter/onnx_utils.py +++ b/engine/converter/onnx_utils.py @@ -21,7 +21,7 @@ import numpy as np import re from collections import namedtuple, OrderedDict -from lpot.utils import logger +from neural_compressor.utils import logger from .ops.tensor import Tensor from . import graph_utils as util @@ -29,7 +29,7 @@ def get_node_children_names(model, node): """Get the node's output nodes' name in the graph Args: - model: lpot ONNXModel + model: neural_compressor ONNXModel node: NodeProto in onnx model Returns: outputs: names list @@ -43,7 +43,7 @@ def get_node_children_names(model, node): def get_initializer_children_names(model, initializer): """Get the initializer's output nodes' name in the graph Args: - model: lpot ONNXModel + model: neural_compressor ONNXModel initializer: initializer in onnx model Returns: outputs: names list @@ -61,7 +61,7 @@ def graph_node_names_details(model): tensor value and the input_tensor source op; output_names in value is the node ouput name list; outputs in value is for output_tensor dest op Args: - model: lpot ONNXModel + model: neural_compressor ONNXModel Returns: node_names_details: the graph node info dict @@ -151,7 +151,7 @@ def onnx_extract_operator(node, model, nodes_dict): """decorate the operator in onnx Args: node: NodeProto - model: lpot ONNXModel + model: neural_compressor ONNXModel nodes_dict: dict, return value from graph_node_names_details tf_dtypes: dict, for get the dtype string diff --git a/engine/converter/ops/tensor.py b/engine/converter/ops/tensor.py index 6e39902104d..b3306b8b354 100644 --- a/engine/converter/ops/tensor.py +++ b/engine/converter/ops/tensor.py @@ -29,7 +29,7 @@ def __init__(self, dtype=None, location=None): self._name = name - # assume data in lpot tensor should be numpy array + # assume data in neural_compressor tensor should be numpy array # however, we don't assign the data diretly if the tensor is # const like weight when parse model # otherwise it will make a bloated new graph diff --git a/engine/converter/sub_graph/subgraph_matcher.py b/engine/converter/sub_graph/subgraph_matcher.py index c7f6887a587..dbd4d383cee 100644 --- a/engine/converter/sub_graph/subgraph_matcher.py +++ b/engine/converter/sub_graph/subgraph_matcher.py @@ -16,7 +16,7 @@ # limitations under the License. from .pattern import supported_patterns, PATTERNS -from lpot.utils import logger +from neural_compressor.utils import logger EXECUTOR_TYPE = { "MatMulWithBias": "InnerProduct", diff --git a/engine/converter/tf_utils.py b/engine/converter/tf_utils.py index 6dcecd86ac7..82157d3972e 100644 --- a/engine/converter/tf_utils.py +++ b/engine/converter/tf_utils.py @@ -22,7 +22,7 @@ import numpy as np import re from collections import namedtuple, OrderedDict -from lpot.utils import logger +from neural_compressor.utils import logger from .ops.tensor import Tensor from . 
import graph_utils as util @@ -103,7 +103,7 @@ def tf_extract_operator(node, model, nodes_dict): """decorate the operator in tensorflow Args: node: NodeDef - model: lpot TensorflowBaseModel + model: neural_compressor TensorflowBaseModel nodes_dict: dict, return value from graph_node_names_details tf_dtypes: dict, for get the dtype string diff --git a/examples/engine/nlp/bert_base_mrpc/README.md b/examples/engine/nlp/bert_base_mrpc/README.md index 4dcfa343fd2..663ca5d4c54 100644 --- a/examples/engine/nlp/bert_base_mrpc/README.md +++ b/examples/engine/nlp/bert_base_mrpc/README.md @@ -7,7 +7,7 @@ Step-by-Step ```shell conda create -n python=3.7 conda activate - cd /examples/deepengine/nlp/bert_base_mrpc + cd /examples/deepengine/nlp/bert_base_mrpc pip install 1.15.0 up2 from links below: https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp37-cp37m-manylinux2010_x86_64.whl pip install -r requirements.txt diff --git a/examples/engine/nlp/bert_base_mrpc/requirements.txt b/examples/engine/nlp/bert_base_mrpc/requirements.txt index 01f3ffd0a8e..c21dde77730 100644 --- a/examples/engine/nlp/bert_base_mrpc/requirements.txt +++ b/examples/engine/nlp/bert_base_mrpc/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor onnx onnxruntime numpy==1.19 diff --git a/examples/engine/nlp/bert_base_mrpc/run_engine.py b/examples/engine/nlp/bert_base_mrpc/run_engine.py index e6c584edcea..56eacebebc0 100644 --- a/examples/engine/nlp/bert_base_mrpc/run_engine.py +++ b/examples/engine/nlp/bert_base_mrpc/run_engine.py @@ -65,7 +65,7 @@ def main(): args = get_args() if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common ds = TF_BERTDataSet(args.data_dir, args.vocab_file, args.do_lower_case) evaluator = Benchmark(args.config) evaluator.model = common.Model(args.input_model) @@ -73,7 +73,7 @@ def main(): evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common ds = TF_BERTDataSet(args.data_dir, args.vocab_file, args.do_lower_case) quantizer = Quantization(args.config) quantizer.model = common.Model(args.input_model) diff --git a/examples/engine/nlp/bert_base_mrpc/train_freeze_pb.py b/examples/engine/nlp/bert_base_mrpc/train_freeze_pb.py index 224e3d19cdb..80d192c3cd5 100644 --- a/examples/engine/nlp/bert_base_mrpc/train_freeze_pb.py +++ b/examples/engine/nlp/bert_base_mrpc/train_freeze_pb.py @@ -37,11 +37,11 @@ flags.DEFINE_string( "config", None, - "lpot config for the model.") + "neural_compressor config for the model.") flags.DEFINE_string( "input_model", None, - "lpot input model path.") + "neural_compressor input model path.") ## Required parameters flags.DEFINE_string( @@ -726,7 +726,7 @@ def tpu_scaffold(): loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) - # if use lpot reuse the eval metric + # if use neural_compressor reuse the eval metric elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): @@ -1024,7 +1024,7 @@ def main(_): is_training=False, drop_remainder=False) - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(FLAGS.config) quantizer.model = common.Model(estimator, input_fn=estimator_input_fn) pb_path = os.path.join(FLAGS.output_dir, "bert_base_mrpc.pb") diff --git a/examples/engine/nlp/bert_large/README.md 
b/examples/engine/nlp/bert_large/README.md index 5bd2f2acb5a..a7bfd4a9dc4 100644 --- a/examples/engine/nlp/bert_large/README.md +++ b/examples/engine/nlp/bert_large/README.md @@ -7,7 +7,7 @@ Step-by-Step ```shell conda create -n python=3.7 conda activate - cd /examples/deepengine/nlp/bert_large + cd /examples/deepengine/nlp/bert_large pip install 1.15.0 up2 from links below: https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp37-cp37m-manylinux2010_x86_64.whl pip install -r requirements.txt diff --git a/examples/engine/nlp/bert_large/requirements.txt b/examples/engine/nlp/bert_large/requirements.txt index 5edbb9589ee..28acbfecea7 100644 --- a/examples/engine/nlp/bert_large/requirements.txt +++ b/examples/engine/nlp/bert_large/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor onnx onnxruntime transformers diff --git a/examples/engine/nlp/bert_large/run_engine.py b/examples/engine/nlp/bert_large/run_engine.py index 05a2c6ddf08..a0d6ee3b599 100644 --- a/examples/engine/nlp/bert_large/run_engine.py +++ b/examples/engine/nlp/bert_large/run_engine.py @@ -74,7 +74,7 @@ def main(): args = get_args() if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common ds = TF_BERTDataSet(args.data_dir, args.vocab_file, args.do_lower_case, args.perf_count) evaluator = Benchmark(args.config) evaluator.model = common.Model(args.input_model) @@ -82,7 +82,7 @@ def main(): evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common ds = TF_BERTDataSet(args.data_dir, args.vocab_file, args.do_lower_case, args.perf_count) quantizer = Quantization(args.config) quantizer.model = common.Model(args.input_model) diff --git a/examples/engine/nlp/distilbert_base_uncased_mrpc/README.md b/examples/engine/nlp/distilbert_base_uncased_mrpc/README.md index 178a319b9ce..d627d2683e6 100644 --- a/examples/engine/nlp/distilbert_base_uncased_mrpc/README.md +++ b/examples/engine/nlp/distilbert_base_uncased_mrpc/README.md @@ -7,7 +7,7 @@ ```shell conda create -n python=3.7 conda activate -cd /examples/engine/nlp/distilbert_base_uncased_mrpc +cd /examples/engine/nlp/distilbert_base_uncased_mrpc pip install -r requirements.txt ``` diff --git a/examples/engine/nlp/distilbert_base_uncased_mrpc/requirements.txt b/examples/engine/nlp/distilbert_base_uncased_mrpc/requirements.txt index fd764f102f9..ead9d608a57 100644 --- a/examples/engine/nlp/distilbert_base_uncased_mrpc/requirements.txt +++ b/examples/engine/nlp/distilbert_base_uncased_mrpc/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor intel-tensorflow==2.5.0 transformers==4.10.3 accelerate @@ -8,4 +8,4 @@ protobuf torch >= 1.3 onnx onnxruntime -pycocotools==2.0.0 \ No newline at end of file +pycocotools==2.0.0 diff --git a/examples/engine/nlp/distilbert_base_uncased_mrpc/run_engine.py b/examples/engine/nlp/distilbert_base_uncased_mrpc/run_engine.py index 74a63dbeb05..a2081dc870f 100644 --- a/examples/engine/nlp/distilbert_base_uncased_mrpc/run_engine.py +++ b/examples/engine/nlp/distilbert_base_uncased_mrpc/run_engine.py @@ -57,7 +57,7 @@ def main(): args = get_args() if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common ds = TF_BERTDataSet(args.data_dir, args.vocab_file, args.do_lower_case) evaluator = Benchmark(args.config) evaluator.model = common.Model(args.input_model) @@ -65,7 +65,7 @@ def 
main(): evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common ds = TF_BERTDataSet(args.data_dir, args.vocab_file, args.do_lower_case) quantizer = Quantization(args.config) quantizer.model = common.Model(args.input_model) diff --git a/examples/helloworld/tf_example1/README.md b/examples/helloworld/tf_example1/README.md index 14a8033c8cf..cf447e47435 100644 --- a/examples/helloworld/tf_example1/README.md +++ b/examples/helloworld/tf_example1/README.md @@ -1,6 +1,6 @@ tf_example1 example ===================== -This example is used to demonstrate how to utilize LPOT builtin dataloader and metric to enabling quantization without coding effort. +This example is used to demonstrate how to utilize Neural Compressor builtin dataloader and metric to enable quantization without coding effort. 1. Download the FP32 model wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/mobilenet_v1_1.0_224_frozen.pb @@ -25,8 +25,8 @@ quantization: # optional. tuning constrai weight: granularity: per_channel -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: @@ -44,7 +44,7 @@ evaluation: # optional. required if use 3. Run quantization We only need to add the following lines for quantization to create an int8 model. ```python - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization('./conf.yaml') quantizer.model = common.Model("./mobilenet_v1_1.0_224_frozen.pb") quantized_model = quantizer() diff --git a/examples/helloworld/tf_example1/conf.yaml b/examples/helloworld/tf_example1/conf.yaml index 91867815e13..5af8372bc91 100644 --- a/examples/helloworld/tf_example1/conf.yaml +++ b/examples/helloworld/tf_example1/conf.yaml @@ -29,8 +29,8 @@ quantization: # optional. tuning constrai height: 224 width: 224 -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric.
dataloader: diff --git a/examples/helloworld/tf_example1/test.py b/examples/helloworld/tf_example1/test.py index 9abe6221c7e..92e8c2a2f74 100644 --- a/examples/helloworld/tf_example1/test.py +++ b/examples/helloworld/tf_example1/test.py @@ -4,7 +4,7 @@ import numpy as np def main(): - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization('./conf.yaml') quantizer.model = common.Model("./mobilenet_v1_1.0_224_frozen.pb") quantized_model = quantizer() diff --git a/examples/helloworld/tf_example2/README.md b/examples/helloworld/tf_example2/README.md index 13beff651f9..a5d170e6abd 100644 --- a/examples/helloworld/tf_example2/README.md +++ b/examples/helloworld/tf_example2/README.md @@ -4,15 +4,15 @@ tf_example2 example Step-by-Step ============ -This is Hello World to demonstrate how to quick start with Intel® Low Precision Optimization Tool. It is a Keras model on mnist dataset defined by helloworld/train.py, we will implement a customized metric and a customized dataloader for quantization and evaluation. +This is a Hello World example to demonstrate how to quickly get started with Intel® Neural Compressor. It is a Keras model on the mnist dataset defined by helloworld/train.py; we will implement a customized metric and a customized dataloader for quantization and evaluation. ## Prerequisite ### 1. Installation ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2. Install Intel Tensorflow ```shell @@ -93,7 +93,7 @@ q_model = quantizer() ``` ### 5. Run quantized model -please get the input and output op name from lpot_workspace/tensorflow/hello_world/deploy.yaml +please get the input and output op name from nc_workspace/tensorflow/hello_world/deploy.yaml ```yaml model: name: hello_world diff --git a/examples/helloworld/tf_example2/test.py b/examples/helloworld/tf_example2/test.py index 506128c422d..f9275b12efd 100644 --- a/examples/helloworld/tf_example2/test.py +++ b/examples/helloworld/tf_example2/test.py @@ -16,8 +16,8 @@ def __len__(self): return len(self.test_images) # Define a customized Metric function -from lpot.experimental import Quantization, common -from lpot.metric import BaseMetric +from neural_compressor.experimental import Quantization, common +from neural_compressor.metric import BaseMetric class MyMetric(BaseMetric): def __init__(self, *args): self.pred_list = [] diff --git a/examples/helloworld/tf_example3/README.md b/examples/helloworld/tf_example3/README.md index dda78e527c9..9e104c3ff79 100644 --- a/examples/helloworld/tf_example3/README.md +++ b/examples/helloworld/tf_example3/README.md @@ -1,6 +1,6 @@ tf_example3 example ===================== -This example is used to demonstrate how to utilize LPOT builtin dataloader and metric to enabling quantization for models defined in slim. +This example is used to demonstrate how to utilize Neural Compressor builtin dataloader and metric to enable quantization for models defined in slim. 1. Prepare @@ -54,7 +54,7 @@ evaluation: # optional. required if use ``` * In order to do quantization for slim models, we need to get graph from slim .ckpt first.
```python - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization('./conf.yaml') # Do quantization diff --git a/examples/helloworld/tf_example3/conf.yaml b/examples/helloworld/tf_example3/conf.yaml index a34fa9a4c55..dbf0b6eb6f9 100644 --- a/examples/helloworld/tf_example3/conf.yaml +++ b/examples/helloworld/tf_example3/conf.yaml @@ -35,8 +35,8 @@ quantization: # optional. tuning constrai weight: granularity: per_channel -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/helloworld/tf_example3/test.py b/examples/helloworld/tf_example3/test.py index 9972cafe43d..83a8ae2832b 100644 --- a/examples/helloworld/tf_example3/test.py +++ b/examples/helloworld/tf_example3/test.py @@ -6,7 +6,7 @@ def main(): - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization('./conf.yaml') # Do quantization diff --git a/examples/helloworld/tf_example4/conf.yaml b/examples/helloworld/tf_example4/conf.yaml index 1d68f4edca2..c0fbe4bf33b 100644 --- a/examples/helloworld/tf_example4/conf.yaml +++ b/examples/helloworld/tf_example4/conf.yaml @@ -23,8 +23,8 @@ quantization: # optional. tuning constrai calibration: sampling_size: 20 # optional. default value is 100. used to set how many samples should be used in calibration. -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. diff --git a/examples/helloworld/tf_example4/test.py b/examples/helloworld/tf_example4/test.py index 2fa9f6ba524..6fb686e1a8a 100644 --- a/examples/helloworld/tf_example4/test.py +++ b/examples/helloworld/tf_example4/test.py @@ -2,7 +2,7 @@ import time import numpy as np from tensorflow import keras -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common tf.compat.v1.disable_eager_execution() diff --git a/examples/helloworld/tf_example5/README.md b/examples/helloworld/tf_example5/README.md index 8a9e5332aa8..04c90e78c22 100644 --- a/examples/helloworld/tf_example5/README.md +++ b/examples/helloworld/tf_example5/README.md @@ -43,7 +43,7 @@ evaluation: # optional. required if use 3. Run quantization We only need to add the following lines for quantization to create an int8 model. ```python - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization('./conf.yaml') quantizer.model = common.Model('./mobilenet_v1_1.0_224_frozen.pb') quantized_model = quantizer() @@ -56,7 +56,7 @@ We only need to add the following lines for quantization to create an int8 model 4. 
Run benchmark according to config ```python - from lpot.experimental import Quantization, Benchmark, common + from neural_compressor.experimental import Quantization, Benchmark, common evaluator = Benchmark('./conf.yaml') evaluator.model = common.Model('./int8.pb') results = evaluator() diff --git a/examples/helloworld/tf_example5/conf.yaml b/examples/helloworld/tf_example5/conf.yaml index 6b85c287afe..92bc27b474b 100644 --- a/examples/helloworld/tf_example5/conf.yaml +++ b/examples/helloworld/tf_example5/conf.yaml @@ -29,8 +29,8 @@ quantization: # optional. tuning constrai height: 224 width: 224 -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/helloworld/tf_example5/test.py b/examples/helloworld/tf_example5/test.py index a5da2e44412..ca430a96b18 100644 --- a/examples/helloworld/tf_example5/test.py +++ b/examples/helloworld/tf_example5/test.py @@ -10,14 +10,14 @@ def main(): args = arg_parser.parse_args() if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization('./conf.yaml') quantizer.model = common.Model("./mobilenet_v1_1.0_224_frozen.pb") quantized_model = quantizer() quantized_model.save('./int8.pb') if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark('./conf.yaml') evaluator.model = common.Model('int8.pb') evaluator(mode='accuracy') diff --git a/examples/helloworld/tf_example6/README.md b/examples/helloworld/tf_example6/README.md index 53bd0a17086..57e267c25fa 100644 --- a/examples/helloworld/tf_example6/README.md +++ b/examples/helloworld/tf_example6/README.md @@ -43,7 +43,7 @@ evaluation: # optional. required if use 3. Run quantization We only need to add the following lines for quantization to create an int8 model. ```python - from lpot import Quantization + from neural_compressor import Quantization quantizer = Quantization('./conf.yaml') quantized_model = quantizer('./mobilenet_v1_1.0_224_frozen.pb') tf.io.write_graph(graph_or_graph_def=quantized_model, @@ -59,7 +59,7 @@ We only need to add the following lines for quantization to create an int8 model 4. Run benchmark according to config ```python # Optional, run benchmark - from lpot import Benchmark + from neural_compressor import Benchmark evaluator = Benchmark('./conf.yaml') results = evaluator('./int8.pb') diff --git a/examples/helloworld/tf_example6/conf.yaml b/examples/helloworld/tf_example6/conf.yaml index 771b2765c6c..02fdf2ec2a8 100644 --- a/examples/helloworld/tf_example6/conf.yaml +++ b/examples/helloworld/tf_example6/conf.yaml @@ -29,8 +29,8 @@ quantization: # optional. tuning constrai height: 224 width: 224 -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. 
required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/helloworld/tf_example6/test.py b/examples/helloworld/tf_example6/test.py index 01fb21bd06e..4383dd445af 100644 --- a/examples/helloworld/tf_example6/test.py +++ b/examples/helloworld/tf_example6/test.py @@ -10,7 +10,7 @@ def main(): args = arg_parser.parse_args() if args.tune: - from lpot import Quantization + from neural_compressor import Quantization quantizer = Quantization('./conf.yaml') quantized_model = quantizer("./mobilenet_v1_1.0_224_frozen.pb") tf.io.write_graph(graph_or_graph_def=quantized_model, @@ -19,7 +19,7 @@ def main(): as_text=False) if args.benchmark: - from lpot import Benchmark + from neural_compressor import Benchmark evaluator = Benchmark('./conf.yaml') results = evaluator('./int8.pb') batch_size = 1 diff --git a/examples/mxnet/image_recognition/README.md b/examples/mxnet/image_recognition/README.md index d9c9eac59b2..3cebf3794e8 100644 --- a/examples/mxnet/image_recognition/README.md +++ b/examples/mxnet/image_recognition/README.md @@ -53,32 +53,32 @@ This document is used to list steps of reproducing MXNet ResNet18_v1/ResNet50_v1 # Run ### ResNet18_v1 ```bash -bash run_tuning.sh --topology=resnet18_v1 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./lpot_resnet18 +bash run_tuning.sh --topology=resnet18_v1 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./nc_resnet18 ``` ### ResNet50_v1 ```bash -bash run_tuning.sh --topology=resnet50_v1 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./lpot_resnet50_v1 +bash run_tuning.sh --topology=resnet50_v1 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./nc_resnet50_v1 ``` ### ResNet152_v1 ```bash -bash run_tuning.sh --topology=resnet152_v1 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./lpot_resnet152_v1 +bash run_tuning.sh --topology=resnet152_v1 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./nc_resnet152_v1 ``` ### SqueezeNet1 ```bash -bash run_tuning.sh --topology=squeezenet1.0 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./lpot_squeezenet +bash run_tuning.sh --topology=squeezenet1.0 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./nc_squeezenet ``` ### MobileNet1.0 ```bash -bash run_tuning.sh --topology=mobilenet1.0 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./lpot_mobilenet1.0 +bash run_tuning.sh --topology=mobilenet1.0 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./nc_mobilenet1.0 ``` ### MobileNetv2_1.0 ```bash -bash run_tuning.sh --topology=mobilenetv2_1.0 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./lpot_mobilenetv2_1.0 +bash run_tuning.sh --topology=mobilenetv2_1.0 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./nc_mobilenetv2_1.0 ``` ### Inception_v3 ```bash -bash run_tuning.sh --topology=inceptionv3 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL --output_model=./lpot_inception_v3 +bash run_tuning.sh --topology=inceptionv3 --dataset_location=./data/val_256_q90.rec --input_model=/PATH/TO/MODEL 
--output_model=./nc_inception_v3 ``` # Benchmark @@ -93,20 +93,20 @@ bash run_benchmark.sh --topology=resnet18_v1 --dataset_location=./data/val_256_q bash run_benchmark.sh --topology=resnet18_v1 --dataset_location=./data/val_256_q90.rec --input_model=./model --batch_size=32 --iters=100 --mode=benchmark ``` -Examples of enabling Intel® Low Precision Optimization Tool auto tuning on MXNet ResNet50 +Examples of enabling Intel® Neural Compressor auto tuning on MXNet ResNet50 ======================================================= -This is a tutorial of how to enable a MXNet classification model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable a MXNet classification model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports two usages: +Intel® Neural Compressor supports two usages: 1. User specifies fp32 "model", calibration dataset "q_dataloader", evaluation dataset "eval_dataloader" and metric in tuning.metric field of model-specific yaml config file. 2. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. ->As ResNet50_v1/Squeezenet1.0/MobileNet1.0/MobileNetv2_1.0/Inceptionv3 series are typical classification models, use Top-K as metric which is built-in supported by Intel® Low Precision Optimization Tool. So here we integrate MXNet ResNet with Intel® Low Precision Optimization Tool by the first use case for simplicity. +>As ResNet50_v1/Squeezenet1.0/MobileNet1.0/MobileNetv2_1.0/Inceptionv3 series are typical classification models, we use Top-K as the metric, which is built into Intel® Neural Compressor. So here we integrate MXNet ResNet with Intel® Neural Compressor by the first use case for simplicity. ### Write Yaml config file @@ -120,8 +120,8 @@ model: # mandatory. used to specif name: cnn framework: mxnet # mandatory. possible values are tensorflow, mxnet, pytorch, pytorch_ipex, onnxrt_integerops and onnxrt_qlinearops. -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. @@ -142,7 +142,7 @@ Here we choose topk built-in metric and set accuracy target as tolerating 0.01 r After prepare step is done, we just need update imagenet_inference.py like below. ```python - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common fp32_model = load_model(symbol_file, param_file, logger) quantizer = Quantization("./cnn.yaml") quantizer.model = common.Model(fp32_model) diff --git a/examples/mxnet/image_recognition/cnn.yaml b/examples/mxnet/image_recognition/cnn.yaml index dd46f3b4c8b..787949aef98 100644 --- a/examples/mxnet/image_recognition/cnn.yaml +++ b/examples/mxnet/image_recognition/cnn.yaml @@ -17,8 +17,8 @@ model: # mandatory. used to specif name: cnn framework: mxnet # mandatory. possible values are tensorflow, mxnet, pytorch, pytorch_ipex, onnxrt_integerops and onnxrt_qlinearops. -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional.
required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. diff --git a/examples/mxnet/image_recognition/imagenet_inference.py b/examples/mxnet/image_recognition/imagenet_inference.py index 037742132cf..7ee5c03c6b8 100644 --- a/examples/mxnet/image_recognition/imagenet_inference.py +++ b/examples/mxnet/image_recognition/imagenet_inference.py @@ -235,7 +235,7 @@ def save(model, output_path): choices=['', 'float16', 'bfloat16'], help='enable low precision') parser.add_argument('--tune',action='store_true', default=False, - help='Get tuning quantization model with lpot.') + help='Get tuning quantization model with neural_compressor.') parser.add_argument('--accuracy-only', action='store_true', help='accuracy only benchmark') parser.add_argument("--output-graph", help='Specify tune result model save dir', @@ -311,7 +311,7 @@ def save(model, output_path): **combine_mean_std) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common # loading model fp32_model = load_model(symbol_file, param_file, logger) diff --git a/examples/mxnet/image_recognition/requirements.txt b/examples/mxnet/image_recognition/requirements.txt index eb9ed330b37..e7f9d88c0c4 100644 --- a/examples/mxnet/image_recognition/requirements.txt +++ b/examples/mxnet/image_recognition/requirements.txt @@ -1,5 +1,5 @@ mxnet gluoncv -lpot +neural-compressor pillow>=8.2.0 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/examples/mxnet/language_translation/README.md b/examples/mxnet/language_translation/README.md index 87a947a1834..96592e50bb6 100644 --- a/examples/mxnet/language_translation/README.md +++ b/examples/mxnet/language_translation/README.md @@ -10,8 +10,8 @@ This document is used to list steps of reproducing MXNet BERT_base MRPC/Squad tu ### 1. Installation ```Shell - # Install Intel® Low Precision Optimization Tool - pip install lpot + # Install Intel® Neural Compressor + pip install neural-compressor # Install MXNet pip install mxnet @@ -67,20 +67,20 @@ python3 finetune_squad.py \ ``` -Examples of enabling Intel® Low Precision Optimization Tool auto tuning on MXNet BERT_base +Examples of enabling Intel® Neural Compressor auto tuning on MXNet BERT_base ======================================================= -This is a tutorial of how to enable a MXNet BERT base model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable a MXNet BERT base model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports two usages: +Intel® Neural Compressor supports two usages: 1. User specifies fp32 "model", calibration dataset "q_dataloader", evaluation dataset "eval_dataloader" and metric in tuning.metric field of model-specific yaml config file. 2. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -We integrate MXNet BERT_base MRPC/Squad with Intel® Low Precision Optimization Tool by the second use case. +We integrate MXNet BERT_base MRPC/Squad with Intel® Neural Compressor by the second use case. 
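Concretely, the second usage amounts to a few lines of driver code. The following is a minimal sketch only, assuming `net`, `calib_data`, and `eval_func` stand for the fp32 model, calibration dataloader, and custom accuracy function that the scripts in the hunks below define:

```python
# Minimal sketch of the second usage (custom eval_func); `net`, `calib_data`,
# and `eval_func` are assumed to be defined as in the scripts below.
from neural_compressor.experimental import Quantization, common

quantizer = Quantization("./bert.yaml")   # model-specific yaml config
quantizer.model = common.Model(net)       # wrap the fp32 model
quantizer.calib_dataloader = calib_data   # calibration dataset
quantizer.eval_func = eval_func           # encapsulates dataset + metric
q_model = quantizer()                     # accuracy-driven auto-tuning
```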
### Write Yaml config file @@ -107,7 +107,7 @@ Because we use the second use case which need user to provide a custom "eval_fun ### code update -First, we need to construct evaluate function for lpot. At eval_func, we get the dev_data_list for the origin script, and return acc metric to lpot. +First, we need to construct an evaluation function for neural_compressor. In eval_func, we get the dev_data_list from the original script and return the accuracy metric to neural_compressor. ```python # define test_func @@ -128,9 +128,9 @@ First, we need to construct evaluate function for lpot. At eval_func, we get the After prepare step is done, we just need update main.py like below. ```python - # Intel® Low Precision Optimization Tool auto-tuning + # Intel® Neural Compressor auto-tuning calib_data = dev_data_list[0][1] - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./bert.yaml") quantizer.model = common.Model(model) quantizer.calib_dataloader = calib_data diff --git a/examples/mxnet/language_translation/finetune_classifier.py b/examples/mxnet/language_translation/finetune_classifier.py index f6e8d065995..6722bbcf028 100644 --- a/examples/mxnet/language_translation/finetune_classifier.py +++ b/examples/mxnet/language_translation/finetune_classifier.py @@ -201,7 +201,7 @@ help='calibration mode used for generating calibration table ' 'for the quantized symbol.') parser.add_argument('--tune',action='store_true', default=False, - help='Get bert tuning quantization model with lpot.') + help='Get bert tuning quantization model with neural_compressor.') args = parser.parse_args() @@ -749,10 +749,10 @@ def test_func(graph): nlp.utils.version.check_version('1.7.0', warning_only=True, library=mx) warnings.warn('INT8 Quantization for BERT need mxnet-mkl >= 1.6.0b20200115') elif args.tune: - # lpot auto-tuning + # neural_compressor auto-tuning if only_inference: calib_data = dev_data_list[0][1] - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./bert.yaml") quantizer.model = common.Model(model) quantizer.calib_dataloader = calib_data diff --git a/examples/mxnet/language_translation/finetune_squad.py b/examples/mxnet/language_translation/finetune_squad.py index c921762ae3a..574a7df421e 100644 --- a/examples/mxnet/language_translation/finetune_squad.py +++ b/examples/mxnet/language_translation/finetune_squad.py @@ -243,7 +243,7 @@ help='calibration mode used for generating calibration table ' 'for the quantized symbol.') parser.add_argument('--tune',action='store_true', default=False, - help='Get bert tuning quantization model with lpot.') + help='Get bert tuning quantization model with neural_compressor.') args = parser.parse_args() @@ -848,7 +848,7 @@ def preprocess_dataset(tokenizer, def gen_dataset(): - """generate dataset for lpot.""" + """generate dataset for neural_compressor.""" log.info('Loading dev data...') if version_2: dev_data = SQuAD('dev', version='2.0') @@ -882,7 +882,7 @@ def gen_dataset(): return dev_dataloader def eval_func(model): - """evaluation function for lpot.""" + """evaluation function for neural_compressor.""" EM_acc = evaluate(model) return EM_acc @@ -900,9 +900,9 @@ def eval_func(model): train() evaluate(net) elif args.tune: - # lpot auto-tuning + # neural_compressor auto-tuning dev_dataloader = gen_dataset() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import
Quantization, common quantizer = Quantization("./bert.yaml") quantizer.model = common.Model(net) quantizer.calib_dataloader = dev_dataloader diff --git a/examples/mxnet/object_detection/README.md b/examples/mxnet/object_detection/README.md index 98934572557..5e68caa092a 100644 --- a/examples/mxnet/object_detection/README.md +++ b/examples/mxnet/object_detection/README.md @@ -39,22 +39,22 @@ This document describes the step-by-step instructions for reproducing MXNet SSD- ### SSD-ResNet50_v1-VOC ```bash -bash run_tuning.sh --topology=ssd-resnet50_v1 --dataset_name=voc --dataset_location=/PATH/TO/DATASET --output_model=./lpot_ssd_resnet50_voc +bash run_tuning.sh --topology=ssd-resnet50_v1 --dataset_name=voc --dataset_location=/PATH/TO/DATASET --output_model=./nc_ssd_resnet50_voc ``` ### SSD-Mobilenet1.0-VOC ```bash -bash run_tuning.sh --topology=ssd-mobilenet1.0 --dataset_name=voc --dataset_location=/PATH/TO/DATASET --output_model=./lpot_ssd_mobilenet1.0_voc +bash run_tuning.sh --topology=ssd-mobilenet1.0 --dataset_name=voc --dataset_location=/PATH/TO/DATASET --output_model=./nc_ssd_mobilenet1.0_voc ``` ### SSD-ResNet50_v1-COCO ```bash -bash run_tuning.sh --topology=ssd-resnet50_v1 --dataset_name=coco --dataset_location=/PATH/TO/DATASET --output_model=./lpot_ssd_resnet50_coco +bash run_tuning.sh --topology=ssd-resnet50_v1 --dataset_name=coco --dataset_location=/PATH/TO/DATASET --output_model=./nc_ssd_resnet50_coco ``` ### SSD-Mobilenet1.0-COCO ```bash -bash run_tuning.sh --topology=ssd-mobilenet1.0 --dataset_name=coco --dataset_location=/PATH/TO/DATASET --output_model=./lpot_ssd_mobilenet1.0_coco +bash run_tuning.sh --topology=ssd-mobilenet1.0 --dataset_name=coco --dataset_location=/PATH/TO/DATASET --output_model=./nc_ssd_mobilenet1.0_coco ``` # benchmark @@ -72,7 +72,7 @@ For more detail, see: ```bash bash run_tuning.sh -h - Desc: Run lpot MXNet Object Detection example. + Desc: Run neural_compressor MXNet Object Detection example. -h --help help info @@ -84,23 +84,23 @@ For more detail, see: --input_model prefix of fp32 model (eg: ./model/ssd-mobilenet ) - --output_model Best tuning model by lpot will saved in this name prefix. default is './lpot_ssd_model' + --output_model Best tuning model by neural_compressor will be saved with this name prefix. default is './nc_ssd_model' ``` -Examples of enabling Intel® Low Precision Optimization Tool auto tuning on MXNet Object detection +Examples of enabling Intel® Neural Compressor auto tuning on MXNet Object detection ======================================================= -This is a tutorial of how to enable a MXNet Object detection model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable a MXNet Object detection model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports two usages: +Intel® Neural Compressor supports two usages: 1. User specifies fp32 "model", calibration dataset "q_dataloader", evaluation dataset "eval_dataloader" and metric in tuning.metric field of model-specific yaml config file. 2. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -As this example use VOC/COCO dataset, use VOCMApMetrics/COCOEval as metric which is can find [here](https://github.com/dmlc/gluon-cv/blob/20a2ed3942720550728ce36c2be53b2d5bbbb6fd/gluoncv/utils/metrics/voc_detection.py#L13) and [here](https://cocodataset.org/).
So we integrate MXNet SSD-ResNet50_v1/SSD-Mobilenet1.0 with Intel® Low Precision Optimization Tool by the second use case. +As this example uses the VOC/COCO dataset, we use VOCMApMetrics/COCOEval as the metric, which can be found [here](https://github.com/dmlc/gluon-cv/blob/20a2ed3942720550728ce36c2be53b2d5bbbb6fd/gluoncv/utils/metrics/voc_detection.py#L13) and [here](https://cocodataset.org/). So we integrate MXNet SSD-ResNet50_v1/SSD-Mobilenet1.0 with Intel® Neural Compressor by the second use case. ### Write Yaml config file @@ -127,7 +127,7 @@ Because we use the second use case which need user to provide a custom "eval_fun ### code update -First, we need to construct evaluate function for Intel® Low Precision Optimization Tool. At eval_func, we get the val_dataset for the origin script, and return mAP metric to Intel® Low Precision Optimization Tool. +First, we need to construct an evaluation function for Intel® Neural Compressor. In eval_func, we get the val_dataset from the original script and return the mAP metric to Intel® Neural Compressor. ```python # define test_func @@ -151,7 +151,7 @@ After preparation is done, we just need update main.py like below. ```python # Doing auto-tuning here - from lpot.experimental import Quantization + from neural_compressor.experimental import Quantization quantizer = Quantization("./ssd.yaml") quantizer.model = common.Model(net) quantizer.calib_dataloader = val_data diff --git a/examples/mxnet/object_detection/eval_ssd.py b/examples/mxnet/object_detection/eval_ssd.py index 3c022dd4c2f..04273bdd574 100644 --- a/examples/mxnet/object_detection/eval_ssd.py +++ b/examples/mxnet/object_detection/eval_ssd.py @@ -70,7 +70,7 @@ def parse_args(): ' inference dataset.') parser.add_argument('--dataset-location', type=str, default='~/.mxnet/datasets/voc/', help='eval dataset.') parser.add_argument('--tune',action='store_true', default=False, - help='Get bert tuning quantization model with lpot.') + help='Get bert tuning quantization model with neural_compressor.') parser.add_argument("--output-graph", help='Specify tune result model save dir', dest='output_graph') @@ -102,7 +102,7 @@ def get_dataloader(val_dataset, data_shape, batch_size, num_workers): # val_loader = gluon.data.DataLoader( # val_dataset.transform(SSDDefaultValTransform(width, height)), batchify_fn=batchify_fn, # batch_size=batch_size, shuffle=False, last_batch='rollover', num_workers=num_workers) - from lpot.experimental import data + from neural_compressor.experimental import data val_loader = data.DATALOADERS['mxnet']( val_dataset.transform(SSDDefaultValTransform(width, height)), collate_fn=batchify_fn, batch_size=batch_size, last_batch='rollover', num_workers=num_workers) @@ -270,7 +270,7 @@ def eval_func(graph): if args.tune: # Doing auto-tuning here - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./ssd.yaml") quantizer.model = common.Model(net) quantizer.calib_dataloader = val_data diff --git a/examples/mxnet/object_detection/requirements.txt b/examples/mxnet/object_detection/requirements.txt index f456642c6f2..8b9b5e35519 100644 --- a/examples/mxnet/object_detection/requirements.txt +++ b/examples/mxnet/object_detection/requirements.txt @@ -1,6 +1,6 @@ mxnet gluoncv -lpot +neural-compressor pycocotools pillow>=8.2.0 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/examples/mxnet/object_detection/run_benchmark.sh b/examples/mxnet/object_detection/run_benchmark.sh index
27b43284bb3..cbf53bf2386 100755 --- a/examples/mxnet/object_detection/run_benchmark.sh +++ b/examples/mxnet/object_detection/run_benchmark.sh @@ -5,7 +5,7 @@ help() { cat <<- EOF - Desc: Run lpot MXNet Object Detection example. + Desc: Run neural_compressor MXNet Object Detection example. -h --help help info @@ -32,7 +32,7 @@ function main { # default parameters topology='mobilenet1.0' dataset='voc' - output_model='./lpot_ssd_model' + output_model='./nc_ssd_model' dataset_location='~/.mxnet/datasets/' batch_size=32 iters=10 diff --git a/examples/mxnet/object_detection/run_tuning.sh b/examples/mxnet/object_detection/run_tuning.sh index 6a8efcae250..dc352b95a4e 100755 --- a/examples/mxnet/object_detection/run_tuning.sh +++ b/examples/mxnet/object_detection/run_tuning.sh @@ -5,7 +5,7 @@ help() { cat <<- EOF - Desc: Run lpot MXNet Object Detection example. + Desc: Run neural_compressor MXNet Object Detection example. -h --help help info @@ -17,7 +17,7 @@ help() --input_model prefix of fp32 model (eg: ./model/ssd-mobilenet ) - --output_model Best tuning model by lpot will saved in this name prefix. default is './lpot_ssd_model' + --output_model Best tuning model by neural_compressor will be saved with this name prefix. default is './nc_ssd_model' EOF exit 0 @@ -33,7 +33,7 @@ function main { topology='ssd_mobilenet1.0' dataset='voc' -output_model='./lpot_ssd_model' +output_model='./nc_ssd_model' dataset_location='~/.mxnet/datasets/' # init params function init_params { diff --git a/examples/onnxrt/image_recognition/mobilenet_v2/main.py b/examples/onnxrt/image_recognition/mobilenet_v2/main.py index 752e9f8c55c..8e262ff1291 100644 --- a/examples/onnxrt/image_recognition/mobilenet_v2/main.py +++ b/examples/onnxrt/image_recognition/mobilenet_v2/main.py @@ -70,13 +70,13 @@ model = onnx.load(args.model_path) if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) @@ -84,7 +84,7 @@ q_model.save(args.output_model) if args.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(args.config) evaluator.model = common.Model(q_model) evaluator(args.mode) diff --git a/examples/onnxrt/image_recognition/mobilenet_v2/mobilenet_v2.yaml b/examples/onnxrt/image_recognition/mobilenet_v2/mobilenet_v2.yaml index e63a75d04a1..02fcfa57a38 100644 --- a/examples/onnxrt/image_recognition/mobilenet_v2/mobilenet_v2.yaml +++ b/examples/onnxrt/image_recognition/mobilenet_v2/mobilenet_v2.yaml @@ -43,8 +43,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric.
dataloader: diff --git a/examples/onnxrt/image_recognition/mobilenet_v3/main.py b/examples/onnxrt/image_recognition/mobilenet_v3/main.py index b5118525786..581341581ad 100644 --- a/examples/onnxrt/image_recognition/mobilenet_v3/main.py +++ b/examples/onnxrt/image_recognition/mobilenet_v3/main.py @@ -75,13 +75,13 @@ model = onnx.load(args.model_path) if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) @@ -89,7 +89,7 @@ q_model.save(args.output_model) if args.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(args.config) evaluator.model = common.Model(q_model) evaluator(args.mode) diff --git a/examples/onnxrt/image_recognition/mobilenet_v3/mobilenet_v3.yaml b/examples/onnxrt/image_recognition/mobilenet_v3/mobilenet_v3.yaml index 7360d90525e..c5e2bc1b83b 100644 --- a/examples/onnxrt/image_recognition/mobilenet_v3/mobilenet_v3.yaml +++ b/examples/onnxrt/image_recognition/mobilenet_v3/mobilenet_v3.yaml @@ -32,8 +32,8 @@ quantization: # optional. tuning constrai height: 224 width: 224 -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/onnxrt/image_recognition/resnet50/main.py b/examples/onnxrt/image_recognition/resnet50/main.py index 51c3538c382..6d3efcd3f7b 100644 --- a/examples/onnxrt/image_recognition/resnet50/main.py +++ b/examples/onnxrt/image_recognition/resnet50/main.py @@ -71,13 +71,13 @@ model = onnx.load(args.model_path) if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) @@ -85,7 +85,7 @@ q_model.save(args.output_model) if args.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(args.config) evaluator.model = common.Model(q_model) evaluator(args.mode) diff --git a/examples/onnxrt/image_recognition/resnet50/resnet50_v1_5.yaml b/examples/onnxrt/image_recognition/resnet50/resnet50_v1_5.yaml index 5858b6c3a8a..5bb0f619d18 100644 --- a/examples/onnxrt/image_recognition/resnet50/resnet50_v1_5.yaml +++ b/examples/onnxrt/image_recognition/resnet50/resnet50_v1_5.yaml @@ -39,8 +39,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. 
required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/onnxrt/image_recognition/resnet50/resnet50_v1_5_mlperf.yaml b/examples/onnxrt/image_recognition/resnet50/resnet50_v1_5_mlperf.yaml index 9ac766dc8be..e7aa83c1c23 100644 --- a/examples/onnxrt/image_recognition/resnet50/resnet50_v1_5_mlperf.yaml +++ b/examples/onnxrt/image_recognition/resnet50/resnet50_v1_5_mlperf.yaml @@ -47,8 +47,8 @@ quantization: # optional. tuning constrai } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/onnxrt/image_recognition/vgg16/main.py b/examples/onnxrt/image_recognition/vgg16/main.py index 3668aac7503..5a8e16f41d9 100644 --- a/examples/onnxrt/image_recognition/vgg16/main.py +++ b/examples/onnxrt/image_recognition/vgg16/main.py @@ -72,13 +72,13 @@ model = onnx.load(args.model_path) if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) @@ -86,7 +86,7 @@ q_model.save(args.output_model) if args.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(args.config) evaluator.model = common.Model(q_model) evaluator(args.mode) diff --git a/examples/onnxrt/image_recognition/vgg16/vgg16.yaml b/examples/onnxrt/image_recognition/vgg16/vgg16.yaml index 8a08ec3f294..09374ebecbd 100644 --- a/examples/onnxrt/image_recognition/vgg16/vgg16.yaml +++ b/examples/onnxrt/image_recognition/vgg16/vgg16.yaml @@ -39,8 +39,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/onnxrt/language_translation/bert/bert_dynamic.yaml b/examples/onnxrt/language_translation/bert/bert_dynamic.yaml index 997cda4322f..ab48203677c 100644 --- a/examples/onnxrt/language_translation/bert/bert_dynamic.yaml +++ b/examples/onnxrt/language_translation/bert/bert_dynamic.yaml @@ -32,8 +32,8 @@ quantization: model_type: bert dynamic_length: False -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. 
+evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: GLUE: task: mrpc # built-in metrics are topk, map, f1, allow user to register new metric. diff --git a/examples/onnxrt/language_translation/bert/bert_static.yaml b/examples/onnxrt/language_translation/bert/bert_static.yaml index 12acf3c216a..fd72153122a 100644 --- a/examples/onnxrt/language_translation/bert/bert_static.yaml +++ b/examples/onnxrt/language_translation/bert/bert_static.yaml @@ -39,7 +39,7 @@ quantization: } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. accuracy: metric: GLUE: diff --git a/examples/onnxrt/language_translation/bert/main.py b/examples/onnxrt/language_translation/bert/main.py index ac7bad61735..2b97587444a 100644 --- a/examples/onnxrt/language_translation/bert/main.py +++ b/examples/onnxrt/language_translation/bert/main.py @@ -80,7 +80,7 @@ args = parser.parse_args() if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common model = onnx.load(args.model_path) evaluator = Benchmark(args.config) evaluator.model = common.Model(model) @@ -100,7 +100,7 @@ optimization_options=opt_options) model = model_optimizer.model - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) q_model = quantize() diff --git a/examples/onnxrt/language_translation/distilbert/distilbert.yaml b/examples/onnxrt/language_translation/distilbert/distilbert.yaml index a8c9b524c6b..aaf9cd29e51 100644 --- a/examples/onnxrt/language_translation/distilbert/distilbert.yaml +++ b/examples/onnxrt/language_translation/distilbert/distilbert.yaml @@ -32,8 +32,8 @@ quantization: model_type: distilbert dynamic_length: False -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: GLUE: task: mrpc # built-in metrics are topk, map, f1, allow user to register new metric. 
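The onnxrt examples in this part of the diff all drive such a yaml through the same few-line Python flow. The following is a minimal sketch only, mirroring the `main.py` hunks shown here; "model.onnx" and "model_int8.onnx" are placeholder paths:

```python
# Minimal sketch of the onnxrt tuning flow configured by a yaml such as
# distilbert.yaml above; the input/output file names are illustrative only.
import onnx
from neural_compressor.experimental import Quantization, common

model = onnx.load("model.onnx")             # fp32 ONNX model
quantize = Quantization("distilbert.yaml")  # yaml supplies dataloader and GLUE metric
quantize.model = common.Model(model)
q_model = quantize()                        # tune until the accuracy criterion is met
q_model.save("model_int8.onnx")             # persist the best int8 model
```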
diff --git a/examples/onnxrt/language_translation/distilbert/main.py b/examples/onnxrt/language_translation/distilbert/main.py index 28e6b500d59..a09110c6e63 100644 --- a/examples/onnxrt/language_translation/distilbert/main.py +++ b/examples/onnxrt/language_translation/distilbert/main.py @@ -80,7 +80,7 @@ args = parser.parse_args() if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common model = onnx.load(args.model_path) evaluator = Benchmark(args.config) evaluator.model = common.Model(model) @@ -100,7 +100,7 @@ optimization_options=opt_options) model = model_optimizer.model - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) q_model = quantize() diff --git a/examples/onnxrt/language_translation/mobilebert/main.py b/examples/onnxrt/language_translation/mobilebert/main.py index 43d577ec1dd..2cd9042f41f 100644 --- a/examples/onnxrt/language_translation/mobilebert/main.py +++ b/examples/onnxrt/language_translation/mobilebert/main.py @@ -80,7 +80,7 @@ args = parser.parse_args() if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common model = onnx.load(args.model_path) evaluator = Benchmark(args.config) evaluator.model = common.Model(model) @@ -100,7 +100,7 @@ optimization_options=opt_options) model = model_optimizer.model - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) q_model = quantize() diff --git a/examples/onnxrt/language_translation/mobilebert/mobilebert.yaml b/examples/onnxrt/language_translation/mobilebert/mobilebert.yaml index d4941cf5a3f..fb8971e8cc9 100644 --- a/examples/onnxrt/language_translation/mobilebert/mobilebert.yaml +++ b/examples/onnxrt/language_translation/mobilebert/mobilebert.yaml @@ -17,8 +17,8 @@ model: # mandatory. used to specif name: mobilebert framework: onnxrt_integerops # mandatory. possible values are tensorflow, mxnet, pytorch, pytorch_ipex, onnxrt_integerops and onnxrt_qlinearops. -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. 
metric: GLUE: task: mrpc diff --git a/examples/onnxrt/language_translation/roberta/main.py b/examples/onnxrt/language_translation/roberta/main.py index c0f5cf23093..93726567d4c 100644 --- a/examples/onnxrt/language_translation/roberta/main.py +++ b/examples/onnxrt/language_translation/roberta/main.py @@ -96,7 +96,7 @@ args = parser.parse_args() if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common model = onnx.load(args.model_path) evaluator = Benchmark(args.config) evaluator.model = common.Model(model) @@ -116,7 +116,7 @@ optimization_options=opt_options) model = model_optimizer.model - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) q_model = quantize() diff --git a/examples/onnxrt/language_translation/roberta/roberta.yaml b/examples/onnxrt/language_translation/roberta/roberta.yaml index 73761846884..6a6d7abf3b4 100644 --- a/examples/onnxrt/language_translation/roberta/roberta.yaml +++ b/examples/onnxrt/language_translation/roberta/roberta.yaml @@ -38,8 +38,8 @@ quantization: } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: GLUE: task: mrpc # built-in metrics are topk, map, f1, allow user to register new metric. diff --git a/examples/onnxrt/object_detection/ssd_mobilenet_v1/main.py b/examples/onnxrt/object_detection/ssd_mobilenet_v1/main.py index bc452f0d0f8..693f700be93 100644 --- a/examples/onnxrt/object_detection/ssd_mobilenet_v1/main.py +++ b/examples/onnxrt/object_detection/ssd_mobilenet_v1/main.py @@ -70,13 +70,13 @@ model = onnx.load(args.model_path) if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) @@ -84,7 +84,7 @@ q_model.save(args.output_model) if args.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(args.config) evaluator.model = common.Model(q_model) evaluator(args.mode) diff --git a/examples/onnxrt/object_detection/ssd_mobilenet_v1/ssd_mobilenet_v1.yaml b/examples/onnxrt/object_detection/ssd_mobilenet_v1/ssd_mobilenet_v1.yaml index 8aa8c928a9c..9f1816b4a0a 100644 --- a/examples/onnxrt/object_detection/ssd_mobilenet_v1/ssd_mobilenet_v1.yaml +++ b/examples/onnxrt/object_detection/ssd_mobilenet_v1/ssd_mobilenet_v1.yaml @@ -23,7 +23,7 @@ quantization: # optional. tuning constrai approach: post_training_static_quant calibration: sampling_size: 50 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. 
dataset: COCORaw: root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation raw image folder @@ -72,14 +72,14 @@ quantization: # optional. tuning constrai } evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: COCOmAP: {} configs: # optional. if not specified, use all cores in 1 socket. cores_per_instance: 28 num_of_instance: 1 kmp_blocktime: 1 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 16 dataset: COCORaw: diff --git a/examples/onnxrt/object_detection/ssd_mobilenet_v2/main.py b/examples/onnxrt/object_detection/ssd_mobilenet_v2/main.py index c2c6bb9450b..06d9c4c3686 100644 --- a/examples/onnxrt/object_detection/ssd_mobilenet_v2/main.py +++ b/examples/onnxrt/object_detection/ssd_mobilenet_v2/main.py @@ -70,13 +70,13 @@ model = onnx.load(args.model_path) if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) @@ -84,7 +84,7 @@ q_model.save(args.output_model) if args.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(args.config) evaluator.model = common.Model(q_model) evaluator(args.mode) diff --git a/examples/onnxrt/object_detection/ssd_mobilenet_v2/ssd_mobilenet_v2.yaml b/examples/onnxrt/object_detection/ssd_mobilenet_v2/ssd_mobilenet_v2.yaml index 313838b90a8..a815592f0ba 100644 --- a/examples/onnxrt/object_detection/ssd_mobilenet_v2/ssd_mobilenet_v2.yaml +++ b/examples/onnxrt/object_detection/ssd_mobilenet_v2/ssd_mobilenet_v2.yaml @@ -23,7 +23,7 @@ quantization: # optional. tuning constrai approach: post_training_static_quant calibration: sampling_size: 50 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. dataset: COCORaw: root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation raw image folder @@ -72,14 +72,14 @@ quantization: # optional. tuning constrai } evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: COCOmAP: {} configs: # optional. if not specified, use all cores in 1 socket. cores_per_instance: 28 num_of_instance: 1 kmp_blocktime: 1 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. 
batch_size: 16 dataset: COCORaw: diff --git a/examples/onnxrt/onnx_model_zoo/bert-squad/bert.yaml b/examples/onnxrt/onnx_model_zoo/bert-squad/bert.yaml index 402463bcc7f..03eafd7a2b5 100644 --- a/examples/onnxrt/onnx_model_zoo/bert-squad/bert.yaml +++ b/examples/onnxrt/onnx_model_zoo/bert-squad/bert.yaml @@ -17,7 +17,7 @@ model: # mandatory. used to specif name: bert framework: onnxrt_integerops # mandatory. possible values are tensorflow, mxnet, pytorch, pytorch_ipex, onnxrt_integerops and onnxrt_qlinearops. -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. performance: # optional. used to benchmark performance of passing model. warmup: 0 iteration: 100 diff --git a/examples/onnxrt/onnx_model_zoo/bert-squad/main.py b/examples/onnxrt/onnx_model_zoo/bert-squad/main.py index c6bd4976aa2..5d710f34716 100644 --- a/examples/onnxrt/onnx_model_zoo/bert-squad/main.py +++ b/examples/onnxrt/onnx_model_zoo/bert-squad/main.py @@ -115,7 +115,7 @@ def main(): parser.add_argument('--data_dir', type=str, help='datseset path') parser.add_argument('--tune', action='store_true', default=False, - help='run lpot tune') + help='run neural_compressor tune') parser.add_argument('--benchmark', action='store_true', default=False, help='run benchmark') parser.add_argument('--mode', type=str, default='performance', @@ -142,7 +142,7 @@ def eval_func(model): return evaluate_squad(model, eval_dataloader, input_ids, eval_examples, extra_data, input_file) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization('./bert.yaml') quantize.model = common.Model(model) quantize.calib_dataloader = eval_dataloader @@ -158,13 +158,13 @@ def eval_func(model): if args.benchmark and args.mode == "performance": model = onnx.load(args.model_path) - from lpot.experimental.data.datasets.dummy_dataset import DummyDataset - from lpot.experimental.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader + from neural_compressor.experimental.data.datasets.dummy_dataset import DummyDataset + from neural_compressor.experimental.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader shapes, lows, highs = parse_dummy_input(model, args.benchmark_nums, max_seq_length) dummy_dataset = DummyDataset(shapes, low=lows, high=highs, dtype="int64", label=True) dummy_dataloader = ONNXRTDataLoader(dummy_dataset) - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.b_dataloader = dummy_dataloader evaluator.model = common.Model(model) diff --git a/examples/onnxrt/onnx_model_zoo/bert-squad/readme.md b/examples/onnxrt/onnx_model_zoo/bert-squad/readme.md index f600c8f75fd..c2361eb20c0 100644 --- a/examples/onnxrt/onnx_model_zoo/bert-squad/readme.md +++ b/examples/onnxrt/onnx_model_zoo/bert-squad/readme.md @@ -23,7 +23,7 @@ Download BERT-Squad from [onnx model zoo](https://github.com/onnx/models/tree/ma wget https://github.com/onnx/models/blob/master/text/machine_comprehension/bert-squad/model/bertsquad-10.onnx ``` -Update BERT-Squad model opset version to 12 due to lpot requirement. +Update BERT-Squad model opset version to 12 due to neural_compressor requirement. 
```python import onnx diff --git a/examples/onnxrt/onnx_model_zoo/gpt2/gpt2.py b/examples/onnxrt/onnx_model_zoo/gpt2/gpt2.py index 04c4df55f7b..02449c283cf 100644 --- a/examples/onnxrt/onnx_model_zoo/gpt2/gpt2.py +++ b/examples/onnxrt/onnx_model_zoo/gpt2/gpt2.py @@ -200,7 +200,7 @@ def main(): parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--tune',action='store_true', default=False, - help='Get bert tuning quantization model with lpot.') + help='Get bert tuning quantization model with neural_compressor.') parser.add_argument('--config',type=str, help='Tuning config file path') parser.add_argument('--output_model',type=str, default='gpt2_tune.onnx', @@ -260,7 +260,7 @@ def eval_func(model): optimization_options=opt_options) model = model_optimizer.model - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) quantize.calib_dataloader = common.DataLoader(ds, batch_size=args.per_gpu_eval_batch_size) diff --git a/examples/onnxrt/onnx_model_zoo/mobilebert/main.py b/examples/onnxrt/onnx_model_zoo/mobilebert/main.py index f7ba6dfb12d..fd77db77ad3 100644 --- a/examples/onnxrt/onnx_model_zoo/mobilebert/main.py +++ b/examples/onnxrt/onnx_model_zoo/mobilebert/main.py @@ -109,7 +109,7 @@ def main(): parser.add_argument('--data_dir', type=str, help='datseset path') parser.add_argument('--tune', action='store_true', default=False, - help='run lpot tune') + help='run neural_compressor tune') parser.add_argument('--benchmark', action='store_true', default=False, help='run benchmark') parser.add_argument('--mode', type=str, default='performance', @@ -136,7 +136,7 @@ def eval_func(model): return evaluate_squad(model, eval_dataloader, input_ids, eval_examples, extra_data, input_file) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) quantize.calib_dataloader = eval_dataloader @@ -152,13 +152,13 @@ def eval_func(model): if args.benchmark and args.mode == "performance": model = onnx.load(args.model_path) - from lpot.experimental.data.datasets.dummy_dataset import DummyDataset - from lpot.experimental.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader + from neural_compressor.experimental.data.datasets.dummy_dataset import DummyDataset + from neural_compressor.experimental.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader shapes, lows, highs = parse_dummy_input(model, args.benchmark_nums, max_seq_length) dummy_dataset = DummyDataset(shapes, low=lows, high=highs, dtype="int32", label=True) dummy_dataloader = ONNXRTDataLoader(dummy_dataset) - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.b_dataloader = dummy_dataloader evaluator.model = common.Model(model) diff --git a/examples/onnxrt/onnx_model_zoo/mobilebert/mobilebert.yaml b/examples/onnxrt/onnx_model_zoo/mobilebert/mobilebert.yaml index b71ddda6d22..d95e7a29757 100644 --- a/examples/onnxrt/onnx_model_zoo/mobilebert/mobilebert.yaml +++ b/examples/onnxrt/onnx_model_zoo/mobilebert/mobilebert.yaml @@ -19,7 +19,7 @@ model: # mandatory. used to specif name: mobilebert framework: onnxrt_integerops # mandatory. 
possible values are tensorflow, mxnet, pytorch, pytorch_ipex, onnxrt_integerops and onnxrt_qlinearops. -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. performance: # optional. used to benchmark performance of passing model. warmup: 0 iteration: 100 diff --git a/examples/onnxrt/onnx_model_zoo/resnet50/main.py b/examples/onnxrt/onnx_model_zoo/resnet50/main.py index 10fc69a69a1..bda98469150 100644 --- a/examples/onnxrt/onnx_model_zoo/resnet50/main.py +++ b/examples/onnxrt/onnx_model_zoo/resnet50/main.py @@ -71,13 +71,13 @@ model = onnx.load(args.model_path) if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) @@ -85,7 +85,7 @@ q_model.save(args.output_model) if args.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(args.config) evaluator.model = common.Model(q_model) evaluator(args.mode) diff --git a/examples/onnxrt/onnx_model_zoo/resnet50/resnet50_v1_5.yaml b/examples/onnxrt/onnx_model_zoo/resnet50/resnet50_v1_5.yaml index f117be5c706..966dabfbd9c 100644 --- a/examples/onnxrt/onnx_model_zoo/resnet50/resnet50_v1_5.yaml +++ b/examples/onnxrt/onnx_model_zoo/resnet50/resnet50_v1_5.yaml @@ -40,8 +40,8 @@ quantization: # optional. tuning constrai perm: [2, 0, 1] Cast: dtype: float32 -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/onnxrt/onnx_model_zoo/shufflenet/main.py b/examples/onnxrt/onnx_model_zoo/shufflenet/main.py index 8b4ebb79fb2..23be04c3f9d 100644 --- a/examples/onnxrt/onnx_model_zoo/shufflenet/main.py +++ b/examples/onnxrt/onnx_model_zoo/shufflenet/main.py @@ -71,13 +71,13 @@ model = onnx.load(args.model_path) if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) evaluator(args.mode) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) diff --git a/examples/onnxrt/onnx_model_zoo/shufflenet/shufflenetv2.yaml b/examples/onnxrt/onnx_model_zoo/shufflenet/shufflenetv2.yaml index 90ee49a1f0a..8d7cab0cb4e 100644 --- a/examples/onnxrt/onnx_model_zoo/shufflenet/shufflenetv2.yaml +++ b/examples/onnxrt/onnx_model_zoo/shufflenet/shufflenetv2.yaml @@ -40,8 +40,8 @@ quantization: # optional. tuning constrai perm: [2, 0, 1] Cast: dtype: float32 -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. 
required if user doesn't provide eval_func in lpot.Quantization.
+evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
+  accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
   metric:
     topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric.
   dataloader:
diff --git a/examples/onnxrt/onnx_model_zoo/vgg16/README.md b/examples/onnxrt/onnx_model_zoo/vgg16/README.md
index 7dee42403b1..bd6f6379844 100644
--- a/examples/onnxrt/onnx_model_zoo/vgg16/README.md
+++ b/examples/onnxrt/onnx_model_zoo/vgg16/README.md
@@ -25,7 +25,7 @@ bash run_tuning.sh --input_model=path/to/model \ # model path as *.onnx
> Advanced usage
>
-> Replace the level of 'graph_optimization' in env_path/lpot/adaptor/onnxrt_qlinear.yaml with 'ENABLE_BASIC' can get a lighter quantized model.
+> Replacing the level of 'graph_optimization' in env_path/neural_compressor/adaptor/onnxrt_qlinear.yaml with 'ENABLE_BASIC' can produce a lighter quantized model.
### Performance
Usually we need to bind the program to specific cores (e.g., 4 cores) to get stable performance in real production environments.
diff --git a/examples/onnxrt/onnx_model_zoo/vgg16/main.py b/examples/onnxrt/onnx_model_zoo/vgg16/main.py
index 5f9d25dc2d4..c786a6b2951 100644
--- a/examples/onnxrt/onnx_model_zoo/vgg16/main.py
+++ b/examples/onnxrt/onnx_model_zoo/vgg16/main.py
@@ -71,13 +71,13 @@
model = onnx.load(args.model_path)
if args.benchmark:
-    from lpot.experimental import Benchmark, common
+    from neural_compressor.experimental import Benchmark, common
    evaluator = Benchmark(args.config)
    evaluator.model = common.Model(model)
    evaluator(args.mode)
if args.tune:
-    from lpot.experimental import Quantization, common
+    from neural_compressor.experimental import Quantization, common
    quantize = Quantization(args.config)
    quantize.model = common.Model(model)
diff --git a/examples/onnxrt/onnx_model_zoo/vgg16/vgg16.yaml b/examples/onnxrt/onnx_model_zoo/vgg16/vgg16.yaml
index c223e4525be..5b1b165fdb7 100644
--- a/examples/onnxrt/onnx_model_zoo/vgg16/vgg16.yaml
+++ b/examples/onnxrt/onnx_model_zoo/vgg16/vgg16.yaml
@@ -43,8 +43,8 @@ quantization: # optional. tuning constrai
      Cast:
        dtype: float32
-evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization.
-  accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization.
+evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
+  accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
   metric:
     topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric.
dataloader: diff --git a/examples/onnxrt/oob_general.py b/examples/onnxrt/oob_general.py index 36138de5d9d..8a8ffc510cf 100644 --- a/examples/onnxrt/oob_general.py +++ b/examples/onnxrt/oob_general.py @@ -75,7 +75,7 @@ def parse_dummy_input(model, benchmark_nums): default=None, help='Pre-trained bert model onnx file.') parser.add_argument('--tune',action='store_true', default=False, - help='Get bert tuning quantization model with lpot.') + help='Get bert tuning quantization model with neural_compressor.') parser.add_argument('--config',type=str, default=None, help='Tuning config file path') parser.add_argument('--output_model',type=str, default=None, @@ -102,8 +102,8 @@ def parse_dummy_input(model, benchmark_nums): input_shapes = [shape.split('x') for shape in input_shapes] shapes = [tuple([args.benchmark_nums] + [int(dim) for dim in shape]) for shape in input_shapes] - from lpot.experimental.data.datasets.dummy_dataset import DummyDataset - from lpot.experimental.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader + from neural_compressor.experimental.data.datasets.dummy_dataset import DummyDataset + from neural_compressor.experimental.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader dummy_dataset = DummyDataset(shapes, low=lows, high=highs, dtype=dtypes, label=True) dummy_dataloader = ONNXRTDataLoader(dummy_dataset, batch_size=args.eval_batch_size) @@ -111,7 +111,7 @@ def eval_func(model): return evaluate_onnxrt(model, dummy_dataloader, reference) if args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(args.config) evaluator.model = common.Model(model) evaluator.b_dataloader = dummy_dataloader @@ -119,7 +119,7 @@ def eval_func(model): if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantize = Quantization(args.config) quantize.model = common.Model(model) quantize.calib_dataloader = dummy_dataloader diff --git a/examples/pytorch/eager/blendcnn/distillation/README.md b/examples/pytorch/eager/blendcnn/distillation/README.md index f1e01283c31..debd4a67c12 100644 --- a/examples/pytorch/eager/blendcnn/distillation/README.md +++ b/examples/pytorch/eager/blendcnn/distillation/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch BlendCNN distillation(with MRPC dataset) results with Intel® Low Precision Optimization Tool. +This document describes the step-by-step instructions for reproducing PyTorch BlendCNN distillation(with MRPC dataset) results with Intel® Neural Compressor. 
# Prerequisite
diff --git a/examples/pytorch/eager/blendcnn/distillation/distill.py b/examples/pytorch/eager/blendcnn/distillation/distill.py
index 518a5085d18..d33d85a50cd 100644
--- a/examples/pytorch/eager/blendcnn/distillation/distill.py
+++ b/examples/pytorch/eager/blendcnn/distillation/distill.py
@@ -103,7 +103,7 @@ def get_logits(model, batch):
    tensors = TensorDataset(teacher_logits, *dataset.get_tensors()) # To Tensors
    train_dataloader = data_iter = DataLoader(tensors, batch_size=cfg_optim.batch_size, shuffle=False)
-    from lpot.experimental.common.criterion import PyTorchKnowledgeDistillationLoss
+    from neural_compressor.experimental.common.criterion import PyTorchKnowledgeDistillationLoss
    criterion = PyTorchKnowledgeDistillationLoss(temperature=args.temperature,
                                                 loss_types=args.loss_types,
                                                 loss_weights=args.loss_weights)
@@ -143,7 +143,7 @@ def evaluate(model, batch):
    # train_loop.train(get_loss, None, None) # not use pretrain file
    # print("Training has been done properly.")
-    from lpot.experimental import Distillation, common
+    from neural_compressor.experimental import Distillation, common
    distiller = Distillation(args.distillation_yaml)
    def train_func(model):
@@ -228,7 +228,7 @@ def eval_func(model):
    parser.add_argument("--warmup", type=int, default=10, help="warmup for performance")
    parser.add_argument("--distillation_yaml", default='./distillation.yaml', type=str, metavar='PATH',
-                        help='path to Intel® Low Precision Optimization Tool config file')
+                        help='path to Intel® Neural Compressor config file')
    parser.add_argument("--temperature", default=1, type=float,
                        help='temperature parameter of distillation')
diff --git a/examples/pytorch/eager/blendcnn/ptq/README.md b/examples/pytorch/eager/blendcnn/ptq/README.md
index 82e8932e71a..25b68db5c27 100644
--- a/examples/pytorch/eager/blendcnn/ptq/README.md
+++ b/examples/pytorch/eager/blendcnn/ptq/README.md
@@ -1,13 +1,13 @@
Step-by-Step
============
-This document describes the step-by-step instructions for reproducing PyTorch BlendCNN tuning(with MRPC dataset) results with Intel® Low Precision Optimization Tool.
+This document describes the step-by-step instructions for reproducing PyTorch BlendCNN tuning (with the MRPC dataset) results with Intel® Neural Compressor.
> **Note**
>
> The PyTorch quantization implementation in the imperative path has a limitation on automatic execution:
> it requires manually adding QuantStub and DequantStub for quantizable ops, and it also requires manually performing the fusion operation.
-> Intel® Low Precision Optimization Tool has no capability to solve this framework limitation. Intel® Low Precision Optimization Tool supposes user have done these two steps before invoking Intel® Low Precision Optimization Tool interface.
+> Intel® Neural Compressor cannot remove this framework limitation; it assumes the user has completed these two steps before invoking the Intel® Neural Compressor interface.
> For details, please refer to https://pytorch.org/docs/stable/quantization.html
# Prerequisite
@@ -79,20 +79,20 @@ python distill.py --loss_weights 0.1 0.9
```
Following the above steps, you will find the distilled BlendCNN model weights best_model_weights.pt in `./models/blendcnn/`.
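For orientation, the distillation pieces touched in distill.py above fit together roughly as follows. This is only a sketch: the student model and the training callback are placeholders, and the attribute name for attaching the callback (`train_func`) is an assumption inferred from the patterns in this diff, not a verbatim API reference.

```python
from neural_compressor.experimental import Distillation, common
from neural_compressor.experimental.common.criterion import PyTorchKnowledgeDistillationLoss

# Combine hard-label cross entropy with KL divergence against the teacher logits.
criterion = PyTorchKnowledgeDistillationLoss(temperature=1.0,
                                             loss_types=['CE', 'KL'],
                                             loss_weights=[0.5, 0.5])

def train_func(model):
    # Placeholder training loop that optimizes `criterion` over the
    # teacher-logit dataloader built in distill.py.
    ...

distiller = Distillation('./distillation.yaml')
distiller.model = common.Model(student_model)  # placeholder student network
distiller.train_func = train_func             # assumed attachment point
distilled_model = distiller()
```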
-Examples of enabling Intel® Low Precision Optimization Tool auto tuning on PyTorch ResNest
+Examples of enabling Intel® Neural Compressor auto tuning on PyTorch ResNest
===========================================================================================
-This is a tutorial of how to enable a PyTorch classification model with Intel® Low Precision Optimization Tool.
+This is a tutorial of how to enable a PyTorch classification model with Intel® Neural Compressor.
## User Code Analysis
-Intel® Low Precision Optimization Tool supports three usages:
+Intel® Neural Compressor supports three usages:
1. The user only provides an fp32 "model" and configures the calibration dataset, evaluation dataset, and metric in a model-specific yaml config file.
2. The user provides an fp32 "model", a calibration dataset "q_dataloader", and an evaluation dataset "eval_dataloader", and configures the metric in the tuning.metric field of the model-specific yaml config file.
3. The user specifies an fp32 "model", a calibration dataset "q_dataloader", and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself.
-As ResNest series are typical classification models, use Top-K as metric which is built-in supported by Intel® Low Precision Optimization Tool. So here we integrate PyTorch ResNest with Intel® Low Precision Optimization Tool by the first use case for simplicity.
+As the ResNest series are typical classification models, we use Top-K as the metric, which is built into Intel® Neural Compressor. So here we integrate PyTorch ResNest with Intel® Neural Compressor through the first use case for simplicity.
### Write Yaml config file
@@ -128,9 +128,9 @@ It's intrinsic limitation of PyTorch quantization imperative path. No way to dev
After the prepare step is done, we just need to update classify.py as below.
``` -from lpot.experimental import Quantization +from neural_compressor.experimental import Quantization dataloader = Bert_DataLoader(loader=data_iter, batch_size=args.batch_size) -quantizer = Quantization(args.lpot_yaml) +quantizer = Quantization(args.nc_yaml) quantizer.model = model quantizer.calib_dataloader = dataloader quantizer.eval_func = eval_func diff --git a/examples/pytorch/eager/blendcnn/ptq/classify.py b/examples/pytorch/eager/blendcnn/ptq/classify.py index b86f1ff6359..214dbecf912 100644 --- a/examples/pytorch/eager/blendcnn/ptq/classify.py +++ b/examples/pytorch/eager/blendcnn/ptq/classify.py @@ -196,8 +196,8 @@ def eval_func(model): # print(f"Accuracy: {total_accuracy}") if args.tune: - from lpot.experimental import Quantization - # lpot tune + from neural_compressor.experimental import Quantization + # neural_compressor tune model.load_state_dict(torch.load(args.input_model)) dataloader = Bert_DataLoader(loader=data_iter, batch_size=args.batch_size) @@ -209,7 +209,7 @@ def eval_func(model): q_model.save(args.tuned_checkpoint) elif args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load int8_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) print(int8_model) @@ -238,7 +238,7 @@ def eval_func(model): parser.add_argument("--output_model", default='', type=str, metavar='PATH', help='path to put tuned model') parser.add_argument("--tune", action='store_true', - help="run Intel® Low Precision Optimization Tool to tune int8 acc.") + help="run Intel® Neural Compressor to tune int8 acc.") parser.add_argument("--warmup", type=int, default=10, help="warmup for performance") parser.add_argument("--iter", default=0, type=int, @@ -250,10 +250,10 @@ def eval_func(model): parser.add_argument("--accuracy_only", dest='accuracy_only', action='store_true', help='For accuracy measurement only.') parser.add_argument("--tuned_yaml", default='./blendcnn.yaml', type=str, metavar='PATH', - help='path to Intel® Low Precision Optimization Tool config file') + help='path to Intel® Neural Compressor config file') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', help='path to checkpoint tuned by (default: ./saved_results)') parser.add_argument('--int8', dest='int8', action='store_true', - help='run Intel® Low Precision Optimization Tool model benchmark') + help='run Intel® Neural Compressor model benchmark') args = parser.parse_args() main(args.model_config, args) diff --git a/examples/pytorch/eager/blendcnn/ptq/requirements.txt b/examples/pytorch/eager/blendcnn/ptq/requirements.txt index f77fceb52a6..244ac6c7c9b 100644 --- a/examples/pytorch/eager/blendcnn/ptq/requirements.txt +++ b/examples/pytorch/eager/blendcnn/ptq/requirements.txt @@ -1,3 +1,3 @@ -lpot +neural-compressor fire intel-tensorflow diff --git a/examples/pytorch/eager/blendcnn/ptq/run_tuning.sh b/examples/pytorch/eager/blendcnn/ptq/run_tuning.sh index 2522acf784a..b37beaa175a 100755 --- a/examples/pytorch/eager/blendcnn/ptq/run_tuning.sh +++ b/examples/pytorch/eager/blendcnn/ptq/run_tuning.sh @@ -2,7 +2,7 @@ set -x function main { - output_model="./lpot_workspace/pytorch/blendcnn/" + output_model="./nc_workspace/pytorch/blendcnn/" init_params "$@" run_tuning diff --git a/examples/pytorch/eager/huggingface_models/README.md b/examples/pytorch/eager/huggingface_models/README.md index 8e757af08ba..93fb6ef6f1f 100644 --- a/examples/pytorch/eager/huggingface_models/README.md +++ 
b/examples/pytorch/eager/huggingface_models/README.md
@@ -54,7 +54,7 @@ pip install torchvision==0.7.0 --no-deps
## 2. Prepare pretrained model
-Before use Intel® Low Precision Optimization Tool, you should fine tune the model to get pretrained model, You should also install the additional packages required by the examples:
+Before using Intel® Neural Compressor, you should fine-tune the model to get a pretrained model. You should also install the additional packages required by the examples:
### Text-classification
@@ -85,7 +85,7 @@ The dev set results will be present within the text file 'eval_results.txt' in t
please refer to [BERT base scripts and instructions](examples/text-classification/README.md#PyTorch version).
-* After fine tuning, you can get a checkpoint dir which include pretrained model, tokenizer and training arguments. This checkpoint dir will be used by lpot tuning as below.
+* After fine-tuning, you get a checkpoint dir which includes the pretrained model, tokenizer, and training arguments. This checkpoint dir will be used by neural_compressor tuning as below.
#### XLM-RoBERTa
For BERT base and glue tasks (task name can be one of CoLA, SST-2, MRPC, STS-B, QQP, MNLI, QNLI, RTE, WNLI...)
@@ -229,7 +229,7 @@ Where summarization dataset can be one of xsum,billsum etc.
Where output_dir is the path of the checkpoint created by fine-tuning.
-* After fine tuning, you can get a checkpoint dir which include pretrained model, tokenizer and training arguments. This checkpoint dir will be used by lpot tuning as below.
+* After fine-tuning, you get a checkpoint dir which includes the pretrained model, tokenizer, and training arguments. This checkpoint dir will be used by neural_compressor tuning as below.
#### Language-modeling
##### Finetune command
@@ -255,7 +255,7 @@ python run_clm.py \
>
> dataset_config_name : just for dialogpt: wikitext-2-raw-v1.
-# Start to lpot tune for Model Quantization
+# Start neural_compressor tuning for Model Quantization
```shell
cd examples/pytorch/eager/huggingface_models
```
@@ -300,7 +300,7 @@ sh run_tuning.sh --topology=topology_name --input_model=/path/to/checkpoint/dir
> /path/to/checkpoint/dir is the path to the finetune output_dir
-# Start to lpot tune for Model Pruning
+# Start neural_compressor tuning for Model Pruning
Below are example NLP tasks for model pruning together with task-specific fine-tuning.
It requires the pre-trained bert-base sparsity model `Intel/bert-base-uncased-sparse-70-unstructured` from the Intel Huggingface portal.
@@ -363,7 +363,7 @@ python examples/text-classification/run_glue_no_trainer_gradient_prune.py
--do_prune --do_eval --output_model /path/to/output/
```
-# Start to lpot tune for Model Distillation
+# Start neural_compressor tuning for Model Distillation
Below are example NLP tasks for model distillation from a task-specific fine-tuned large model to a smaller model.
It requires a pre-trained task-specific model such as `textattack/roberta-base-SST-2` from the textattack Huggingface portal.
@@ -380,14 +380,14 @@ python examples/text-classification/run_glue_no_trainer_distillation.py \
--output_dir /path/to/output_dir --config distillation.yaml --seed 5143
```
-Examples of enabling Intel® Low Precision Optimization Tool
+Examples of enabling Intel® Neural Compressor
============================================================
-This is a tutorial of how to enable BERT model with Intel® Low Precision Optimization Tool.
+This is a tutorial of how to enable a BERT model with Intel® Neural Compressor.
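Before diving into the per-task patches below, it may help to see the whole tune/save/reload cycle they implement in one place. A hedged sketch, assuming a placeholder `model`, `eval_dataset`, and metric callback (none of these names come from the patches themselves):

```python
from neural_compressor.experimental import Quantization, common

quantizer = Quantization('./conf.yaml')
quantizer.model = common.Model(model)  # fp32 Hugging Face model (placeholder)
quantizer.calib_dataloader = common.DataLoader(eval_dataset, batch_size=8)
quantizer.eval_func = compute_accuracy  # placeholder metric callback
q_model = quantizer()
q_model.save('./saved_results')  # the tuned_checkpoint directory

# Later runs rebuild the int8 model from that checkpoint:
from neural_compressor.utils.pytorch import load
int8_model = load('./saved_results', model)
```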
# User Code Analysis
-Intel® Low Precision Optimization Tool supports two usages:
+Intel® Neural Compressor supports two usages:
1. The user specifies an fp32 'model', a calibration dataset 'q_dataloader', an evaluation dataset "eval_dataloader", and metrics in the tuning.metrics field of the model-specific yaml config file.
2. The user specifies an fp32 'model', a calibration dataset 'q_dataloader', and a custom "eval_func" which encapsulates the evaluation dataset and metrics by itself.
@@ -419,7 +419,7 @@ tuning:
Here we set the accuracy target to tolerate a 0.01 relative accuracy loss against the baseline. The default tuning strategy is the basic strategy. A timeout of 0 means tuning stops early as soon as a tuning config meets the accuracy target.
-> **Note** : lpot does NOT support "mse" tuning strategy for pytorch framework
+> **Note** : neural_compressor does NOT support the "mse" tuning strategy for the pytorch framework
### Code Prepare
We just need to update run_squad_tune.py and run_glue_tune.py as below
```python
if training_args.tune:
-    def eval_func_for_lpot(model_tuned):
+    def eval_func_for_nc(model_tuned):
        trainer = Trainer(
            model=model_tuned,
            args=training_args,
@@ -445,14 +445,14 @@ if training_args.tune:
            acc = result[key]
            break
        return acc
-    from lpot.experimental import Quantization, common
+    from neural_compressor.experimental import Quantization, common
    quantizer = Quantization("./conf.yaml")
    calibration_dataset = quantizer.dataset('bert', dataset=eval_dataset,
                                            task="classifier", model_type=config.model_type)
    quantizer.model = common.Model(model)
    quantizer.calib_dataloader = common.DataLoader(
        calibration_dataset, batch_size=training_args.per_device_eval_batch_size)
-    quantizer.eval_func = eval_func_for_lpot
+    quantizer.eval_func = eval_func_for_nc
    q_model = quantizer()
    q_model.save(training_args.tuned_checkpoint)
    exit(0)
@@ -462,7 +462,7 @@ For the seq2seq task, we need to update run_seq2seq_tune.py as below
```python
if training_args.tune:
-    def eval_func_for_lpot(model):
+    def eval_func_for_nc(model):
        trainer.model = model
        results = trainer.evaluate(
            eval_dataset=eval_dataset,metric_key_prefix="val", max_length=data_args.val_max_target_length, num_beams=data_args.eval_beams
@@ -480,15 +480,15 @@ if training_args.tune:
        acc = sum([v for k,v in results.items() if "rouge" in k])/4
        break
        return acc
-    from lpot.experimental import Quantization, common
+    from neural_compressor.experimental import Quantization, common
    quantizer = Quantization("./conf.yaml")
    quantizer.model = common.Model(model)
    quantizer.calib_dataloader = common.DataLoader(
        eval_dataset,
        batch_size=training_args.eval_batch_size,
-        collate_fn=Seq2SeqDataCollator_lpot(tokenizer, data_args, training_args.tpu_num_cores)
+        collate_fn=Seq2SeqDataCollator_nc(tokenizer, data_args, training_args.tpu_num_cores)
    )
-    quantizer.eval_func = eval_func_for_lpot
+    quantizer.eval_func = eval_func_for_nc
    q_model = quantizer()
    q_model.save(training_args.tuned_checkpoint)
    exit(0)
@@ -498,7 +498,7 @@ For the language modeling task, we need to update run_clm_tune.py as below
```python
if training_args.tune:
-    def eval_func_for_lpot(model_tuned):
+    def eval_func_for_nc(model_tuned):
        trainer = Trainer(
            model=model_tuned,
            args=training_args,
@@ -519,15 +519,15 @@ if training_args.tune:
            perplexity = results[key]
            break
        return 100-perplexity
-    from lpot.experimental import Quantization, common
+    from neural_compressor.experimental import Quantization, common
    quantizer = Quantization("./conf.yaml")
    quantizer.model = common.Model(model)
    quantizer.calib_dataloader = common.DataLoader(
        eval_dataset,
batch_size=training_args.eval_batch_size, - collate_fn=default_data_collator_lpot + collate_fn=default_data_collator_nc ) - quantizer.eval_func = eval_func_for_lpot + quantizer.eval_func = eval_func_for_nc q_model = quantizer() q_model.save(training_args.tuned_checkpoint) exit(0) diff --git a/examples/pytorch/eager/huggingface_models/examples/language-modeling/run_clm_tune.py b/examples/pytorch/eager/huggingface_models/examples/language-modeling/run_clm_tune.py index a46d876f2c9..ae740356323 100755 --- a/examples/pytorch/eager/huggingface_models/examples/language-modeling/run_clm_tune.py +++ b/examples/pytorch/eager/huggingface_models/examples/language-modeling/run_clm_tune.py @@ -44,7 +44,7 @@ set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process -from transformers.data.data_collator import default_data_collator_lpot +from transformers.data.data_collator import default_data_collator_nc logger = logging.getLogger(__name__) @@ -366,7 +366,7 @@ def group_texts(examples): train_dataset = lm_datasets["train"] eval_dataset = lm_datasets["validation"] if training_args.tune: - def eval_func_for_lpot(model_tuned): + def eval_func_for_nc(model_tuned): trainer = Trainer( model=model_tuned, args=training_args, @@ -387,22 +387,22 @@ def eval_func_for_lpot(model_tuned): perplexity = results[key] break return 100-perplexity - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) quantizer.calib_dataloader = common.DataLoader( eval_dataset, batch_size=training_args.eval_batch_size, - collate_fn=default_data_collator_lpot + collate_fn=default_data_collator_nc ) - quantizer.eval_func = eval_func_for_lpot + quantizer.eval_func = eval_func_for_nc q_model = quantizer() q_model.save(training_args.tuned_checkpoint) exit(0) if training_args.accuracy_only: if training_args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(training_args.tuned_checkpoint)), model) else: @@ -432,7 +432,7 @@ def eval_func_for_lpot(model_tuned): if training_args.benchmark: if training_args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(training_args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/huggingface_models/examples/question-answering/run_qa_no_trainer_prune.py b/examples/pytorch/eager/huggingface_models/examples/question-answering/run_qa_no_trainer_prune.py index c53ea3dc710..491b6f5b485 100755 --- a/examples/pytorch/eager/huggingface_models/examples/question-answering/run_qa_no_trainer_prune.py +++ b/examples/pytorch/eager/huggingface_models/examples/question-answering/run_qa_no_trainer_prune.py @@ -772,7 +772,7 @@ def eval_func(model): post_processing_function, prune) if args.prune: - from lpot.experimental import Pruning, common + from neural_compressor.experimental import Pruning, common prune = Pruning(args.config) prune.model = common.Model(model) prune.train_dataloader = train_dataloader diff --git a/examples/pytorch/eager/huggingface_models/examples/seq2seq/run_seq2seq_tune.py b/examples/pytorch/eager/huggingface_models/examples/seq2seq/run_seq2seq_tune.py index ce8673ab2c7..4b69462164f 100755 --- a/examples/pytorch/eager/huggingface_models/examples/seq2seq/run_seq2seq_tune.py +++ 
b/examples/pytorch/eager/huggingface_models/examples/seq2seq/run_seq2seq_tune.py @@ -34,7 +34,7 @@ from transformers.training_args import ParallelMode from utils import ( Seq2SeqDataCollator, - Seq2SeqDataCollator_lpot, + Seq2SeqDataCollator_nc, Seq2SeqDataset, assert_all_frozen, build_compute_metrics_fn, @@ -316,7 +316,7 @@ def main(): tokenizer.save_pretrained(training_args.output_dir) if training_args.tune: - def eval_func_for_lpot(model): + def eval_func_for_nc(model): trainer.model = model results = trainer.evaluate( eval_dataset=eval_dataset,metric_key_prefix="val", max_length=data_args.val_max_target_length, num_beams=data_args.eval_beams @@ -334,22 +334,22 @@ def eval_func_for_lpot(model): acc = sum([v for k,v in results.items() if "rouge" in k])/4 break return acc - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) quantizer.calib_dataloader = common.DataLoader( eval_dataset, batch_size=training_args.eval_batch_size, - collate_fn=Seq2SeqDataCollator_lpot(tokenizer, data_args, training_args.tpu_num_cores) + collate_fn=Seq2SeqDataCollator_nc(tokenizer, data_args, training_args.tpu_num_cores) ) - quantizer.eval_func = eval_func_for_lpot + quantizer.eval_func = eval_func_for_nc q_model = quantizer() q_model.save(training_args.tuned_checkpoint) exit(0) if training_args.benchmark: if training_args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(training_args.tuned_checkpoint)), model) else: @@ -374,7 +374,7 @@ def eval_func_for_lpot(model): if training_args.accuracy_only: if training_args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(training_args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/huggingface_models/examples/seq2seq/utils.py b/examples/pytorch/eager/huggingface_models/examples/seq2seq/utils.py index 1fe515fea5c..691e11e7d24 100644 --- a/examples/pytorch/eager/huggingface_models/examples/seq2seq/utils.py +++ b/examples/pytorch/eager/huggingface_models/examples/seq2seq/utils.py @@ -329,7 +329,7 @@ def _encode(self, batch) -> Dict[str, torch.Tensor]: ) return batch_encoding.data -class Seq2SeqDataCollator_lpot(Seq2SeqDataCollator): +class Seq2SeqDataCollator_nc(Seq2SeqDataCollator): def __call__(self, batch) -> Dict[str, torch.Tensor]: if hasattr(self.tokenizer, "prepare_seq2seq_batch"): batch = self._encode(batch) diff --git a/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_distillation.py b/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_distillation.py index a1fee8cc1d6..110904aa68c 100755 --- a/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_distillation.py +++ b/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_distillation.py @@ -15,7 +15,7 @@ """ Finetuning a 🤗 Transformers model for sequence classification on GLUE.""" import argparse import logging -from lpot.utils.logger import log +from neural_compressor.utils.logger import log import math import os import random @@ -485,8 +485,8 @@ def get_logits(teacher_model, train_dataset): # Do distillation if args.do_distillation: - from lpot.experimental import Distillation, common - from 
lpot.experimental.common.criterion import PyTorchKnowledgeDistillationLoss + from neural_compressor.experimental import Distillation, common + from neural_compressor.experimental.common.criterion import PyTorchKnowledgeDistillationLoss def train_func(model): return take_train_steps(args, model, train_dataloader, lr_scheduler, distiller) diff --git a/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_gradient_prune.py b/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_gradient_prune.py index 79713ce1937..fd4e529517e 100755 --- a/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_gradient_prune.py +++ b/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_gradient_prune.py @@ -498,7 +498,7 @@ def preprocess_function(examples): prune_eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size) prune_eval_dataloader = tqdm(prune_eval_dataloader, desc="Evaluating") - from lpot.experimental import Pruning, common + from neural_compressor.experimental import Pruning, common def train_func(model): return take_train_steps(args, model, prune_eval_dataloader, metric, prune) diff --git a/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_prune.py b/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_prune.py index 3452720866f..64945b07c88 100755 --- a/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_prune.py +++ b/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_prune.py @@ -463,7 +463,7 @@ def eval_func(model): return take_eval_steps(args, model, eval_dataloader, metric, prune) if args.prune: - from lpot.experimental import Pruning, common + from neural_compressor.experimental import Pruning, common prune = Pruning(args.config) prune.model = common.Model(model) prune.train_dataloader = train_dataloader diff --git a/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_tune.py b/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_tune.py index 24afeff2c1c..c3ed2694d01 100755 --- a/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_tune.py +++ b/examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_tune.py @@ -394,7 +394,7 @@ def compute_metrics(p: EvalPrediction): data_collator = None if training_args.tune: - def eval_func_for_lpot(model_tuned): + def eval_func_for_nc(model_tuned): trainer = Trainer( model=model_tuned, args=training_args, @@ -412,8 +412,8 @@ def eval_func_for_lpot(model_tuned): acc = result[key] break return acc - from lpot.experimental import Quantization, common - from transformers.data.data_collator import default_data_collator_lpot + from neural_compressor.experimental import Quantization, common + from transformers.data.data_collator import default_data_collator_nc quantizer = Quantization("./conf.yaml") # calibration_dataset = quantizer.dataset('bert', dataset=eval_dataset, # task="classifier", model_type=config.model_type) @@ -421,16 +421,16 @@ def eval_func_for_lpot(model_tuned): quantizer.calib_dataloader = common.DataLoader( eval_dataset, batch_size=training_args.per_device_eval_batch_size, - collate_fn=default_data_collator_lpot + collate_fn=default_data_collator_nc ) - quantizer.eval_func = 
eval_func_for_lpot + quantizer.eval_func = eval_func_for_nc q_model = quantizer() q_model.save(training_args.tuned_checkpoint) exit(0) if training_args.accuracy_only: if training_args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(training_args.tuned_checkpoint)), model) else: @@ -458,7 +458,7 @@ def eval_func_for_lpot(model_tuned): if training_args.benchmark: if training_args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(training_args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/huggingface_models/src/transformers/data/data_collator.py b/examples/pytorch/eager/huggingface_models/src/transformers/data/data_collator.py index 22d4d2cfa41..3a72e415fa2 100644 --- a/examples/pytorch/eager/huggingface_models/src/transformers/data/data_collator.py +++ b/examples/pytorch/eager/huggingface_models/src/transformers/data/data_collator.py @@ -81,7 +81,7 @@ def default_data_collator(features: List[InputDataClass]) -> Dict[str, torch.Ten return batch -def default_data_collator_lpot(features: List[InputDataClass]) -> Dict[str, torch.Tensor]: +def default_data_collator_nc(features: List[InputDataClass]) -> Dict[str, torch.Tensor]: """ Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named: diff --git a/examples/pytorch/eager/huggingface_models/src/transformers/training_args.py b/examples/pytorch/eager/huggingface_models/src/transformers/training_args.py index 4a4d24ab5a1..109dbb8801c 100644 --- a/examples/pytorch/eager/huggingface_models/src/transformers/training_args.py +++ b/examples/pytorch/eager/huggingface_models/src/transformers/training_args.py @@ -508,7 +508,7 @@ class TrainingArguments: _n_gpu: int = field(init=False, repr=False, default=-1) tune: bool = field( default=False, - metadata={"help": "Whether to run lpot tuning."}) + metadata={"help": "Whether to run neural_compressor tuning."}) benchmark: bool = field( default=False, metadata={"help": "run benchmark."}) @@ -517,13 +517,13 @@ class TrainingArguments: metadata={"help":"run benchmark."}) tuned_checkpoint: str = field( default="./", - metadata={"help":"path to checkpoint tuned by Low Precision Optimization Tool (default: ./)."}) + metadata={"help":"path to checkpoint tuned by Neural Compressor (default: ./)."}) accuracy_only: bool = field( default=False, - metadata={"help":"Whether to only test accuracy for model tuned by Low Precision Optimization Tool."}) + metadata={"help":"Whether to only test accuracy for model tuned by Neural Compressor."}) iters: int = field( default=0, - metadata={"help":"Iteration number for Low Precision Optimization Tool benchmark/accuracy test."} + metadata={"help":"Iteration number for Neural Compressor benchmark/accuracy test."} ) warmup_iter: int = field( default=5, diff --git a/examples/pytorch/eager/image_recognition/efficientnet/README.md b/examples/pytorch/eager/image_recognition/efficientnet/README.md index 6f2d0f33ad1..bf051953f58 100644 --- a/examples/pytorch/eager/image_recognition/efficientnet/README.md +++ b/examples/pytorch/eager/image_recognition/efficientnet/README.md @@ -44,20 +44,20 @@ cd examples/pytorch/eager/image_recognition/efficientnet python validate.py --tune --model mobilenetv3_rw --no-cuda --pretrained /path/to/imagenet ``` -Examples of enabling Intel® Low Precision Optimization Tool 
auto tuning on PyTorch ResNet
+Examples of enabling Intel® Neural Compressor auto tuning on PyTorch ResNet
==========================================================================================
This is a tutorial of how to enable a PyTorch classification model.
# User Code Analysis
-Intel® Low Precision Optimization Tool supports three usages:
+Intel® Neural Compressor supports three usages:
1. The user only provides an fp32 "model" and configures the calibration dataset, evaluation dataset, and metric in a model-specific yaml config file.
2. The user provides an fp32 "model", a calibration dataset "q_dataloader", and an evaluation dataset "eval_dataloader", and configures the metric in the tuning.metric field of the model-specific yaml config file.
3. The user specifies an fp32 "model", a calibration dataset "q_dataloader", and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself.
-As Efficientnet series are typical classification models, use Top-K as metric which is built-in supported by Intel® Low Precision Optimization Tool. So here we integrate PyTorch Efficientnet with Intel® Low Precision Optimization Tool by the first use case for simplicity.
+As the Efficientnet series are typical classification models, we use Top-K as the metric, which is built into Intel® Neural Compressor. So here we integrate PyTorch Efficientnet with Intel® Neural Compressor through the first use case for simplicity.
### Write Yaml Config File
@@ -85,8 +85,8 @@ quantization: # optional. tuning constrai
      mean: [0.485, 0.456, 0.406]
      std: [0.229, 0.224, 0.225]
-evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization.
-  accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization.
+evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
+  accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
   metric:
     topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric.
   dataloader:
@@ -148,7 +148,7 @@ After the prepare step is done, we just need to update main.py as below.
```python
model.eval()
model.fuse_model()
-from lpot.experimental import Quantization, common
+from neural_compressor.experimental import Quantization, common
quantizer = Quantization("./conf_efficientnet_b0.yaml")
quantizer.model = common.Model(model)
q_model = quantizer()
diff --git a/examples/pytorch/eager/image_recognition/efficientnet/conf_efficientnet_b0.yaml b/examples/pytorch/eager/image_recognition/efficientnet/conf_efficientnet_b0.yaml
index 138a09612f6..10fc5968abe 100644
--- a/examples/pytorch/eager/image_recognition/efficientnet/conf_efficientnet_b0.yaml
+++ b/examples/pytorch/eager/image_recognition/efficientnet/conf_efficientnet_b0.yaml
@@ -34,8 +34,8 @@ quantization: # optional. tuning constrai
      mean: [0.485, 0.456, 0.406]
      std: [0.229, 0.224, 0.225]
-evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization.
-  accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization.
+evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
+  accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
   metric:
     topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric.
dataloader: diff --git a/examples/pytorch/eager/image_recognition/efficientnet/conf_mobilenetv3_rw.yaml b/examples/pytorch/eager/image_recognition/efficientnet/conf_mobilenetv3_rw.yaml index f50cd7b22f5..9f61007e819 100644 --- a/examples/pytorch/eager/image_recognition/efficientnet/conf_mobilenetv3_rw.yaml +++ b/examples/pytorch/eager/image_recognition/efficientnet/conf_mobilenetv3_rw.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/efficientnet/validate.py b/examples/pytorch/eager/image_recognition/efficientnet/validate.py index 59c7095875a..5bd12b1f8e5 100644 --- a/examples/pytorch/eager/image_recognition/efficientnet/validate.py +++ b/examples/pytorch/eager/image_recognition/efficientnet/validate.py @@ -64,7 +64,7 @@ help='use tensorflow mnasnet preporcessing') parser.add_argument('--no-cuda', dest='no_cuda', action='store_true', help='') -parser.add_argument('--tune', action='store_true', help='int8 quantization tune with lpot') +parser.add_argument('--tune', action='store_true', help='int8 quantization tune with neural_compressor') parser.add_argument('-i', "--iter", default=0, type=int, help='For accuracy measurement only.') parser.add_argument('-w', "--warmup_iter", default=5, type=int, @@ -72,7 +72,7 @@ parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool' + help='path to checkpoint tuned by Neural Compressor' ' (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark for int8') @@ -121,7 +121,7 @@ def main(): model.eval() model.fuse_model() conf_yaml = "conf_" + args.model + ".yaml" - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(conf_yaml) quantizer.model = common.Model(model) q_model = quantizer() @@ -149,7 +149,7 @@ def main(): model.eval() model.fuse_model() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/distillation/conf.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/distillation/conf.yaml index 8e30d7be2a6..536e4db0bca 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/distillation/conf.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/distillation/conf.yaml @@ -50,8 +50,8 @@ distillation: loss_types: ['CE', 'KL'] loss_weights: [0.5, 0.5] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. 
required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/distillation/main.py b/examples/pytorch/eager/image_recognition/imagenet/cpu/distillation/main.py index 995591db70e..27c72855057 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/distillation/main.py +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/distillation/main.py @@ -6,7 +6,7 @@ import warnings import torch import torchvision.models.resnet as models -from lpot.utils import logger +from neural_compressor.utils import logger model_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith("__") @@ -86,7 +86,7 @@ def main_worker(args): print("=> no checkpoint found at '{}'".format(args.resume)) if args.distillation: - from lpot.experimental import Distillation, common + from neural_compressor.experimental import Distillation, common distiller = Distillation(args.config) distiller.model = common.Model(model) diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/distributed/conf_buildin.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/distributed/conf_buildin.yaml index 53f766850b5..2ba38ea4a07 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/distributed/conf_buildin.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/distributed/conf_buildin.yaml @@ -46,8 +46,8 @@ quantization: # optional. required for QA CrossEntropyLoss: reduction: mean -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. 
dataloader: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/distributed/main_buildin.py b/examples/pytorch/eager/image_recognition/imagenet/cpu/distributed/main_buildin.py index 10f7c49ae82..f245b34585e 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/distributed/main_buildin.py +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/distributed/main_buildin.py @@ -64,7 +64,7 @@ parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') @@ -122,7 +122,7 @@ def main_worker(args): print("=> no checkpoint found at '{}'".format(args.resume)) model.module.fuse_model() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(args.config) quantizer.model = common.Model(model) q_model = quantizer() diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune/conf.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune/conf.yaml index b00ef008f14..ced330a8cfc 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune/conf.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune/conf.yaml @@ -66,8 +66,8 @@ pruning: update_frequency: 2 names: ['layer1.0.conv2.weight'] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune/main.py b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune/main.py index 263bbabd3ad..0a8b411571e 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune/main.py +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune/main.py @@ -76,7 +76,7 @@ def main_worker(args): print("=> no checkpoint found at '{}'".format(args.resume)) if args.prune: - from lpot.experimental import Pruning, common + from neural_compressor.experimental import Pruning, common prune = Pruning(args.config) prune.model = common.Model(model) diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/README.md b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/README.md index d1fed906782..37f213ec35b 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/README.md +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/README.md @@ -1,12 +1,12 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch ResNet50 prune and PTQ results with Intel® Low Precision Optimization Tool(LPOT). +This document describes the step-by-step instructions for reproducing PyTorch ResNet50 prune and PTQ results with Intel® Neural Compressor. 
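The prune/main.py hunk above ends right after the Pruning object is bound to the model. For orientation, here is a minimal sketch of how that standalone pruning entry point is typically driven end to end; only the `Pruning(...)` and `prune.model` lines appear verbatim in the patch, while the final call and the save path follow the pattern of the quantization examples elsewhere in this diff and are assumptions.

```python
# Minimal sketch of the standalone magnitude-pruning flow (assumed wiring;
# only the Pruning(...) / prune.model lines appear verbatim in the diff).
import torchvision.models.resnet as models
from neural_compressor.experimental import Pruning, common

model = models.resnet50(pretrained=True)  # fp32 baseline, as in main.py

prune = Pruning('./prune_conf.yaml')      # sparsity goal and weight names live in the yaml
prune.model = common.Model(model)         # wrap the model, as the hunk above shows
pruned_model = prune()                    # assumed: components are callable, like quantizer()
pruned_model.save('./saved_results')      # assumed: same save API as the quantized model
```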
> **Note** > > * PyTorch quantization implementation in imperative path has limitation on automatically execution. It requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. -> * LPOT supposes user have done these two steps before invoking LPOT interface. +> * Neural Compressor assumes the user has completed these two steps before invoking the Neural Compressor interface. > For details, please refer to https://pytorch.org/docs/stable/quantization.html # Prerequisite @@ -55,7 +55,7 @@ python main.py -t -a resnet50 --pretrained /path/to/imagenet In examples directory, there are two yaml templates `prune_conf.yaml` and `ptq_conf.yaml` which are used in pruning and post training quantization. Users can remove some of the items in the yaml and keep only the mandatory items. -LPOT defined Scheduler to do both prune and PTQ in one turn. It is sufficient to add following lines of code to execute pruning and PTQ in scheduler. +Neural Compressor defines a Scheduler to run both pruning and PTQ in one pass. It is sufficient to add the following lines of code to execute pruning and PTQ in the scheduler. ``` prune = Pruning('./prune_conf.yaml') quantizer = Quantization('./ptq_conf.yaml') diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/main.py b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/main.py index 66c91a67ff0..5c1e883a253 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/main.py +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/main.py @@ -43,7 +43,7 @@ parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') def main(): args = parser.parse_args() @@ -70,9 +70,9 @@ def main(): print('using CPU...') if args.tune and args.prune: - from lpot.experimental.scheduler import Scheduler - from lpot.experimental import Quantization, Pruning, common - from lpot.adaptor.tf_utils.util import is_saved_model_format, is_ckpt_format + from neural_compressor.experimental.scheduler import Scheduler + from neural_compressor.experimental import Quantization, Pruning, common + from neural_compressor.adaptor.tf_utils.util import is_saved_model_format, is_ckpt_format prune = Pruning('./prune_conf.yaml') quantizer = Quantization('./ptq_conf.yaml') scheduler = Scheduler() @@ -84,7 +84,7 @@ def main(): return elif args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common model.eval() model.fuse_model() quantizer = Quantization("./ptq_conf.yaml") @@ -94,7 +94,7 @@ def main(): return elif args.prune: - from lpot.experimental import Pruning, common + from neural_compressor.experimental import Pruning, common prune = Pruning('./prune_conf.yaml') prune.model = common.Model(model) diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/prune_conf.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/prune_conf.yaml index d87a20142a1..bf770744f8f 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/prune_conf.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/prune_conf.yaml @@ -56,8 +56,8 @@ pruning: prune_type: basic_magnitude names:
['layer1.0.conv1.weight', 'layer1.0.conv2.weight'] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/ptq_conf.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/ptq_conf.yaml index 4386c6f9737..c6b60803bd2 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/ptq_conf.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/ptq_conf.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/requirements.txt b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/requirements.txt index b6bbfd4a50b..891fd04fd12 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/requirements.txt +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/prune_and_ptq/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.5.0+cpu torchvision==0.6.0+cpu diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/README.md b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/README.md index a7e9fce2955..d80aa73f138 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/README.md +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/README.md @@ -1,12 +1,12 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch ResNet50/ResNet18/ResNet101 tuning results with Intel® Low Precision Optimization Tool(LPOT). +This document describes the step-by-step instructions for reproducing PyTorch ResNet50/ResNet18/ResNet101 tuning results with Intel® Neural Compressor. > **Note** > > * PyTorch quantization implementation in imperative path has limitation on automatically execution. It requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. -> * LPOT supposes user have done these two steps before invoking LPOT interface. +> * Neural Compressor assumes the user has completed these two steps before invoking the Neural Compressor interface.
> For details, please refer to https://pytorch.org/docs/stable/quantization.html # Prerequisite @@ -73,26 +73,26 @@ python main.py -t -a mobilenet_v2 --pretrained /path/to/imagenet # Saving and loading model: * Saving model: - After tuning with LPOT, we can get LPOT.model: + After tuning with Neural Compressor, we can get a Neural Compressor model object: ``` -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) -lpot_model = quantizer() +nc_model = quantizer() ``` -Here, lpot_model is LPOT model class, so it has "save" API: +Here, nc_model is an instance of the Neural Compressor model class, so it provides a "save" API: ```python -lpot_model.save("Path_to_save_configure_file") +nc_model.save("Path_to_save_configure_file") ``` * loading model: ```python model # fp32 model -from lpot.utils.pytorch import load +from neural_compressor.utils.pytorch import load quantized_model = load( os.path.join(Path, 'best_configure.yaml'), os.path.join(Path, 'best_model_weights.pt'), model) @@ -100,20 +100,20 @@ quantized_model = load( Please refer to [Sample code](./main.py). -Examples of enabling LPOT auto tuning on PyTorch ResNet +Examples of enabling Neural Compressor auto tuning on PyTorch ResNet ======================================================= -This is a tutorial of how to enable a PyTorch classification model with LPOT. +This is a tutorial on how to enable a PyTorch classification model with Neural Compressor. # User Code Analysis -LPOT supports three usages: +Neural Compressor supports three usages: 1. User only provide fp32 "model", and configure calibration dataset, evaluation dataset and metric in model-specific yaml config file. 2. User provide fp32 "model", calibration dataset "q_dataloader" and evaluation dataset "eval_dataloader", and configure metric in tuning.metric field of model-specific yaml config file. 3. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -As ResNet18/50/101 series are typical classification models, use Top-K as metric which is built-in supported by LPOT. So here we integrate PyTorch ResNet with LPOT by the first use case for simplicity. +As the ResNet18/50/101 series are typical classification models, we use Top-K, a metric built into Neural Compressor. So here we integrate PyTorch ResNet with Neural Compressor through the first use case for simplicity. ### Write Yaml Config File @@ -206,7 +206,7 @@ After prepare step is done, we just need update main.py like below. ```python model.eval() model.module.fuse_model() -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) q_model = quantizer() @@ -216,7 +216,7 @@ The quantizer() function will return a best quantized model during timeout constraint. ### Dump tensors for debug -LPOT can dump every layer output tensor which you specify in evaluation. +Neural Compressor can dump the output tensor of every layer that you specify during evaluation.
You just need to add some setting to yaml configure file as below: ```yaml tensorboard: true diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/conf.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/conf.yaml index 4386c6f9737..c6b60803bd2 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/conf.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/conf.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/conf_dump_tensors.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/conf_dump_tensors.yaml index 57625907cf4..c3db18e98d3 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/conf_dump_tensors.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/conf_dump_tensors.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. 
dataloader: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/main.py b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/main.py index cf53656ec3f..0b507f29ec6 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/main.py +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/main.py @@ -92,7 +92,7 @@ parser.add_argument('-r', "--accuracy_only", dest='accuracy_only', action='store_true', help='For accuracy measurement only.') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') @@ -265,7 +265,7 @@ def main_worker(gpu, ngpus_per_node, args): validate(val_loader, model, criterion, args) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common model.eval() model.fuse_model() quantizer = Quantization("./conf.yaml") @@ -278,7 +278,7 @@ def main_worker(gpu, ngpus_per_node, args): model.eval() model.fuse_model() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/main_dump_tensors.py b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/main_dump_tensors.py index 509d79e93bc..7f0551e4edb 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/main_dump_tensors.py +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/main_dump_tensors.py @@ -238,7 +238,7 @@ def main_worker(gpu, ngpus_per_node, args): if args.tune: model.eval() model.module.fuse_model() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf_dump_tensors.yaml") quantizer.model = common.Model(model) q_model = quantizer() diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/requirements.txt b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/requirements.txt index b6bbfd4a50b..891fd04fd12 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/requirements.txt +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/ptq/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.5.0+cpu torchvision==0.6.0+cpu diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/README.md b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/README.md index 5c35aaea3f9..03dd5915dbc 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/README.md +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/README.md @@ -1,12 +1,12 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch ResNet50/ResNet18/ResNet101 tuning results with Intel® Low Precision Optimization Tool. +This document describes the step-by-step instructions for reproducing PyTorch ResNet50/ResNet18/ResNet101 tuning results with Intel® Neural Compressor. > **Note** > > * PyTorch eager mode quantization implementation requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. 
-> * LPOT requires users to complete these two manual steps before triggering auto-tuning process. +> * Neural Compressor requires users to complete these two manual steps before triggering the auto-tuning process. > For details, please refer to https://pytorch.org/docs/stable/quantization.html # Prerequisite @@ -26,21 +26,21 @@ ls /path/to/imagenet train val ``` -Examples Of Enabling LPOT Auto Tuning On PyTorch ResNet +Examples Of Enabling Neural Compressor Auto Tuning On PyTorch ResNet ======================================================= -This is a tutorial of how to enable a PyTorch classification model with Intel® Low Precision Optimization Tool. +This is a tutorial on how to enable a PyTorch classification model with Intel® Neural Compressor. ### User Code Analysis -For quantization aware training mode, Intel® Low Precision Optimization Tool supports four usage as below: +For quantization aware training mode, Intel® Neural Compressor supports the four usages below: 1. User specifies fp32 "model", training function "q_func", evaluation dataset "eval_dataloader" and metric in tuning.metric field of model-specific yaml config file, this option does not require customer to implement evaluation function. 2. User specifies fp32 "model", training function "q_func" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself, this option require customer implement evaluation function by himself. -3. User specifies fp32 "model", "calibration_dataloader", "eval_dataloader", and metric, optimizer, criterion in model-specific yaml config file. LPOT will construct buildin training function and evaluation function this option. -4. User specifies fp32 "model", "calibration_dataloader", a custom "eval_func", and optimizer, criterion in model-specific yaml config file. LPOT will only construct buildin evaluation function this option. +3. User specifies fp32 "model", "calibration_dataloader", "eval_dataloader", and metric, optimizer, criterion in model-specific yaml config file. Neural Compressor will construct a built-in training function and evaluation function for this option. +4. User specifies fp32 "model", "calibration_dataloader", a custom "eval_func", and optimizer, criterion in model-specific yaml config file. Neural Compressor will only construct a built-in evaluation function for this option. -As ResNet18/50/101 series are typical classification models, use Top-K as metric which is built-in supported by Intel® Low Precision Optimization Tool. So here we integrate PyTorch ResNet with Intel® Low Precision Optimization Tool by the first or third use cases for simplicity. +As the ResNet18/50/101 series are typical classification models, we use Top-K, a metric built into Intel® Neural Compressor. So here we integrate PyTorch ResNet with Intel® Neural Compressor through the first or third use case for simplicity. ### With buildin training function @@ -94,7 +94,7 @@ After prepare step is done, we just need update main.py like below. ```python model.module.fuse_model() -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization(args.config) quantizer.model = common.Model(model) quantizer.calib_dataloader = train_loader @@ -154,7 +154,7 @@ The related code please refer to examples/pytorch/eager/image_recognition/imagen After prepare step is done, we just need update main.py like below.
```python -def training_func_for_lpot(model): +def training_func_for_nc(model): epochs = 8 optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) prev_loss = 100 @@ -197,10 +197,10 @@ def training_func_for_lpot(model): model.apply(torch.nn.intrinsic.qat.freeze_bn_stats) return model.module.fuse_model() -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) -quantizer.q_func = training_func_for_lpot +quantizer.q_func = training_func_for_nc quantizer.eval_dataloader = val_loader q_model = quantizer() ``` diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/conf.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/conf.yaml index 953b2c8a4da..5243be00557 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/conf.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/conf.yaml @@ -20,8 +20,8 @@ model: # mandatory. used to specif quantization: # optional. required for QAT and PTQ. approach: quant_aware_training # mandatory. supported values are quant_aware_training and post_training_static_quant. -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/conf_buildin.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/conf_buildin.yaml index 6daccd644d9..d50cdb1e89b 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/conf_buildin.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/conf_buildin.yaml @@ -29,8 +29,8 @@ quantization: # optional. required for QA CrossEntropyLoss: reduction: mean -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. 
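The conf_buildin.yaml hunk above carries the optimizer and criterion, which is what makes the q_func-free path possible. As a compact illustration of that third usage, here is a hedged sketch; the quantizable torchvision model and the stand-in loaders are placeholders for the real ImageNet setup in main_buildin.py below, and only the quantizer wiring lines mirror the patch itself.

```python
# Sketch of the built-in-training QAT path (usage 3): no q_func is supplied,
# because conf_buildin.yaml defines the optimizer and criterion.
import torch
import torchvision
from torch.utils.data import DataLoader, TensorDataset
from neural_compressor.experimental import Quantization, common

# A quantization-ready ResNet-50: torchvision's quantizable variant already
# embeds QuantStub/DeQuantStub and defines fuse_model(), as the Note requires.
model = torchvision.models.quantization.resnet50(pretrained=False)
model.fuse_model()

# Stand-in loaders; in the real example these are ImageNet train/val loaders.
images = torch.randn(8, 3, 224, 224)
labels = torch.randint(0, 1000, (8,))
train_loader = DataLoader(TensorDataset(images, labels), batch_size=4)
val_loader = DataLoader(TensorDataset(images, labels), batch_size=4)

quantizer = Quantization('./conf_buildin.yaml')  # yaml supplies optimizer/criterion
quantizer.model = common.Model(model)
quantizer.calib_dataloader = train_loader        # feeds the built-in training loop
quantizer.eval_dataloader = val_loader           # feeds the built-in evaluation loop
q_model = quantizer()                            # best model under the tuning constraints
```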
diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/main.py b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/main.py index 2290b7bfbfc..e8a50f0c8b1 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/main.py +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/main.py @@ -90,7 +90,7 @@ parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') @@ -258,7 +258,7 @@ def main_worker(gpu, ngpus_per_node, args): return if args.tune: - def training_func_for_lpot(model): + def training_func_for_nc(model): epochs = 8 optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) prev_loss = None @@ -304,10 +304,10 @@ def training_func_for_lpot(model): return model.module.fuse_model() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(args.config) quantizer.model = common.Model(model) - quantizer.q_func = training_func_for_lpot + quantizer.q_func = training_func_for_nc quantizer.eval_dataloader = val_loader q_model = quantizer() q_model.save(args.tuned_checkpoint) @@ -317,7 +317,7 @@ def training_func_for_lpot(model): model.eval() model.module.fuse_model() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/main_buildin.py b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/main_buildin.py index 77c1210dc8f..bcd043d4b4c 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/main_buildin.py +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/main_buildin.py @@ -88,7 +88,7 @@ parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') @@ -248,7 +248,7 @@ def main_worker(gpu, ngpus_per_node, args): if args.tune: model.module.fuse_model() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(args.config) quantizer.model = common.Model(model) quantizer.calib_dataloader = train_loader @@ -261,7 +261,7 @@ def main_worker(gpu, ngpus_per_node, args): model.eval() model.module.fuse_model() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/requirements.txt b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/requirements.txt index b6bbfd4a50b..891fd04fd12 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/requirements.txt +++ 
b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.5.0+cpu torchvision==0.6.0+cpu diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/README.md b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/README.md index b427296da47..6b7bece622f 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/README.md +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/README.md @@ -1,12 +1,12 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch ResNet50 prune and QAT results with Intel® Low Precision Optimization Tool(LPOT). +This document describes the step-by-step instructions for reproducing PyTorch ResNet50 prune and QAT results with Intel® Neural Compressor. > **Note** > > * PyTorch quantization implementation in imperative path has limitation on automatically execution. It requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. -> * LPOT supposes user have done these two steps before invoking LPOT interface. +> * Neural Compressor assumes the user has completed these two steps before invoking the Neural Compressor interface. > For details, please refer to https://pytorch.org/docs/stable/quantization.html # Prerequisite @@ -42,7 +42,7 @@ python main.py -t -a resnet50 --pretrained /path/to/imagenet In examples directory, there are two yaml templates `prune_conf.yaml` and `qat_conf.yaml` which are used in pruning and quantization aware training. Users can remove some of the items in the yaml and keep only the mandatory items. -LPOT defined Scheduler to do QAT during prune in one turn. It is sufficient to add following lines of code to execute pruning and QAT in scheduler. +Neural Compressor defines a Scheduler to run QAT during pruning in one pass. It is sufficient to add the following lines of code to execute pruning and QAT in the scheduler.
``` quantizer = Quantization('./qat_conf.yaml') prune = Pruning('./prune_conf.yaml') diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/main.py b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/main.py index dfe086c8cbb..7242484c5df 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/main.py +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/main.py @@ -44,7 +44,7 @@ parser.add_argument('-i', "--iter", default=0, type=int, help='For accuracy measurement only.') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') @@ -66,8 +66,8 @@ def main(): if args.tune: model.fuse_model() - from lpot.experimental import Quantization, common, Pruning, Component - from lpot.experimental.scheduler import Scheduler + from neural_compressor.experimental import Quantization, common, Pruning, Component + from neural_compressor.experimental.scheduler import Scheduler quantizer = Quantization('./qat_conf.yaml') prune = Pruning('./prune_conf.yaml') scheduler = Scheduler() diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/prune_conf.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/prune_conf.yaml index 3d9070893b6..20ac6fe80a5 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/prune_conf.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/prune_conf.yaml @@ -58,8 +58,8 @@ pruning: prune_type: basic_magnitude names: ['layer1.0.conv1.weight', 'layer1.0.conv2.weight'] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/qat_conf.yaml b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/qat_conf.yaml index 647c9d1eafc..51ae178f984 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/qat_conf.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/qat_conf.yaml @@ -45,8 +45,8 @@ quantization: # optional. required for QA CrossEntropyLoss: reduction: mean -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. 
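The README hunk above stops after the two components are created, and the main.py hunk shows only the Scheduler import. Here is a sketch of how the pieces are plausibly assembled; `scheduler.append` and the final `scheduler()` call are assumptions modeled on the prune-and-PTQ scheduler example earlier in this diff, not lines from the patch.

```python
# Hypothetical end-to-end wiring for QAT-during-pruning (append order and the
# scheduler() call are assumed; only the imports and constructors are in the diff).
import torchvision
from neural_compressor.experimental import Quantization, Pruning, common
from neural_compressor.experimental.scheduler import Scheduler

model = torchvision.models.quantization.resnet50(pretrained=False)
model.fuse_model()                     # manual fusion first, per the Note above

quantizer = Quantization('./qat_conf.yaml')
prune = Pruning('./prune_conf.yaml')

scheduler = Scheduler()
scheduler.model = common.Model(model)  # one wrapped model flows through both passes
scheduler.append(prune)                # assumed API: components run in append order
scheduler.append(quantizer)
opt_model = scheduler()                # pruned-and-quantized result
opt_model.save('./saved_results')      # assumed: same save API as the other examples
```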
diff --git a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/requirements.txt b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/requirements.txt index 5a604ca6b39..1b69b5b0e7a 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/requirements.txt +++ b/examples/pytorch/eager/image_recognition/imagenet/cpu/qat_during_prune/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.8.0+cpu torchvision==0.9.0+cpu diff --git a/examples/pytorch/eager/image_recognition/imagenet/gpu/conf.yaml b/examples/pytorch/eager/image_recognition/imagenet/gpu/conf.yaml index e7631f76817..ab105c7657d 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/gpu/conf.yaml +++ b/examples/pytorch/eager/image_recognition/imagenet/gpu/conf.yaml @@ -42,8 +42,8 @@ quantization: # optional. tuning constrai }, } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/imagenet/gpu/main.py b/examples/pytorch/eager/image_recognition/imagenet/gpu/main.py index 1ce1dd4c5a6..f8db915c977 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/gpu/main.py +++ b/examples/pytorch/eager/image_recognition/imagenet/gpu/main.py @@ -252,7 +252,7 @@ def main_worker(gpu, ngpus_per_node, args): if args.tune: model.eval() model.module.fuse_model() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) q_model = quantizer() diff --git a/examples/pytorch/eager/image_recognition/imagenet/gpu/requirements.txt b/examples/pytorch/eager/image_recognition/imagenet/gpu/requirements.txt index 4828e85bf9c..b3624165493 100644 --- a/examples/pytorch/eager/image_recognition/imagenet/gpu/requirements.txt +++ b/examples/pytorch/eager/image_recognition/imagenet/gpu/requirements.txt @@ -1,3 +1,3 @@ -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torchvision==0.6.0+cpu diff --git a/examples/pytorch/eager/image_recognition/mnist/mnist.py b/examples/pytorch/eager/image_recognition/mnist/mnist.py index 0a779937a1c..b56abb82cc7 100644 --- a/examples/pytorch/eager/image_recognition/mnist/mnist.py +++ b/examples/pytorch/eager/image_recognition/mnist/mnist.py @@ -136,7 +136,7 @@ def test_func(model): return test(model, test_loader) - from lpot.experimental import Component, common + from neural_compressor.experimental import Component, common component = Component(args.config) component.model = common.Model(model) component.train_func = train_func diff --git a/examples/pytorch/eager/image_recognition/peleenet/README.md b/examples/pytorch/eager/image_recognition/peleenet/README.md index fe260d359dd..91c6df5321a 100644 --- a/examples/pytorch/eager/image_recognition/peleenet/README.md +++ b/examples/pytorch/eager/image_recognition/peleenet/README.md @@ -1,12 +1,12 @@ Step-by-Step ============ -This document describes the step-by-step instructions for 
reproducing PyTorch PeleeNet tuning results with Intel® Low Precision Optimization Tool. +This document describes the step-by-step instructions for reproducing PyTorch PeleeNet tuning results with Intel® Neural Compressor. > **Note** > > * PyTorch quantization implementation in imperative path has limitation on automatically execution. It requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. -> * Intel® Low Precision Optimization Tool supposes user have done these two steps before invoking Intel® Low Precision Optimization Tool interface. +> * Intel® Neural Compressor assumes the user has completed these two steps before invoking the Intel® Neural Compressor interface. > For details, please refer to https://pytorch.org/docs/stable/quantization.html # Prerequisite @@ -39,20 +39,20 @@ cd examples/pytorch/eager/image_recognition/peleenet python main.py --tune --pretrained -j 1 /path/to/imagenet -Examples of enabling Intel® Low Precision Optimization Tool auto tuning on PyTorch ResNet +Examples of enabling Intel® Neural Compressor auto tuning on PyTorch PeleeNet ========================================================================================== -This is a tutorial of how to enable a PyTorch classification model with Intel® Low Precision Optimization Tool. +This is a tutorial on how to enable a PyTorch classification model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports three usages: +Intel® Neural Compressor supports three usages: 1. User only provide fp32 "model", and configure calibration dataset, evaluation dataset and metric in model-specific yaml config file. 2. User provide fp32 "model", calibration dataset "q_dataloader" and evaluation dataset "eval_dataloader", and configure metric in tuning.metric field of model-specific yaml config file. 3. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -As PeleeNet are typical classification models, use Top-K as metric and imagenet dataset which are built-in supported by Intel® Low Precision Optimization Tool. So here we integrate PyTorch PeleeNet with Intel® Low Precision Optimization Tool by the first use case for simplicity. +As PeleeNet is a typical classification model, we use Top-K as the metric and the ImageNet dataset, both of which are built into Intel® Neural Compressor. So here we integrate PyTorch PeleeNet with Intel® Neural Compressor through the first use case for simplicity. ### Write Yaml Config File @@ -81,8 +81,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: @@ -146,7 +146,7 @@ After prepare step is done, we just need update main.py like below.
```python model.module.fuse() -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) q_model = quantizer() diff --git a/examples/pytorch/eager/image_recognition/peleenet/conf.yaml b/examples/pytorch/eager/image_recognition/peleenet/conf.yaml index 3f5642fbb94..0d037d0269d 100644 --- a/examples/pytorch/eager/image_recognition/peleenet/conf.yaml +++ b/examples/pytorch/eager/image_recognition/peleenet/conf.yaml @@ -52,8 +52,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/peleenet/main.py b/examples/pytorch/eager/image_recognition/peleenet/main.py index 2c4012d631c..942472a1765 100644 --- a/examples/pytorch/eager/image_recognition/peleenet/main.py +++ b/examples/pytorch/eager/image_recognition/peleenet/main.py @@ -62,7 +62,7 @@ parser.add_argument('--dist-backend', default='gloo', type=str, help='distributed backend') parser.add_argument('--tune', action='store_true', - help='int8 quantization tune with Low Precision Optimization Tool') + help='int8 quantization tune with Neural Compressor') parser.add_argument('--weights', type=str, metavar='PATH', default='weights/peleenet_acc7208.pth.tar', help='path to init checkpoint (default: none)') @@ -73,7 +73,7 @@ parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool' + help='path to checkpoint tuned by Neural Compressor' ' (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark for int8') @@ -163,7 +163,7 @@ def main(): if args.tune: model.eval() model.module.fuse_model() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) q_model = quantizer() @@ -174,7 +174,7 @@ def main(): model.eval() model.module.fuse_model() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/image_recognition/peleenet/requirements.txt b/examples/pytorch/eager/image_recognition/peleenet/requirements.txt index b6bbfd4a50b..891fd04fd12 100644 --- a/examples/pytorch/eager/image_recognition/peleenet/requirements.txt +++ b/examples/pytorch/eager/image_recognition/peleenet/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.5.0+cpu torchvision==0.6.0+cpu diff --git a/examples/pytorch/eager/image_recognition/resnest/README.md b/examples/pytorch/eager/image_recognition/resnest/README.md index fe0e62286ba..d72cd12d107 100644 --- 
a/examples/pytorch/eager/image_recognition/resnest/README.md +++ b/examples/pytorch/eager/image_recognition/resnest/README.md @@ -1,12 +1,12 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch ResNest50 tuning results with Intel® Low Precision Optimization Tool. +This document describes the step-by-step instructions for reproducing PyTorch ResNest50 tuning results with Intel® Neural Compressor. > **Note** > > * PyTorch quantization implementation in imperative path has limitation on automatically execution. It requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. -> * Intel® Low Precision Optimization Tool supposes user have done these two steps before invoking Intel® Low Precision Optimization Tool interface. +> * Intel® Neural Compressor assumes the user has completed these two steps before invoking the Intel® Neural Compressor interface. > For details, please refer to https://pytorch.org/docs/stable/quantization.html # Prerequisite @@ -38,20 +38,20 @@ cd examples/pytorch/eager/image_recognition/resnest python -u scripts/torch/verify.py --tune --model resnest50 --batch-size what_you_want --workers 1 --no-cuda /path/to/imagenet -Examples of enabling Intel® Low Precision Optimization Tool auto tuning on PyTorch ResNest +Examples of enabling Intel® Neural Compressor auto tuning on PyTorch ResNest =========================================================================================== -This is a tutorial of how to enable a PyTorch classification model with Intel® Low Precision Optimization Tool. +This is a tutorial on how to enable a PyTorch classification model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports three usages: +Intel® Neural Compressor supports three usages: 1. User only provide fp32 "model", and configure calibration dataset, evaluation dataset and metric in model-specific yaml config file. 2. User provide fp32 "model", calibration dataset "q_dataloader" and evaluation dataset "eval_dataloader", and configure metric in tuning.metric field of model-specific yaml config file. 3. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -As ResNest series are typical classification models, use Top-K as metric which is built-in supported by Intel® Low Precision Optimization Tool. So here we integrate PyTorch ResNest with Intel® Low Precision Optimization Tool by the first use case for simplicity. +As the ResNest series are typical classification models, we use Top-K, a metric built into Intel® Neural Compressor. So here we integrate PyTorch ResNest with Intel® Neural Compressor through the first use case for simplicity. ### Write Yaml config file @@ -80,8 +80,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric.
dataloader: @@ -145,7 +145,7 @@ After prepare step is done, we just need update main.py like below. ```python model.fuse_model() -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) q_model = quantizer() diff --git a/examples/pytorch/eager/image_recognition/resnest/conf.yaml b/examples/pytorch/eager/image_recognition/resnest/conf.yaml index 8ed94e61ca0..54c649b380a 100644 --- a/examples/pytorch/eager/image_recognition/resnest/conf.yaml +++ b/examples/pytorch/eager/image_recognition/resnest/conf.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/resnest/requirements.txt b/examples/pytorch/eager/image_recognition/resnest/requirements.txt index aaa350e7928..e2122654f49 100644 --- a/examples/pytorch/eager/image_recognition/resnest/requirements.txt +++ b/examples/pytorch/eager/image_recognition/resnest/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor tqdm --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.6.0+cpu diff --git a/examples/pytorch/eager/image_recognition/resnest/scripts/torch/verify.py b/examples/pytorch/eager/image_recognition/resnest/scripts/torch/verify.py index e341bf5be8d..2f048277c9a 100644 --- a/examples/pytorch/eager/image_recognition/resnest/scripts/torch/verify.py +++ b/examples/pytorch/eager/image_recognition/resnest/scripts/torch/verify.py @@ -55,7 +55,7 @@ def __init__(self): parser.add_argument('--verify', type=str, default=None, help='put the path to resuming file if needed') parser.add_argument("--tune", action='store_true', - help="run Low Precision Optimization Tool to tune int8 acc.") + help="run Neural Compressor to tune int8 acc.") parser.add_argument('data', metavar='DIR', help='path to dataset') parser.add_argument('-i', '--iterations', default=0, type=int, metavar='N', @@ -65,7 +65,7 @@ def __init__(self): parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool' + help='path to checkpoint tuned by Neural Compressor' ' (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark for int8') @@ -134,7 +134,7 @@ def main(): model.fuse_model() if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) q_model = quantizer() @@ -142,7 +142,7 @@ def main(): exit(0) if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git 
a/examples/pytorch/eager/image_recognition/se_resnext/README.md b/examples/pytorch/eager/image_recognition/se_resnext/README.md index 60855add862..311b007363c 100644 --- a/examples/pytorch/eager/image_recognition/se_resnext/README.md +++ b/examples/pytorch/eager/image_recognition/se_resnext/README.md @@ -6,7 +6,7 @@ This document is used to list steps of reproducing PyTorch se_resnext tuning zoo > **Note** > > * PyTorch quantization implementation in imperative path has limitation on automatically execution. It requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. -> * Intel® Low Precision Optimization Tool supposes user have done these two steps before invoking Intel® Low Precision Optimization Tool interface.For details, please refer to https://pytorch.org/docs/stable/quantization.html +> * Intel® Neural Compressor assumes the user has completed these two steps before invoking the Intel® Neural Compressor interface. For details, please refer to https://pytorch.org/docs/stable/quantization.html # Prerequisite @@ -56,20 +56,20 @@ python examples/imagenet_eval.py \ -t ``` -Examples of enabling Intel® Low Precision Optimization Tool +Examples of enabling Intel® Neural Compressor ============================================================ -This is a tutorial of how to enable SE_ResNext model with Intel® Low Precision Optimization Tool. +This is a tutorial on how to enable the SE_ResNext model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports three usages: +Intel® Neural Compressor supports three usages: 1. User only provide fp32 "model", and configure calibration dataset, evaluation dataset and metric in model-specific yaml config file. 2. User provide fp32 "model", calibration dataset "q_dataloader" and evaluation dataset "eval_dataloader", and configure metric in tuning.metric field of model-specific yaml config file. 3. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -As SE_ResNext series are typical classification models, use Top-K as metric which is built-in supported by Intel® Low Precision Optimization Tool. So here we integrate PyTorch ResNet with Intel® Low Precision Optimization Tool by the first use case for simplicity. +As the SE_ResNext series are typical classification models, we use Top-K, a metric built into Intel® Neural Compressor. So here we integrate PyTorch SE_ResNext with Intel® Neural Compressor through the first use case for simplicity. ### Write Yaml Config File @@ -97,8 +97,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: @@ -145,7 +145,7 @@ tuning: Here we set the accuracy target to tolerate a 0.01 relative accuracy loss from the baseline. The default tuning strategy is the basic strategy. The timeout 0 means unlimited time for a tuning config to meet the accuracy target.
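The se_resnext README above lists a custom "eval_func" as its third usage but demonstrates only the first. As an illustration of that third usage, here is a hypothetical evaluator wired into the quantizer; the `quantizer.eval_func` attribute, the dummy loader, and the accuracy computation are all assumptions sketched to match the 0.01 relative-loss criterion described above, not code from this patch.

```python
# Hypothetical third-usage wiring: a user-supplied evaluator instead of the
# yaml-configured topk metric. Dummy data stands in for the ImageNet val set.
import torch
import torchvision
from torch.utils.data import DataLoader, TensorDataset
from neural_compressor.experimental import Quantization, common

model = torchvision.models.quantization.resnet50(pretrained=False)
model.eval()
model.fuse_model()  # manual fusion, as the Note at the top of the README requires

images = torch.randn(8, 3, 224, 224)
labels = torch.randint(0, 1000, (8,))
val_loader = DataLoader(TensorDataset(images, labels), batch_size=4)

def eval_func(model):
    # Return a single accuracy number; the tuner compares each quantization
    # config's score against the fp32 baseline under the 0.01 relative-loss bound.
    correct = total = 0
    with torch.no_grad():
        for xs, ys in val_loader:
            correct += (model(xs).argmax(dim=1) == ys).sum().item()
            total += ys.numel()
    return correct / total

quantizer = Quantization('./conf.yaml')
quantizer.model = common.Model(model)
quantizer.eval_func = eval_func  # assumed attribute, replacing the yaml metric
q_model = quantizer()
```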
-> **Note** : LPOT does NOT support "mse" tuning strategy for pytorch framework +> **Note** : Neural Compressor does NOT support "mse" tuning strategy for pytorch framework ### Prepare @@ -165,7 +165,7 @@ After prepare step is done, we just need update imagenet_eval.py like below if args.tune: model.eval() model.module.fuse_model() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) q_model = quantizer() diff --git a/examples/pytorch/eager/image_recognition/se_resnext/conf.yaml b/examples/pytorch/eager/image_recognition/se_resnext/conf.yaml index 80305a72547..b445f2e098d 100644 --- a/examples/pytorch/eager/image_recognition/se_resnext/conf.yaml +++ b/examples/pytorch/eager/image_recognition/se_resnext/conf.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/eager/image_recognition/se_resnext/examples/imagenet_eval.py b/examples/pytorch/eager/image_recognition/se_resnext/examples/imagenet_eval.py index eac1d4b1d5d..11c2c0823c5 100644 --- a/examples/pytorch/eager/image_recognition/se_resnext/examples/imagenet_eval.py +++ b/examples/pytorch/eager/image_recognition/se_resnext/examples/imagenet_eval.py @@ -66,7 +66,7 @@ parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool' + help='path to checkpoint tuned by Neural Compressor' ' (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark for int8') @@ -149,7 +149,7 @@ def main(): if args.tune: model.eval() model.module.fuse_model() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) q_model = quantizer() @@ -160,7 +160,7 @@ def main(): model.eval() model.module.fuse_model() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/image_recognition/se_resnext/requirements.txt b/examples/pytorch/eager/image_recognition/se_resnext/requirements.txt index ae68515da67..2c10766accd 100644 --- a/examples/pytorch/eager/image_recognition/se_resnext/requirements.txt +++ b/examples/pytorch/eager/image_recognition/se_resnext/requirements.txt @@ -1,7 +1,7 @@ munch tqdm scipy -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.5.0+cpu torchvision==0.6.0+cpu diff --git a/examples/pytorch/eager/language_translation/prune/distiller_bert.py b/examples/pytorch/eager/language_translation/prune/distiller_bert.py index db9a27f9fca..3f13aaf22ce 100644 --- 
a/examples/pytorch/eager/language_translation/prune/distiller_bert.py +++ b/examples/pytorch/eager/language_translation/prune/distiller_bert.py @@ -409,7 +409,7 @@ def eval_func(model): return take_eval_steps(args, model, tokenizer, prune) if args.prune: - from lpot.experimental import Pruning, common + from neural_compressor.experimental import Pruning, common prune = Pruning(args.config) prune.model = common.Model(model) prune.train_dataloader = train_dataloader diff --git a/examples/pytorch/eager/language_translation/ptq/README.md b/examples/pytorch/eager/language_translation/ptq/README.md index 5b1f078a053..d0137a84026 100644 --- a/examples/pytorch/eager/language_translation/ptq/README.md +++ b/examples/pytorch/eager/language_translation/ptq/README.md @@ -7,7 +7,7 @@ This document is used to list steps of reproducing PyTorch BERT tuning zoo resul > > 1. PyTorch quantization implementation in imperative path has limitation on automatically execution. > It requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. -> Intel® Low Precision Optimization Tool has no capability to solve this framework limitation. Intel® Low Precision Optimization Tool supposes user have done these two steps before invoking Intel® Low Precision Optimization Tool interface. +> Intel® Neural Compressor cannot remove this framework limitation; it assumes the user has completed these two steps before invoking the Intel® Neural Compressor interface. > For details, please refer to https://pytorch.org/docs/stable/quantization.html > 2. The latest version of pytorch enabled INT8 layer_norm op, but the accuracy was regression. So you should tune BERT model on commit 24aac321718d58791c4e6b7cfa50788a124dae23. @@ -62,7 +62,7 @@ python setup.py install ### 3. Prepare pretrained model -Before use Intel® Low Precision Optimization Tool, you should fine tune the model to get pretrained model, You should also install the additional packages required by the examples: +Before using Intel® Neural Compressor, you should fine-tune the model to get a pretrained model. You should also install the additional packages required by the examples: ```shell cd examples/pytorch/eager/language_translation @@ -165,7 +165,7 @@ python $SQUAD_DIR/evaluate-v1.1.py $SQUAD_DIR/dev-v1.1.json ../models/wwm_uncase please refer to [BERT large SQuAD instructions](README.md#run_squadpy-fine-tuning-on-squad-for-question-answering) -* After fine tuning, you can get a checkpoint dir which include pretrained model, tokenizer and training arguments. This checkpoint dir will be used by lpot tuning as below. +* After fine-tuning, you get a checkpoint dir that includes the pretrained model, tokenizer, and training arguments. This checkpoint dir will be used by neural_compressor tuning as below. #### GPT @@ -336,14 +336,14 @@ Where output_dir is path of checkpoint which be created by fine tuning. where task name can be one of CoLA, SST-2, MRPC, STS-B, QQP, MNLI, QNLI, RTE, WNLI. Where output_dir is path of checkpoint which be created by fine tuning. -Examples of enabling Intel® Low Precision Optimization Tool +Examples of enabling Intel® Neural Compressor ============================================================ -This is a tutorial of how to enable BERT model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable the BERT model with Intel® Neural Compressor.
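The distiller_bert.py hunk above wires the experimental Pruning API into an existing training script. A self-contained sketch of that flow under the new package name, with toy stand-ins for the real BERT model and GLUE dataloader; the conf.yaml is assumed to carry the pruning/sparsity section, and invoking the pruner as a callable mirrors the Quantization usage elsewhere in this patch:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from neural_compressor.experimental import Pruning, common

# Toy stand-ins for the example's BERT model and training data.
model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU(), torch.nn.Linear(16, 2))
train_dataloader = DataLoader(
    TensorDataset(torch.randn(64, 16), torch.randint(0, 2, (64,))), batch_size=8)

prune = Pruning("./conf.yaml")      # yaml assumed to define the sparsity goal
prune.model = common.Model(model)
prune.train_dataloader = train_dataloader
pruned_model = prune()              # assumed callable, like Quantization, returning the pruned model
```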
# User Code Analysis -Intel® Low Precision Optimization Tool supports two usages: +Intel® Neural Compressor supports two usages: 1. User specifies fp32 'model', calibration dataset 'q_dataloader', evaluation dataset "eval_dataloader" and metrics in tuning.metrics field of model-specific yaml config file. 2. User specifies fp32 'model', calibration dataset 'q_dataloader' and a custom "eval_func" which encapsulates the evaluation dataset and metrics by itself. @@ -372,7 +372,7 @@ tuning: Here we set accuracy target as tolerating 0.01 relative accuracy loss of baseline. The default tuning strategy is basic strategy. The timeout 0 means early stop as well as a tuning config meet accuracy target. -> **Note** : lpot does NOT support "mse" tuning strategy for pytorch framework +> **Note** : neural_compressor does NOT support "mse" tuning strategy for pytorch framework ### prepare @@ -390,7 +390,7 @@ After prepare step is done, we just need update run_squad_tune.py and run_glue_t ```python if args.tune: - def eval_func_for_lpot(model): + def eval_func_for_nc(model): result, _ = evaluate(args, model, tokenizer) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) @@ -403,7 +403,7 @@ if args.tune: return acc eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=False) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") if eval_task != "squad": eval_task = 'classifier' @@ -412,7 +412,7 @@ if args.tune: quantizer.model = common.Model(model) quantizer.calib_dataloader = common.DataLoader( eval_dataset, batch_size=args.eval_batch_size) - quantizer.eval_func = eval_func_for_lpot + quantizer.eval_func = eval_func_for_nc q_model = quantizer() q_model.save("PATH to saved model") exit(0) diff --git a/examples/pytorch/eager/language_translation/ptq/examples/requirements.txt b/examples/pytorch/eager/language_translation/ptq/examples/requirements.txt index adac47f7d67..60e6c3ca3b0 100644 --- a/examples/pytorch/eager/language_translation/ptq/examples/requirements.txt +++ b/examples/pytorch/eager/language_translation/ptq/examples/requirements.txt @@ -2,6 +2,6 @@ tensorboardX tensorboard scikit-learn seqeval -lpot +neural-compressor rsa>=4.7 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/examples/pytorch/eager/language_translation/ptq/examples/run_glue_tune.py b/examples/pytorch/eager/language_translation/ptq/examples/run_glue_tune.py index 070767f883b..32f7c908875 100644 --- a/examples/pytorch/eager/language_translation/ptq/examples/run_glue_tune.py +++ b/examples/pytorch/eager/language_translation/ptq/examples/run_glue_tune.py @@ -503,7 +503,7 @@ def main(): parser.add_argument("--do_bf16", action='store_true', help="run bf16 evaluation / training.") parser.add_argument("--tune", action='store_true', - help="run Low Precision Optimization Tool to tune int8 acc.") + help="run Neural Compressor to tune int8 acc.") parser.add_argument("--warmup", type=int, default=2, help="warmup for performance") parser.add_argument('-i', "--iter", default=0, type=int, @@ -515,7 +515,7 @@ def main(): parser.add_argument('-r', "--accuracy_only", dest='accuracy_only', action='store_true', help='For accuracy measurement only.') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low 
Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') @@ -644,7 +644,7 @@ def main(): results.update(result) if args.tune: - def eval_func_for_lpot(model): + def eval_func_for_nc(model): result, perf = evaluate(args, model, tokenizer, prefix=prefix) bert_task_acc_keys = ['acc_and_f1', 'f1', 'mcc', 'spearmanr', 'acc'] for key in bert_task_acc_keys: @@ -670,7 +670,7 @@ def eval_func_for_lpot(model): from torch.utils import mkldnn as mkldnn_utils model = mkldnn_utils.to_mkldnn(model) print(model) - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(args.config) if eval_task != "squad": eval_task = 'classifier' @@ -679,7 +679,7 @@ def eval_func_for_lpot(model): quantizer.model = common.Model(model) quantizer.calib_dataloader = common.DataLoader( eval_dataset, batch_size=args.eval_batch_size) - quantizer.eval_func = eval_func_for_lpot + quantizer.eval_func = eval_func_for_nc q_model = quantizer() q_model.save(args.tuned_checkpoint) exit(0) @@ -689,7 +689,7 @@ def eval_func_for_lpot(model): model.to(args.device) if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/language_translation/ptq/examples/run_lm_tune.py b/examples/pytorch/eager/language_translation/ptq/examples/run_lm_tune.py index 6851b7c35f2..4d077bda37d 100644 --- a/examples/pytorch/eager/language_translation/ptq/examples/run_lm_tune.py +++ b/examples/pytorch/eager/language_translation/ptq/examples/run_lm_tune.py @@ -525,7 +525,7 @@ def main(): parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") parser.add_argument("--tune", action='store_true', - help="run Low Precision Optimization Tool to tune int8 acc.") + help="run Neural Compressor to tune int8 acc.") parser.add_argument('-i', "--iter", default=0, type=int, help='For accuracy measurement only.') parser.add_argument('--config', type=str, default='conf.yaml', help="yaml config file") @@ -536,7 +536,7 @@ def main(): parser.add_argument('-r', "--accuracy_only", dest='accuracy_only', action='store_true', help='For accuracy measurement only.') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') args = parser.parse_args() @@ -658,7 +658,7 @@ def main(): results.update(result) if args.tune: - def eval_func_for_lpot(model): + def eval_func_for_nc(model): result = evaluate(args, model, tokenizer, prefix=prefix) return 100 - result['perplexity'].numpy() @@ -666,7 +666,7 @@ def eval_func_for_lpot(model): model.to(args.device) model.eval() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(args.config) eval_dataset = WikiDataset(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size) args.eval_batch_size = args.per_gpu_eval_batch_size 
* max(1, args.n_gpu) @@ -675,7 +675,7 @@ def eval_func_for_lpot(model): eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) quantizer.model = common.Model(model) quantizer.calib_dataloader = eval_dataloader - quantizer.eval_func = eval_func_for_lpot + quantizer.eval_func = eval_func_for_nc q_model = quantizer() q_model.save(args.tuned_checkpoint) exit(0) @@ -685,7 +685,7 @@ def eval_func_for_lpot(model): model.to(args.device) if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/language_translation/ptq/examples/run_squad_tune.py b/examples/pytorch/eager/language_translation/ptq/examples/run_squad_tune.py index 43fb9a18638..316081aae42 100644 --- a/examples/pytorch/eager/language_translation/ptq/examples/run_squad_tune.py +++ b/examples/pytorch/eager/language_translation/ptq/examples/run_squad_tune.py @@ -523,7 +523,7 @@ def main(): parser.add_argument("--mkldnn_eval", action='store_true', help="evaluation with MKLDNN") parser.add_argument("--tune", action='store_true', - help="run Low Precision Optimization Tool to tune int8 acc.") + help="run Neural Compressor to tune int8 acc.") parser.add_argument("--task_name", default=None, type=str, required=True, help="SQuAD task") parser.add_argument("--warmup", type=int, default=5, @@ -536,7 +536,7 @@ def main(): parser.add_argument('-r', "--accuracy_only", dest='accuracy_only', action='store_true', help='For accuracy measurement only.') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') @@ -670,7 +670,7 @@ def main(): results.update(result) if args.tune: - def eval_func_for_lpot(model): + def eval_func_for_nc(model): result, _ = evaluate(args, model, tokenizer) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) @@ -687,14 +687,14 @@ def eval_func_for_lpot(model): dataset = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=False) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) eval_task = "squad" - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(args.config) dataset = quantizer.dataset('bert', dataset=dataset, task=eval_task, model_type=args.model_type) quantizer.model = common.Model(model) quantizer.calib_dataloader = common.DataLoader( dataset, batch_size=args.eval_batch_size) - quantizer.eval_func = eval_func_for_lpot + quantizer.eval_func = eval_func_for_nc q_model = quantizer() q_model.save(args.tuned_checkpoint) exit(0) @@ -703,7 +703,7 @@ def eval_func_for_lpot(model): model = model_class.from_pretrained(checkpoint, mix_qkv=True) model.to(args.device) if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/language_translation/ptq/requirements.txt b/examples/pytorch/eager/language_translation/ptq/requirements.txt index 83ca065a40e..d445fc9c3c8 100644 --- 
a/examples/pytorch/eager/language_translation/ptq/requirements.txt +++ b/examples/pytorch/eager/language_translation/ptq/requirements.txt @@ -10,7 +10,7 @@ regex sentencepiece # For XLM sacremoses -lpot +neural-compressor tensorboard scikit-learn diff --git a/examples/pytorch/eager/medical_imaging/3d-unet/Makefile b/examples/pytorch/eager/medical_imaging/3d-unet/Makefile index 9d656821791..50850421418 100644 --- a/examples/pytorch/eager/medical_imaging/3d-unet/Makefile +++ b/examples/pytorch/eager/medical_imaging/3d-unet/Makefile @@ -175,8 +175,8 @@ run_pytorch_performance: run_pytorch_accuracy: mkdir_postprocessed_data @python3 run.py --backend=pytorch --accuracy -.PHONY: run_pytorch_LPOT_tuning -run_pytorch_LPOT_tuning: mkdir_postprocessed_data +.PHONY: run_pytorch_NC_tuning +run_pytorch_NC_tuning: mkdir_postprocessed_data @python3 run.py --backend=pytorch --accuracy --tune --mlperf_conf=./mlperf.conf .PHONY: run_onnxruntime_performance diff --git a/examples/pytorch/eager/medical_imaging/3d-unet/README.md b/examples/pytorch/eager/medical_imaging/3d-unet/README.md index e94fb88972f..a054a7f57c4 100644 --- a/examples/pytorch/eager/medical_imaging/3d-unet/README.md +++ b/examples/pytorch/eager/medical_imaging/3d-unet/README.md @@ -1,8 +1,8 @@ # Introduction -This example is used to demonstrate 3D-Unet int8 accuracy by tuning with LPOT on PyTorch FBGEMM path. +This example is used to demonstrate 3D-Unet int8 accuracy by tuning with Neural Compressor on PyTorch FBGEMM path. -The 3D-Unet source code comes from [mlperf](https://github.com/mlcommons/inference/tree/master/vision/medical_imaging/3d-unet), commit SHA is **b7e8f0da170a421161410d18e5d2a05d75d6bccf**. [nnUnet](https://github.com/MIC-DKFZ/nnUNet) commit SHA is **b38c69b345b2f60cd0d053039669e8f988b0c0af**. User could diff them with this example to know which changes are made to integrate with LPOT. +The 3D-Unet source code comes from [mlperf](https://github.com/mlcommons/inference/tree/master/vision/medical_imaging/3d-unet), commit SHA is **b7e8f0da170a421161410d18e5d2a05d75d6bccf**. [nnUnet](https://github.com/MIC-DKFZ/nnUNet) commit SHA is **b38c69b345b2f60cd0d053039669e8f988b0c0af**. User could diff them with this example to know which changes are made to integrate with Neural Compressor. The model is performing [BraTS 2019](https://www.med.upenn.edu/cbica/brats2019/data.html) brain tumor segmentation task. 
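The run.py hunk below feeds a custom calibration dataset (CalibrationDL) to the quantizer through common.DataLoader. A self-contained sketch of that pattern with a toy 3D model and random volumes standing in for the BraTS preprocessing (the shapes and dummy label are assumptions of this sketch):

```python
import torch
from neural_compressor.experimental import Quantization, common

class CalibrationDL:
    """Toy calibration dataset: any object with __getitem__/__len__ will do."""
    def __init__(self, num_samples=50):
        self.data = [torch.randn(4, 16, 16, 16) for _ in range(num_samples)]
    def __getitem__(self, idx):
        return self.data[idx], 0   # (input, label); the label is unused during calibration
    def __len__(self):
        return len(self.data)

model = torch.nn.Conv3d(4, 2, kernel_size=3)   # toy stand-in for the fp32 3D-UNet
quantizer = Quantization("./conf.yaml")        # the yaml added by this patch
quantizer.model = common.Model(model)
quantizer.calib_dataloader = common.DataLoader(CalibrationDL())
q_model = quantizer()
q_model.save('./nc_workspace')
```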
@@ -50,7 +50,7 @@ The model is performing [BraTS 2019](https://www.med.upenn.edu/cbica/brats2019/d ## running cmd ```shell - make run_pytorch_LPOT_tuning + make run_pytorch_NC_tuning or diff --git a/examples/pytorch/eager/medical_imaging/3d-unet/run.py b/examples/pytorch/eager/medical_imaging/3d-unet/run.py index c9f03faade8..5913ce2988e 100644 --- a/examples/pytorch/eager/medical_imaging/3d-unet/run.py +++ b/examples/pytorch/eager/medical_imaging/3d-unet/run.py @@ -149,7 +149,7 @@ def eval_func(model): sys.path.insert(0, os.path.join(os.getcwd(), "nnUnet")) from nnunet.training.model_restore import load_model_and_checkpoint_files -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common import pickle def main(): @@ -188,15 +188,15 @@ def __len__(self): quantizer.eval_func = eval_func quantizer.calib_dataloader = common.DataLoader(CalibrationDL()) q_model = quantizer() - q_model.save('./lpot_workspace') + q_model.save('./nc_workspace') exit(0) if args.benchmark: model.eval() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( - os.path.abspath(os.path.expanduser('./lpot_workspace')), model) + os.path.abspath(os.path.expanduser('./nc_workspace')), model) else: new_model = model eval_func(new_model) diff --git a/examples/pytorch/eager/object_detection/yolo_v3/README.md b/examples/pytorch/eager/object_detection/yolo_v3/README.md index 372e85b2e88..939149ef5bf 100644 --- a/examples/pytorch/eager/object_detection/yolo_v3/README.md +++ b/examples/pytorch/eager/object_detection/yolo_v3/README.md @@ -1,13 +1,13 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch YOLO v3 tuning results with Intel® Low Precision Optimization Tool(LPOT). +This document describes the step-by-step instructions for reproducing PyTorch YOLO v3 tuning results with Intel® Neural Compressor. > **Note** > > PyTorch quantization implementation in imperative path has limitation on automatically execution. > It requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. -> LPOT requires users to complete these two manual steps before triggering auto-tuning process. +> Neural Compressor requires users to complete these two manual steps before triggering auto-tuning process. > For details, please refer to https://pytorch.org/docs/stable/quantization.html # Prerequisite @@ -39,20 +39,20 @@ cd examples/pytorch/eager/object_detection/yolo_v3/ python test.py --weights_path weights/yolov3.weights -t ``` -Examples Of Enabling LPOT Auto Tuning On PyTorch YOLOV3 +Examples Of Enabling Neural Compressor Auto Tuning On PyTorch YOLOV3 ======================================================= -This is a tutorial of how to enable a PyTorch model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable a PyTorch model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports three usage as below: +Intel® Neural Compressor supports three usage as below: 1. User only provide fp32 "model", and configure calibration dataset, evaluation dataset and metric in model-specific yaml config file. 2. User provide fp32 "model", calibration dataset "q_dataloader" and evaluation dataset "eval_dataloader", and configure metric in tuning.metric field of model-specific yaml config file. 3. 
User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -Here we integrate PyTorch YOLO V3 with Intel® Low Precision Optimization Tool by the third use case for simplicity. +Here we integrate PyTorch YOLO V3 with Intel® Neural Compressor by the third use case for simplicity. ### Write Yaml Config File @@ -117,15 +117,15 @@ def eval_func(model): return AP.mean() model.eval() model.fuse_model() -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common dataset = ListDataset(valid_path, img_size=opt.img_size, augment=False, multiscale=False) dataloader = torch.utils.data.DataLoader( dataset, batch_size=opt.batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn ) -lpot_dataloader = yolo_dataLoader(dataloader) +nc_dataloader = yolo_dataLoader(dataloader) quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) -quantizer.calib_dataloader = lpot_dataloader +quantizer.calib_dataloader = nc_dataloader quantizer.eval_func = eval_func q_model = quantizer() ``` diff --git a/examples/pytorch/eager/object_detection/yolo_v3/requirements.txt b/examples/pytorch/eager/object_detection/yolo_v3/requirements.txt index 34b59769c3f..d046593864f 100644 --- a/examples/pytorch/eager/object_detection/yolo_v3/requirements.txt +++ b/examples/pytorch/eager/object_detection/yolo_v3/requirements.txt @@ -5,7 +5,7 @@ tensorboard terminaltables pillow>=8.2.0 tqdm -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.5.0+cpu torchvision==0.6.0+cpu diff --git a/examples/pytorch/eager/object_detection/yolo_v3/test.py b/examples/pytorch/eager/object_detection/yolo_v3/test.py index 3c960b941eb..6df5fe8067e 100644 --- a/examples/pytorch/eager/object_detection/yolo_v3/test.py +++ b/examples/pytorch/eager/object_detection/yolo_v3/test.py @@ -115,7 +115,7 @@ def evaluate(model, path, iou_thres, conf_thres, nms_thres, img_size, batch_size parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark for int8') opt = parser.parse_args() @@ -169,16 +169,16 @@ def eval_func(model): model.eval() model.fuse_model() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common dataset = ListDataset(valid_path, img_size=opt.img_size, augment=False, multiscale=False) dataloader = torch.utils.data.DataLoader( dataset, batch_size=opt.batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn ) - lpot_dataloader = yolo_dataLoader(dataloader) + nc_dataloader = yolo_dataLoader(dataloader) quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) quantizer.eval_func = eval_func - quantizer.calib_dataloader = lpot_dataloader + quantizer.calib_dataloader = nc_dataloader q_model = quantizer() q_model.save(opt.tuned_checkpoint) @@ -188,7 +188,7 @@ def eval_func(model): model.eval() model.fuse_model() if opt.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( 
os.path.abspath(os.path.expanduser(opt.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/eager/recommendation/README.md b/examples/pytorch/eager/recommendation/README.md index 93928de7359..13022f06706 100644 --- a/examples/pytorch/eager/recommendation/README.md +++ b/examples/pytorch/eager/recommendation/README.md @@ -12,7 +12,7 @@ This document is used to list steps of reproducing PyTorch DLRM tuning zoo resul > > 1. PyTorch quantization implementation in imperative path has limitation on automatically execution. > It requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. -> Intel® Low Precision Optimization Tool has no capability to solve this framework limitation. Intel® Low Precision Optimization Tool supposes user have done these two steps before invoking Intel® Low Precision Optimization Tool interface. +> Intel® Neural Compressor cannot remove this framework limitation; it assumes the user has completed these two steps before invoking the Intel® Neural Compressor interface. > For details, please refer to https://pytorch.org/docs/stable/quantization.html > 2. Please ensure your PC have >370G memory to run DLRM @@ -49,14 +49,14 @@ This document is used to list steps of reproducing PyTorch DLRM tuning zoo resul --load-model=${model_path} --tune ``` -Examples of enabling Intel® Low Precision Optimization Tool +Examples of enabling Intel® Neural Compressor ========================= -This is a tutorial of how to enable DLRM model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable the DLRM model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports two usages: +Intel® Neural Compressor supports two usages: 1. User specifies fp32 'model', calibration dataset 'q_dataloader', evaluation dataset "eval_dataloader" and metrics in tuning.metrics field of model-specific yaml config file. @@ -81,7 +81,7 @@ tuning: random_seed: 9527 ``` Here we set accuracy target as tolerating 0.01 relative accuracy loss of baseline. The default tuning strategy is basic strategy. The timeout 0 means early stop as well as a tuning config meet accuracy target.
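In the prepare section below, the example inserts QuantStub/DeQuantStub into DLRM's MLPs and then reuses the existing eval_dataloader as the calibration dataloader. A compressed, self-contained sketch of that wiring with a toy MLP in place of DLRM (a plain torch DataLoader is accepted here, as run_lm_tune.py in this patch also shows):

```python
import torch
from torch.quantization import QuantStub, DeQuantStub
from torch.utils.data import DataLoader, TensorDataset
from neural_compressor.experimental import Quantization, common

# Toy stand-in for DLRM's bottom MLP, with the manual QuantStub/DeQuantStub step applied.
mlp = torch.nn.Sequential(QuantStub(), torch.nn.Linear(13, 8), torch.nn.ReLU(), DeQuantStub())
eval_dataloader = DataLoader(
    TensorDataset(torch.randn(32, 13), torch.randint(0, 2, (32,))), batch_size=8)

quantizer = Quantization("./conf.yaml")
quantizer.model = common.Model(mlp)
quantizer.calib_dataloader = eval_dataloader   # the evaluation loader doubles as calibration data
q_model = quantizer()
```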
-> **Note** : Intel® Low Precision Optimization Tool does NOT support "mse" tuning strategy for pytorch framework +> **Note** : Intel® Neural Compressor does NOT support "mse" tuning strategy for pytorch framework ### prepare PyTorch quantization requires two manual steps: @@ -118,7 +118,7 @@ dlrm.bot_l.insert(0, QuantStub()) dlrm.bot_l.append(DeQuantStub()) dlrm.top_l.insert(0, QuantStub()) dlrm.top_l.insert(len(dlrm.top_l) - 1, DeQuantStub()) -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(dlrm) quantizer.calib_dataloader = eval_dataloader diff --git a/examples/pytorch/eager/recommendation/dlrm_s_pytorch_tune.py b/examples/pytorch/eager/recommendation/dlrm_s_pytorch_tune.py index 2ac6059c4fb..58582614ca2 100644 --- a/examples/pytorch/eager/recommendation/dlrm_s_pytorch_tune.py +++ b/examples/pytorch/eager/recommendation/dlrm_s_pytorch_tune.py @@ -515,7 +515,7 @@ def __iter__(self): parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark for int8') args = parser.parse_args() @@ -904,7 +904,7 @@ def eval_func(model): dlrm.bot_l.append(DeQuantStub()) dlrm.top_l.insert(0, QuantStub()) dlrm.top_l.insert(len(dlrm.top_l) - 1, DeQuantStub()) - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(dlrm) quantizer.calib_dataloader = eval_dataloader @@ -927,7 +927,7 @@ def eval_func(model): dlrm.top_l.insert(0, QuantStub()) dlrm.top_l.insert(len(dlrm.top_l) - 1, DeQuantStub()) if args.do_int8_inference: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load import os dlrm = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), dlrm) diff --git a/examples/pytorch/eager/recommendation/requirements.txt b/examples/pytorch/eager/recommendation/requirements.txt index 1e9901ec986..5930da463f1 100644 --- a/examples/pytorch/eager/recommendation/requirements.txt +++ b/examples/pytorch/eager/recommendation/requirements.txt @@ -2,7 +2,7 @@ future numpy onnx pydot -lpot +neural-compressor scikit-learn tqdm --find-links https://download.pytorch.org/whl/torch_stable.html diff --git a/examples/pytorch/eager/recommendation/run_and_time.sh b/examples/pytorch/eager/recommendation/run_and_time.sh index de63b4d0887..9949381ee91 100755 --- a/examples/pytorch/eager/recommendation/run_and_time.sh +++ b/examples/pytorch/eager/recommendation/run_and_time.sh @@ -20,7 +20,7 @@ export fp32_load_path=/mnt/local_disk3/dataset/dlrm/dlrm_weight/terabyte_mlperf. 
# python -u dlrm_s_pytorch_tune.py --arch-sparse-feature-size=128 --arch-mlp-bot="13-512-256-128" --arch-mlp-top="1024-1024-512-256-1" --max-ind-range=40000000 --data-generation=dataset --data-set=terabyte --raw-data-file=${data_path}/day --processed-data-file=${data_path}/terabyte_processed.npz --loss-function=bce --round-targets=True --learning-rate=1.0 --mini-batch-size=2048 --print-freq=2048 --print-time --test-freq=102400 --test-mini-batch-size=16384 --test-num-workers=16 --memory-map --mlperf-logging --mlperf-auc-threshold=0.8025 --mlperf-bin-loader --mlperf-bin-shuffle --load-model=${fp32_load_path} --inference-only $dlrm_extra_option 2>&1 | tee run_terabyte_mlperf_pt_fp32inference.log # echo "int8 inference" # python -u dlrm_s_pytorch_tune.py --arch-sparse-feature-size=128 --arch-mlp-bot="13-512-256-128" --arch-mlp-top="1024-1024-512-256-1" --max-ind-range=40000000 --data-generation=dataset --data-set=terabyte --raw-data-file=${data_path}/day --processed-data-file=${data_path}/terabyte_processed.npz --loss-function=bce --round-targets=True --learning-rate=1.0 --mini-batch-size=2048 --print-freq=2048 --print-time --test-freq=102400 --test-mini-batch-size=16384 --test-num-workers=16 --memory-map --mlperf-logging --mlperf-auc-threshold=0.8025 --mlperf-bin-loader --mlperf-bin-shuffle --load-model=${fp32_load_path} --inference-only --do-int8-inference $dlrm_extra_option 2>&1 | tee run_terabyte_mlperf_pt_int8.log -echo "lpot tune" +echo "neural_compressor tune" python -u dlrm_s_pytorch_tune.py --arch-sparse-feature-size=128 --arch-mlp-bot="13-512-256-128" --arch-mlp-top="1024-1024-512-256-1" --max-ind-range=40000000 --data-generation=dataset --data-set=terabyte --raw-data-file=${data_path}/day --processed-data-file=${data_path}/terabyte_processed.npz --loss-function=bce --round-targets=True --learning-rate=1.0 --mini-batch-size=2048 --print-freq=2048 --print-time --test-freq=102400 --test-mini-batch-size=16384 --test-num-workers=16 --memory-map --mlperf-logging --mlperf-auc-threshold=0.8025 --mlperf-bin-loader --mlperf-bin-shuffle --load-model=${fp32_load_path} --tune $dlrm_extra_option 2>&1 | tee run_terabyte_mlperf_pt_int8.log echo "done" diff --git a/examples/pytorch/eager/speech_recognition/rnnt/README.md b/examples/pytorch/eager/speech_recognition/rnnt/README.md index 592c1fcc52b..fc74e6aceb9 100644 --- a/examples/pytorch/eager/speech_recognition/rnnt/README.md +++ b/examples/pytorch/eager/speech_recognition/rnnt/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document list steps of reproducing Intel Optimized PyTorch RNNT models tuning results via LPOT. +This document list steps of reproducing Intel Optimized PyTorch RNNT models tuning results via Neural Compressor. Our example comes from MLPerf Inference Benchmark Suite @@ -43,13 +43,13 @@ Our example comes from MLPerf Inference Benchmark Suite # Run -### 1. Enable RNNT example with the auto dynamic quantization strategy of LPOT. +### 1. Enable RNNT example with the auto dynamic quantization strategy of Neural Compressor. The changes made are as follows: 1. add conf.yaml: This file contains the configuration of quantization. 2. run.py->run_tune.py: - we added lpot support in it. + we added neural_compressor support in it. 3. edit pytorch_SUT.py: remove jit script convertion 4. 
edit pytorch/decoders.py: diff --git a/examples/pytorch/eager/speech_recognition/rnnt/requirements.txt b/examples/pytorch/eager/speech_recognition/rnnt/requirements.txt index 417876800c3..8e99d3faae4 100644 --- a/examples/pytorch/eager/speech_recognition/rnnt/requirements.txt +++ b/examples/pytorch/eager/speech_recognition/rnnt/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor sox absl-py toml diff --git a/examples/pytorch/eager/speech_recognition/rnnt/run_tune.py b/examples/pytorch/eager/speech_recognition/rnnt/run_tune.py index 4ea9f4d52fb..c3e8a1551ec 100644 --- a/examples/pytorch/eager/speech_recognition/rnnt/run_tune.py +++ b/examples/pytorch/eager/speech_recognition/rnnt/run_tune.py @@ -48,7 +48,7 @@ def get_args(): help='For accuracy measurement only.') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') args = parser.parse_args() return args @@ -121,8 +121,8 @@ def benchmark(model): print('Throughput: %.3f samples/sec' % (10**9/latency_per_sample)) if args.tune: - # Dynamic Quantization with LPOT - from lpot.experimental import Quantization, common + # Dynamic Quantization with Neural Compressor + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) quantizer.eval_func = eval_func @@ -130,7 +130,7 @@ def benchmark(model): q_model.save(args.tuned_checkpoint) elif args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load int8_model = load(os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) if args.accuracy_only: eval_func(int8_model) diff --git a/examples/pytorch/fx/huggingface_models/text_classification/qat/README.md b/examples/pytorch/fx/huggingface_models/text_classification/qat/README.md index 9388b5e2189..b6a00acf9ec 100755 --- a/examples/pytorch/fx/huggingface_models/text_classification/qat/README.md +++ b/examples/pytorch/fx/huggingface_models/text_classification/qat/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document list steps of reproducing Intel Optimized PyTorch bert-base-cased/uncased models tuning results via LPOT with quantization aware training. +This document list steps of reproducing Intel Optimized PyTorch bert-base-cased/uncased models tuning results via Neural Compressor with quantization aware training. Our example comes from [Huggingface/transformers](https://github.com/huggingface/transformers) @@ -35,13 +35,13 @@ PyTorch 1.8 is needed for pytorch_fx backend and huggingface/transformers. # Run -### 1. Enable bert-base-cased/uncased example with the auto quantization aware training strategy of LPOT. +### 1. Enable bert-base-cased/uncased example with the auto quantization aware training strategy of Neural Compressor. The changes made are as follows: 1. add conf_qat.yaml: This file contains the configuration of quantization. 2. edit run_glue_tune.py: - - For quantization, We used lpot in it. + - For quantization, We used neural_compressor in it. - For training, we enbaled early stop strategy. ### 2. 
To get the tuned model and its accuracy: diff --git a/examples/pytorch/fx/huggingface_models/text_classification/qat/requirements.txt b/examples/pytorch/fx/huggingface_models/text_classification/qat/requirements.txt index 085d1d39e0e..526fe78bc77 100644 --- a/examples/pytorch/fx/huggingface_models/text_classification/qat/requirements.txt +++ b/examples/pytorch/fx/huggingface_models/text_classification/qat/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor transformers == 4.10.0 datasets >= 1.8.0 sentencepiece != 0.1.92 diff --git a/examples/pytorch/fx/huggingface_models/text_classification/qat/run_glue_tune.py b/examples/pytorch/fx/huggingface_models/text_classification/qat/run_glue_tune.py index a79bde1ed9e..be147a7d66e 100755 --- a/examples/pytorch/fx/huggingface_models/text_classification/qat/run_glue_tune.py +++ b/examples/pytorch/fx/huggingface_models/text_classification/qat/run_glue_tune.py @@ -186,7 +186,7 @@ class ModelArguments: }, ) tune: bool = field( - default=False, metadata={"help": "tune quantized model with LPOT"} + default=False, metadata={"help": "tune quantized model with Neural Compressor"} ) int8: bool = field( default=False, metadata={"help": "use int8 model to get accuracy or benchmark"} @@ -524,9 +524,9 @@ def benchmark(model): print('Latency: %.3f ms' % (1000 / throughput)) print('Throughput: %.3f samples/sec' % result['eval_samples_per_second']) - # optimize and quantize with LPOT + # optimize and quantize with Neural Compressor if model_args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization('conf_qat.yaml') quantizer.eval_func = eval_func quantizer.q_func = train_func @@ -536,7 +536,7 @@ def benchmark(model): return if model_args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load model = load(training_args.output_dir, model) if model_args.benchmark: benchmark(model) diff --git a/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/README.md b/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/README.md index 92ff7b7cc8e..034c7dccc8c 100644 --- a/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/README.md +++ b/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch ResNet50/ResNet18/ResNet101 tuning results with Intel® Low Precision Optimization Tool(LPOT). +This document describes the step-by-step instructions for reproducing PyTorch ResNet50/ResNet18/ResNet101 tuning results with Intel® Neural Compressor. 
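The run_glue_tune.py changes above reduce to one small QAT wiring pattern: the quantizer receives both a training function (q_func) and an evaluation function. A self-contained sketch of that shape, with toy stand-ins for the real fine-tuning and GLUE evaluation:

```python
import torch
from neural_compressor.experimental import Quantization, common

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))

def train_func(model):
    # Toy stand-in for the quantization-aware fine-tuning loop.
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
    for _ in range(10):
        loss = model(torch.randn(4, 8)).sum()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return model

def eval_func(model):
    # Toy stand-in: must return a single accuracy-like scalar for the tuner.
    with torch.no_grad():
        return float((model(torch.randn(64, 8)).argmax(1) == 0).float().mean())

quantizer = Quantization('conf_qat.yaml')   # the QAT config added by this patch
quantizer.eval_func = eval_func
quantizer.q_func = train_func
quantizer.model = common.Model(model)
q_model = quantizer()
q_model.save('./saved_results')             # default checkpoint dir used across these examples
```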
# Prerequisite @@ -62,25 +62,25 @@ python main.py -t -a mobilenet_v2 --pretrained /path/to/imagenet # Saving and loading model: * Saving model: - After tuning with LPOT, we can get LPOT.model: + After tuning with Neural Compressor, we can get neural_compressor.model: ``` -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) -lpot_model = quantizer() +nc_model = quantizer() ``` -Here, lpot_model is LPOT model class, so it has "save" API: +Here, nc_model is Neural Compressor model class, so it has "save" API: ```python -lpot_model.save("Path_to_save_configure_file") +nc_model.save("Path_to_save_configure_file") ``` * loading model: ```python -from lpot.utils.pytorch import load +from neural_compressor.utils.pytorch import load quantized_model = load( os.path.join(Path, 'best_configure.yaml'), os.path.join(Path, 'best_model_weights.pt'), fp32_model) @@ -89,20 +89,20 @@ quantized_model = load( Please refer to [Sample code](./main.py). -Examples of enabling LPOT auto tuning on PyTorch ResNet +Examples of enabling Neural Compressor auto tuning on PyTorch ResNet ======================================================= -This is a tutorial of how to enable a PyTorch classification model with LPOT. +This is a tutorial of how to enable a PyTorch classification model with Neural Compressor. # User Code Analysis -LPOT supports three usages: +Neural Compressor supports three usages: 1. User only provide fp32 "model", and configure calibration dataset, evaluation dataset and metric in model-specific yaml config file. 2. User provide fp32 "model", calibration dataset "q_dataloader" and evaluation dataset "eval_dataloader", and configure metric in tuning.metric field of model-specific yaml config file. 3. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -As ResNet18/50/101 series are typical classification models, use Top-K as metric which is built-in supported by LPOT. So here we integrate PyTorch ResNet with LPOT by the first use case for simplicity. +As ResNet18/50/101 series are typical classification models, use Top-K as metric which is built-in supported by Neural Compressor. So here we integrate PyTorch ResNet with Neural Compressor by the first use case for simplicity. ### Write Yaml Config File @@ -188,7 +188,7 @@ After prepare step is done, we just need update main.py like below. ```python model.eval() model.module.fuse_model() -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) q_model = quantizer() @@ -198,7 +198,7 @@ The quantizer() function will return a best quantized model during timeout const ### Dump tensors for debug -LPOT can dump every layer output tensor which you specify in evaluation. You just need to add some setting to yaml configure file as below: +Neural Compressor can dump every layer output tensor which you specify in evaluation. 
You just need to add some setting to yaml configure file as below: ```yaml tensorboard: true diff --git a/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/conf.yaml b/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/conf.yaml index bd943b56452..471aa8b046a 100644 --- a/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/conf.yaml +++ b/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/conf.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/main.py b/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/main.py index c359b769111..4bc8c829a2b 100644 --- a/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/main.py +++ b/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/main.py @@ -92,7 +92,7 @@ parser.add_argument('-r', "--accuracy_only", dest='accuracy_only', action='store_true', help='For accuracy measurement only.') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') @@ -266,7 +266,7 @@ def main_worker(gpu, ngpus_per_node, args): validate(val_loader, model, criterion, args) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common model.eval() quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) @@ -277,7 +277,7 @@ def main_worker(gpu, ngpus_per_node, args): if args.benchmark or args.accuracy_only: model.eval() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/requirements.txt b/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/requirements.txt index 5a604ca6b39..1b69b5b0e7a 100644 --- a/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/requirements.txt +++ b/examples/pytorch/fx/image_recognition/imagenet/cpu/ptq/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.8.0+cpu torchvision==0.9.0+cpu diff --git a/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/README.md b/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/README.md index 7499c7b43e8..ce99dda270e 100644 --- a/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/README.md +++ b/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch ResNet50/ResNet18/ResNet101 tuning results with Intel® Low Precision Optimization Tool. 
+This document describes the step-by-step instructions for reproducing PyTorch ResNet50/ResNet18/ResNet101 tuning results with Intel® Neural Compressor. # Prerequisite @@ -47,19 +47,19 @@ cd examples/pytorch/fx/image_recognition/imagenet/qat python main.py -t -a resnext101_32x8d --pretrained --config /path/to/config_file /path/to/imagenet ``` -Examples Of Enabling LPOT Auto Tuning On PyTorch ResNet +Examples Of Enabling Neural Compressor Auto Tuning On PyTorch ResNet ======================================================= -This is a tutorial of how to enable a PyTorch classification model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable a PyTorch classification model with Intel® Neural Compressor. # User Code Analysis -For quantization aware training mode, Intel® Low Precision Optimization Tool supports two usage as below: +For quantization aware training mode, Intel® Neural Compressor supports two usages, as below: 1. User specifies fp32 "model", training function "q_func", evaluation dataset "eval_dataloader" and metric in tuning.metric field of model-specific yaml config file, this option does not require customer to implement evaluation function. 2. User specifies fp32 "model", training function "q_func" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself, this option require customer implement evaluation function by himself. -As ResNet18/50/101 series are typical classification models, use Top-K as metric which is built-in supported by Intel® Low Precision Optimization Tool. So here we integrate PyTorch ResNet with Intel® Low Precision Optimization Tool by the first use case for simplicity. +As the ResNet18/50/101 series are typical classification models, we use the built-in Top-K metric supported by Intel® Neural Compressor. So here we integrate PyTorch ResNet with Intel® Neural Compressor via the first use case for simplicity. ### Write Yaml Config File @@ -100,7 +100,7 @@ The related code please refer to examples/pytorch/fx/image_recognition/imagenet/ After prepare step is done, we just need update main.py like below. ```python -def training_func_for_lpot(model): +def training_func_for_nc(model): epochs = 8 iters = 30 optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) @@ -125,10 +125,10 @@ def training_func_for_lpot(model): model.apply(torch.nn.intrinsic.qat.freeze_bn_stats) return model.module.fuse_model() -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) -quantizer.q_func = training_func_for_lpot +quantizer.q_func = training_func_for_nc quantizer.eval_dataloader = val_loader q_model = quantizer() ``` diff --git a/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/conf.yaml b/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/conf.yaml index 226bd9348d7..b6ba912fcbe 100644 --- a/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/conf.yaml +++ b/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/conf.yaml @@ -20,8 +20,8 @@ model: # mandatory. used to specif quantization: # optional. required for QAT and PTQ. approach: quant_aware_training # mandatory. supported values are quant_aware_training and post_training_static_quant. -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional.
required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. diff --git a/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/main.py b/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/main.py index fa472002f98..88f73b3e51a 100644 --- a/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/main.py +++ b/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/main.py @@ -88,7 +88,7 @@ parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') @@ -249,7 +249,7 @@ def main_worker(gpu, ngpus_per_node, args): return if args.tune: - def training_func_for_lpot(model): + def training_func_for_nc(model): epochs = 8 iters = 30 optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) @@ -276,10 +276,10 @@ def training_func_for_lpot(model): return - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(args.config) quantizer.model = common.Model(model) - quantizer.q_func = training_func_for_lpot + quantizer.q_func = training_func_for_nc quantizer.eval_dataloader = val_loader q_model = quantizer() q_model.save(args.tuned_checkpoint) @@ -288,7 +288,7 @@ def training_func_for_lpot(model): if args.benchmark: model.eval() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/requirements.txt b/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/requirements.txt index 5a604ca6b39..1b69b5b0e7a 100644 --- a/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/requirements.txt +++ b/examples/pytorch/fx/image_recognition/imagenet/cpu/qat/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.8.0+cpu torchvision==0.9.0+cpu diff --git a/examples/pytorch/fx/object_detection/maskrcnn/README.md b/examples/pytorch/fx/object_detection/maskrcnn/README.md index fc5ec314a1c..2689f8c1177 100644 --- a/examples/pytorch/fx/object_detection/maskrcnn/README.md +++ b/examples/pytorch/fx/object_detection/maskrcnn/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch MASK_RCNN tuning results with Intel® Low Precision Optimization Tool(LPOT). +This document describes the step-by-step instructions for reproducing PyTorch MASK_RCNN tuning results with Intel® Neural Compressor. 
# Prerequisite @@ -56,25 +56,25 @@ sh run_tuning.sh --output_model=/path/to/tuned_checkpoint # Saving and loading model: * Saving model: - After tuning with LPOT, we can get LPOT.model: + After tuning with Neural Compressor, we can get neural_compressor.model: ``` -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) -lpot_model = quantizer() +nc_model = quantizer() ``` -Here, lpot_model is LPOT model class, so it has "save" API: +Here, nc_model is Neural Compressor model class, so it has "save" API: ```python -lpot_model.save("Path_to_save_configure_file") +nc_model.save("Path_to_save_configure_file") ``` * loading model: ```python -from lpot.utils.pytorch import load +from neural_compressor.utils.pytorch import load quantized_model = load( os.path.join(Path, 'best_configure.yaml'), os.path.join(Path, 'best_model_weights.pt'), fp32_model) @@ -82,20 +82,20 @@ quantized_model = load( Please refer to [Sample code](./pytorch/tools/test_net.py) -Examples of enabling LPOT auto tuning on PyTorch ResNet +Examples of enabling Neural Compressor auto tuning on PyTorch ResNet ======================================================= -This is a tutorial of how to enable a PyTorch classification model with LPOT. +This is a tutorial of how to enable a PyTorch classification model with Neural Compressor. # User Code Analysis -LPOT supports three usages: +Neural Compressor supports three usages: 1. User only provide fp32 "model", and configure calibration dataset, evaluation dataset and metric in model-specific yaml config file. 2. User provide fp32 "model", calibration dataset "q_dataloader" and evaluation dataset "eval_dataloader", and configure metric in tuning.metric field of model-specific yaml config file. 3. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -Here we integrate PyTorch maskrcnn with Intel® Low Precision Optimization Tool by the third use case for simplicity. +Here we integrate PyTorch maskrcnn with Intel® Neural Compressor by the third use case for simplicity. 
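The third use case hinges entirely on the custom eval_func contract: evaluate the candidate model and return one scalar that the tuner compares against the fp32 baseline, higher meaning better. A minimal sketch of that contract; the body below fakes the metric, where the real example (see the test_net.py hunk below) runs the full COCO evaluation and returns results.results['bbox']['AP']:

```python
from neural_compressor.experimental import Quantization, common

def eval_func(q_model):
    # Contract: run evaluation on q_model and return a single scalar.
    # The real example returns the COCO bbox AP here.
    return 0.0  # placeholder metric for this sketch

quantizer = Quantization("./conf.yaml")
quantizer.model = common.Model(model)   # the fp32 Mask R-CNN, assumed in scope
quantizer.eval_func = eval_func
q_model = quantizer()
```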
### Write Yaml Config File @@ -168,7 +168,7 @@ def eval_func(q_model): return results.results['bbox']['AP'] if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common model.eval() quantizer = Quantization("./conf.yaml") prepare_custom_config_dict = {"non_traceable_module_class": [ diff --git a/examples/pytorch/fx/object_detection/maskrcnn/pytorch/tools/test_net.py b/examples/pytorch/fx/object_detection/maskrcnn/pytorch/tools/test_net.py index 5de21881f66..cd63fe4ee92 100644 --- a/examples/pytorch/fx/object_detection/maskrcnn/pytorch/tools/test_net.py +++ b/examples/pytorch/fx/object_detection/maskrcnn/pytorch/tools/test_net.py @@ -55,7 +55,7 @@ def main(): parser.add_argument('-r', "--accuracy_only", dest='accuracy_only', action='store_true', help='For accuracy measurement only.') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./saved_results)') + help='path to checkpoint tuned by Neural Compressor (default: ./saved_results)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') parser.add_argument( @@ -155,7 +155,7 @@ def eval_func(q_model): MaskPostProcessor, FPN, RPNHead ]} if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed, is_calib=True) @@ -170,7 +170,7 @@ def eval_func(q_model): return if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load model = load(os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model, **{'prepare_custom_config_dict': prepare_custom_config_dict}) if args.benchmark: diff --git a/examples/pytorch/fx/object_detection/maskrcnn/requirements.txt b/examples/pytorch/fx/object_detection/maskrcnn/requirements.txt index df2170d7fcb..9db63c636fe 100644 --- a/examples/pytorch/fx/object_detection/maskrcnn/requirements.txt +++ b/examples/pytorch/fx/object_detection/maskrcnn/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor yacs --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.8.0+cpu diff --git a/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/README.md b/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/README.md index ea7172f2c09..8bae71b77a5 100755 --- a/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/README.md +++ b/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document list steps of reproducing Intel Optimized PyTorch ssd_resnet34 models tuning results via LPOT. +This document list steps of reproducing Intel Optimized PyTorch ssd_resnet34 models tuning results via Neural Compressor. Our example comes from MLPerf Inference Benchmark Suite @@ -51,7 +51,7 @@ GCC5 or above is needed. # Run -### 1. Enable ssd_resnet34 example with the auto dynamic quantization strategy of LPOT. +### 1. Enable ssd_resnet34 example with the auto dynamic quantization strategy of Neural Compressor. The changes made are as follows: 1. add conf.yaml: @@ -63,9 +63,9 @@ GCC5 or above is needed. anno_dir: convert_dataset/annotations/instances_val2017.json ``` Note: the npy file does not exist in current folder and will be generated after the progress is initialized. 
So please keep npy_dir path pointing to preprocessed/coco-1200-pt/NCHW/val2017/ in current folder, You can also use absolute path by adding your current path before preprocessed/coco-1200-pt/NCHW/val2017/. - Such as: /home/xxx/lpot/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/preprocessed/coco-1200-pt/NCHW/val2017/ + Such as: /home/xxx/neural_compressor/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/preprocessed/coco-1200-pt/NCHW/val2017/ 2. edit python/main.py: - we import lpot in it. + we import neural_compressor in it. 3. edit python/model/ssd_r34.py: we wrap functions with @torch.fx.wrap to avoid ops cannot be traced by fx mode. diff --git a/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/conf.yaml b/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/conf.yaml index 129389f2bbc..d55d54c6fe0 100644 --- a/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/conf.yaml +++ b/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/conf.yaml @@ -21,7 +21,7 @@ quantization: # optional. tuning constrai approach: post_training_static_quant # mandatory. default value is post_training_dynamic_quant. calibration: sampling_size: 50 100 500 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 1 dataset: COCONpy: diff --git a/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/python/main.py b/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/python/main.py index c63ddaf5f70..288163e0481 100755 --- a/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/python/main.py +++ b/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/python/main.py @@ -216,7 +216,7 @@ def get_args(): help='run benchmark') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') args = parser.parse_args() # don't use defaults in argparser. 
Instead we default to a dict, override that with a profile @@ -582,8 +582,8 @@ def benchmark(model): os.chdir(os.path.join(sys.path[0], "..")) if args.tune: - # Quantization with LPOT - from lpot.experimental import Quantization, common + # Quantization with Neural Compressor + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(raw_model) quantizer.eval_func = eval_func @@ -591,7 +591,7 @@ def benchmark(model): q_model.save(args.tuned_checkpoint) elif args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load int8_model = load(os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), raw_model) if args.accuracy: eval_func(int8_model) diff --git a/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/requirements.txt b/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/requirements.txt index e6544a57d32..07ab5f3cfdc 100644 --- a/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/requirements.txt +++ b/examples/pytorch/fx/object_detection/ssd_resnet34/ptq/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor absl-py --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.8.0+cpu diff --git a/examples/pytorch/fx/object_detection/ssd_resnet34/qat/README.md b/examples/pytorch/fx/object_detection/ssd_resnet34/qat/README.md index 1f6546b2ccd..463750b69f0 100755 --- a/examples/pytorch/fx/object_detection/ssd_resnet34/qat/README.md +++ b/examples/pytorch/fx/object_detection/ssd_resnet34/qat/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document list steps of reproducing Intel Optimized PyTorch ssd_resnet34 300*300 models tuning results via LPOT. +This document list steps of reproducing Intel Optimized PyTorch ssd_resnet34 300*300 models tuning results via Neural Compressor. Our example comes from MLPerf Training Inference Suite @@ -29,14 +29,14 @@ PyTorch 1.8 or higher version is needed with pytorch_fx backend. # Run -### 1. Enable ssd_resnet34 example with quant aware training strategy of LPOT. +### 1. Enable ssd_resnet34 example with quant aware training strategy of Neural Compressor. The changes made are as follows: 1. add conf.yaml: This file contains the configuration of quantization. 2. add ssd/main.py:\ - we add the eval_func and training_func_for_lpot with reference to https://github.com/mlcommons/training/blob/master/single_stage_detector/ssd/train.py \ - we import lpot in it. + we add the eval_func and training_func_for_nc with reference to https://github.com/mlcommons/training/blob/master/single_stage_detector/ssd/train.py \ + we import neural_compressor in it. 3. edit ssd/ssd300.py: we replace view() with reshape() in function bbox_view(). 
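A brief note on change 3: `view()` requires a contiguous tensor and raises at runtime otherwise, while `reshape()` falls back to a copy when needed, which is presumably why it is the safer choice once FX-mode rewrites have altered tensor layouts. The snippet below is a simplified stand-in for the real `bbox_view()` (which takes more arguments), purely to illustrate the difference.

```python
import torch

def bbox_view(src):
    # before: return src.view(src.size(0), 4, -1)   # fails on non-contiguous input
    return src.reshape(src.size(0), 4, -1)          # copies if it has to

x = torch.randn(2, 4, 8).transpose(1, 2)  # transpose() yields a non-contiguous view
print(bbox_view(x).shape)                 # torch.Size([2, 4, 8]); .view() would raise here
```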
diff --git a/examples/pytorch/fx/object_detection/ssd_resnet34/qat/requirements.txt b/examples/pytorch/fx/object_detection/ssd_resnet34/qat/requirements.txt index 8cf411cdb07..6e6a77f7658 100755 --- a/examples/pytorch/fx/object_detection/ssd_resnet34/qat/requirements.txt +++ b/examples/pytorch/fx/object_detection/ssd_resnet34/qat/requirements.txt @@ -6,4 +6,4 @@ torchaudio==0.9.0 matplotlib pycocotools absl-py -lpot +neural-compressor diff --git a/examples/pytorch/fx/object_detection/ssd_resnet34/qat/ssd/main.py b/examples/pytorch/fx/object_detection/ssd_resnet34/qat/ssd/main.py index a263f3d2bcb..532881a028e 100644 --- a/examples/pytorch/fx/object_detection/ssd_resnet34/qat/ssd/main.py +++ b/examples/pytorch/fx/object_detection/ssd_resnet34/qat/ssd/main.py @@ -79,7 +79,7 @@ def parse_args(): parser.add_argument('--int8', action='store_true', help='int8') parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass") parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--warmup-inference', type=int, default=10, help='warmup for latency') parser.add_argument('--inference-iters', type=int, default=100, help='number of iterations for inference') @@ -300,7 +300,7 @@ def eval_func(model): return current_accuracy if args.tune: - def training_func_for_lpot(model): + def training_func_for_nc(model): current_lr = args.lr * (global_batch_size / 32) current_momentum = 0.9 optim = torch.optim.SGD(model.parameters(), lr=current_lr, @@ -394,18 +394,18 @@ def training_func_for_lpot(model): iter_num += 1 return - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(ssd300) quantizer.eval_func = eval_func - quantizer.q_func = training_func_for_lpot + quantizer.q_func = training_func_for_nc q_model = quantizer() q_model.save(args.tuned_checkpoint) if args.benchmark or args.accuracy: ssd300.eval() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), ssd300) else: diff --git a/examples/pytorch/fx/recommendation/README.md b/examples/pytorch/fx/recommendation/README.md index e66d6127aa2..2949e84abe3 100644 --- a/examples/pytorch/fx/recommendation/README.md +++ b/examples/pytorch/fx/recommendation/README.md @@ -12,7 +12,7 @@ This document is used to list steps of reproducing PyTorch DLRM tuning zoo resul > > 1. PyTorch quantization implementation in imperative path has limitation on automatically execution. > It requires to manually add QuantStub and DequantStub for quantizable ops, it also requires to manually do fusion operation. -> Intel® Low Precision Optimization Tool has no capability to solve this framework limitation. Intel® Low Precision Optimization Tool supposes user have done these two steps before invoking Intel® Low Precision Optimization Tool interface. +> Intel® Neural Compressor has no capability to solve this framework limitation. Intel® Neural Compressor supposes user have done these two steps before invoking Intel® Neural Compressor interface. > For details, please refer to https://pytorch.org/docs/stable/quantization.html > 2. 
Please ensure your PC have >370G memory to run DLRM @@ -50,14 +50,14 @@ PyTorch 1.8 or higher version is needed with pytorch_fx backend. --load-model=${model_path} --tune ``` -Examples of enabling Intel® Low Precision Optimization Tool +Examples of enabling Intel® Neural Compressor ========================= -This is a tutorial of how to enable DLRM model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable DLRM model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports two usages: +Intel® Neural Compressor supports two usages: 1. User specifies fp32 'model', calibration dataset 'q_dataloader', evaluation dataset "eval_dataloader" and metrics in tuning.metrics field of model-specific yaml config file. @@ -82,7 +82,7 @@ tuning: random_seed: 9527 ``` Here we set accuracy target as tolerating 0.01 relative accuracy loss of baseline. The default tuning strategy is basic strategy. The timeout 0 means early stop as well as a tuning config meet accuracy target. -> **Note** : Intel® Low Precision Optimization Tool does NOT support "mse" tuning strategy for pytorch framework +> **Note** : Intel® Neural Compressor does NOT support "mse" tuning strategy for pytorch framework ### prepare PyTorch quantization requires two manual steps: @@ -119,7 +119,7 @@ dlrm.bot_l.insert(0, QuantStub()) dlrm.bot_l.append(DeQuantStub()) dlrm.top_l.insert(0, QuantStub()) dlrm.top_l.insert(len(dlrm.top_l) - 1, DeQuantStub()) -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(dlrm) quantizer.calib_dataloader = eval_dataloader diff --git a/examples/pytorch/fx/recommendation/dlrm_s_pytorch_tune.py b/examples/pytorch/fx/recommendation/dlrm_s_pytorch_tune.py index bdaac010299..52c58a38abf 100644 --- a/examples/pytorch/fx/recommendation/dlrm_s_pytorch_tune.py +++ b/examples/pytorch/fx/recommendation/dlrm_s_pytorch_tune.py @@ -520,7 +520,7 @@ def __iter__(self): parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark for int8') args = parser.parse_args() @@ -898,7 +898,7 @@ def eval_func(model): print('tune') eval_dataloader = DLRM_DataLoader(test_ld) dlrm.eval() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(dlrm) quantizer.calib_dataloader = eval_dataloader @@ -910,7 +910,7 @@ def eval_func(model): if args.benchmark: dlrm.eval() if args.int8: - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load import os dlrm = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), dlrm) diff --git a/examples/pytorch/fx/recommendation/requirements.txt b/examples/pytorch/fx/recommendation/requirements.txt index 9e5b36b5e89..b28e34f1b73 100644 --- a/examples/pytorch/fx/recommendation/requirements.txt +++ b/examples/pytorch/fx/recommendation/requirements.txt @@ -2,7 +2,7 @@ future numpy onnx pydot -lpot +neural-compressor scikit-learn tqdm --find-links 
https://download.pytorch.org/whl/torch_stable.html diff --git a/examples/pytorch/fx/recommendation/run_and_time.sh b/examples/pytorch/fx/recommendation/run_and_time.sh index de63b4d0887..9949381ee91 100755 --- a/examples/pytorch/fx/recommendation/run_and_time.sh +++ b/examples/pytorch/fx/recommendation/run_and_time.sh @@ -20,7 +20,7 @@ export fp32_load_path=/mnt/local_disk3/dataset/dlrm/dlrm_weight/terabyte_mlperf. # python -u dlrm_s_pytorch_tune.py --arch-sparse-feature-size=128 --arch-mlp-bot="13-512-256-128" --arch-mlp-top="1024-1024-512-256-1" --max-ind-range=40000000 --data-generation=dataset --data-set=terabyte --raw-data-file=${data_path}/day --processed-data-file=${data_path}/terabyte_processed.npz --loss-function=bce --round-targets=True --learning-rate=1.0 --mini-batch-size=2048 --print-freq=2048 --print-time --test-freq=102400 --test-mini-batch-size=16384 --test-num-workers=16 --memory-map --mlperf-logging --mlperf-auc-threshold=0.8025 --mlperf-bin-loader --mlperf-bin-shuffle --load-model=${fp32_load_path} --inference-only $dlrm_extra_option 2>&1 | tee run_terabyte_mlperf_pt_fp32inference.log # echo "int8 inference" # python -u dlrm_s_pytorch_tune.py --arch-sparse-feature-size=128 --arch-mlp-bot="13-512-256-128" --arch-mlp-top="1024-1024-512-256-1" --max-ind-range=40000000 --data-generation=dataset --data-set=terabyte --raw-data-file=${data_path}/day --processed-data-file=${data_path}/terabyte_processed.npz --loss-function=bce --round-targets=True --learning-rate=1.0 --mini-batch-size=2048 --print-freq=2048 --print-time --test-freq=102400 --test-mini-batch-size=16384 --test-num-workers=16 --memory-map --mlperf-logging --mlperf-auc-threshold=0.8025 --mlperf-bin-loader --mlperf-bin-shuffle --load-model=${fp32_load_path} --inference-only --do-int8-inference $dlrm_extra_option 2>&1 | tee run_terabyte_mlperf_pt_int8.log -echo "lpot tune" +echo "neural_compressor tune" python -u dlrm_s_pytorch_tune.py --arch-sparse-feature-size=128 --arch-mlp-bot="13-512-256-128" --arch-mlp-top="1024-1024-512-256-1" --max-ind-range=40000000 --data-generation=dataset --data-set=terabyte --raw-data-file=${data_path}/day --processed-data-file=${data_path}/terabyte_processed.npz --loss-function=bce --round-targets=True --learning-rate=1.0 --mini-batch-size=2048 --print-freq=2048 --print-time --test-freq=102400 --test-mini-batch-size=16384 --test-num-workers=16 --memory-map --mlperf-logging --mlperf-auc-threshold=0.8025 --mlperf-bin-loader --mlperf-bin-shuffle --load-model=${fp32_load_path} --tune $dlrm_extra_option 2>&1 | tee run_terabyte_mlperf_pt_int8.log echo "done" diff --git a/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/README.md b/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/README.md index 235b69c0f6d..9e67fb6dd20 100644 --- a/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/README.md +++ b/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document describes the step-by-step instructions for reproducing PyTorch ResNet50/ResNet18/ResNet101 tuning results with Intel® Low Precision Optimization Tool(LPOT). +This document describes the step-by-step instructions for reproducing PyTorch ResNet50/ResNet18/ResNet101 tuning results with Intel® Neural Compressor. 
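Before moving on: the DLRM notes above stress that eager-mode PyTorch quantization leaves stub placement and fusion to the user. Assembling the README fragments into one sketch, with the imports they omit (`dlrm`, with its `bot_l`/`top_l` MLP containers, and `eval_dataloader` are assumed to exist as in the example):

```python
from torch.quantization import QuantStub, DeQuantStub
from neural_compressor.experimental import Quantization, common

# Manual quant/dequant stub placement around the bottom and top MLPs --
# the framework limitation noted above; the tool does not insert these.
dlrm.bot_l.insert(0, QuantStub())
dlrm.bot_l.append(DeQuantStub())
dlrm.top_l.insert(0, QuantStub())
dlrm.top_l.insert(len(dlrm.top_l) - 1, DeQuantStub())

quantizer = Quantization("./conf.yaml")
quantizer.model = common.Model(dlrm)
quantizer.calib_dataloader = eval_dataloader   # calibration reuses the eval data here
q_model = quantizer()
```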
# Prerequisite @@ -104,19 +104,19 @@ bash run_benchmark.sh --topology=mobilenet_v2_ipex --dataset_location=/path/to/i # Saving and loading model: * Saving model: - After tuning with LPOT, we can get LPOT.model: + After tuning with Neural Compressor, we can get neural_compressor.model: ``` -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) -lpot_model = quantizer() +nc_model = quantizer() ``` -Here, lpot_model is LPOT model class, so it has "save" API: +Here, nc_model is Neural Compressor model class, so it has "save" API: ```python -lpot_model.save("Path_to_save_configure_file") +nc_model.save("Path_to_save_configure_file") ``` * loading model: @@ -141,20 +141,20 @@ with torch.no_grad(): Please refer to [Sample code](./main.py). -Examples of enabling LPOT auto tuning on PyTorch ResNet +Examples of enabling Neural Compressor auto tuning on PyTorch ResNet ======================================================= -This is a tutorial of how to enable a PyTorch classification model with LPOT. +This is a tutorial of how to enable a PyTorch classification model with Neural Compressor. # User Code Analysis -LPOT supports three usages: +Neural Compressor supports three usages: 1. User only provide fp32 "model", and configure calibration dataset, evaluation dataset and metric in model-specific yaml config file. 2. User provide fp32 "model", calibration dataset "q_dataloader" and evaluation dataset "eval_dataloader", and configure metric in tuning.metric field of model-specific yaml config file. 3. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -As ResNet18/50/101 series are typical classification models, use Top-K as metric which is built-in supported by LPOT. So here we integrate PyTorch ResNet with LPOT by the first use case for simplicity. +As ResNet18/50/101 series are typical classification models, use Top-K as metric which is built-in supported by Neural Compressor. So here we integrate PyTorch ResNet with Neural Compressor by the first use case for simplicity. ### Write Yaml Config File @@ -235,14 +235,14 @@ The related code please refer to examples/pytorch/ipex/image_recognition/imagene ### Tuning With Intel PyTorch Extension -1. Tuning With LPOT +1. Tuning With Neural Compressor ```python - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization("./conf_ipex.yaml") quantizer.model = common.Model(model) - lpot_model = quantizer() - lpot_model.save("Path_to_save_configure_file") + nc_model = quantizer() + nc_model.save("Path_to_save_configure_file") ``` 2. Saving and Run ipex model @@ -250,10 +250,10 @@ The related code please refer to examples/pytorch/ipex/image_recognition/imagene * Saving model ```python - lpot_model.save("Path_to_save_configure_file") + nc_model.save("Path_to_save_configure_file") ``` - Here, lpot_model is the result of LPOT tuning. It is LPOT.model class, so it has "save" API. + Here, nc_model is the result of Neural Compressor tuning. It is neural_compressor.model class, so it has "save" API. 
* Run ipex model: diff --git a/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/conf_ipex.yaml b/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/conf_ipex.yaml index 48b40d1189c..7b09dfb60ea 100644 --- a/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/conf_ipex.yaml +++ b/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/conf_ipex.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/main.py b/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/main.py index 42d5a7bd528..98dbcf8e373 100644 --- a/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/main.py +++ b/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/main.py @@ -101,7 +101,7 @@ parser.add_argument('-r', "--accuracy_only", dest='accuracy_only', action='store_true', help='For accuracy measurement only.') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') parser.add_argument('--int8', dest='int8', action='store_true', help='run benchmark') parser.add_argument('--ipex', dest='ipex', action='store_true', @@ -286,7 +286,7 @@ def main_worker(gpu, ngpus_per_node, args): validate(val_loader, model, criterion, args) if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common if args.ipex: quantizer = Quantization("./conf_ipex.yaml") else: @@ -315,7 +315,7 @@ def main_worker(gpu, ngpus_per_node, args): else: if pytorch_version < '1.7': model.fuse_model() - from lpot.utils.pytorch import load + from neural_compressor.utils.pytorch import load new_model = load( os.path.abspath(os.path.expanduser(args.tuned_checkpoint)), model) else: diff --git a/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/requirements.txt b/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/requirements.txt index 5a604ca6b39..1b69b5b0e7a 100644 --- a/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/requirements.txt +++ b/examples/pytorch/ipex/image_recognition/imagenet/cpu/ptq/requirements.txt @@ -1,4 +1,4 @@ -lpot +neural-compressor --find-links https://download.pytorch.org/whl/torch_stable.html torch==1.8.0+cpu torchvision==0.9.0+cpu diff --git a/examples/pytorch/ipex/speech_recognition/rnnt/README.md b/examples/pytorch/ipex/speech_recognition/rnnt/README.md index 0931a0b0bf1..e45e1abdc82 100644 --- a/examples/pytorch/ipex/speech_recognition/rnnt/README.md +++ b/examples/pytorch/ipex/speech_recognition/rnnt/README.md @@ -73,7 +73,7 @@ refer [intel/intel-extension-for-pytorch at mlperf/inference-1.1 (github.com)](h --output_json $local_data_dir/dev-clean-wav.json ``` -### 5. tune RNN-T with LPOT +### 5. 
tune RNN-T with Neural Compressor Please update the setup_env_offline.sh or setup_env_server.sh and user.conf according to your platform resource. ``` # offline diff --git a/examples/pytorch/ipex/speech_recognition/rnnt/run.py b/examples/pytorch/ipex/speech_recognition/rnnt/run.py index 22a2f2b7c4e..4e00fa0cdcd 100644 --- a/examples/pytorch/ipex/speech_recognition/rnnt/run.py +++ b/examples/pytorch/ipex/speech_recognition/rnnt/run.py @@ -49,13 +49,13 @@ def get_args(): parser.add_argument("--log_dir", required=True) parser.add_argument("--configure_path", default="") parser.add_argument('--tune', dest='tune', action='store_true', - help='tune best int8 model with LPOT on calibration dataset') + help='tune best int8 model with Neural Compressor on calibration dataset') parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') parser.add_argument("--accuracy_only", dest='accuracy_only', action='store_true', help='For accuracy measurement only.') parser.add_argument("--tuned_checkpoint", default='./saved_results', type=str, metavar='PATH', - help='path to checkpoint tuned by Low Precision Optimization Tool (default: ./)') + help='path to checkpoint tuned by Neural Compressor (default: ./)') args = parser.parse_args() return args @@ -97,7 +97,7 @@ def eval_func(model): fullpath = None use_int8 = False settings.mode = lg.TestMode.AccuracyOnly - for path, dirs, files in os.walk('lpot_workspace'): + for path, dirs, files in os.walk('nc_workspace'): if 'ipex_config_tmp.json' in files: fullpath = os.path.join(path, 'ipex_config_tmp.json') use_int8 = True @@ -116,15 +116,15 @@ def eval_func(model): return accu if args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common import shutil - shutil.rmtree('lpot_workspace', ignore_errors=True) + shutil.rmtree('nc_workspace', ignore_errors=True) sut = PytorchSUT(args.pytorch_config_toml, args.pytorch_checkpoint, args.dataset_dir, args.manifest, args.perf_count, True, False, None) model = sut.greedy_decoder._model.encoder - class LPOT_dataloader(object): + class NC_dataloader(object): def __init__(self, sut): self.sut = sut self.batch_size = 1 @@ -145,7 +145,7 @@ def __iter__(self): feature = feature.permute(2, 0, 1) yield (feature, feature_length), None - calib_dataloader = LPOT_dataloader(sut) + calib_dataloader = NC_dataloader(sut) quantizer = Quantization("./conf.yaml") quantizer.model = common.Model(model) quantizer.calib_dataloader = calib_dataloader @@ -158,7 +158,7 @@ def __iter__(self): config_file = None if args.int8: config_file = os.path.join(args.tuned_checkpoint, "best_configure.json") - assert os.path.exists(config_file), "there is no ipex config file, Please tune with LPOT first!" + assert os.path.exists(config_file), "there is no ipex config file, Please tune with Neural Compressor first!" sut = PytorchSUT(args.pytorch_config_toml, args.pytorch_checkpoint, args.dataset_dir, args.manifest, args.perf_count, args.bf16, args.int8, config_file) diff --git a/examples/tensorflow/image_recognition/README.md b/examples/tensorflow/image_recognition/README.md index 0efc44dcf4a..ceeb38be903 100644 --- a/examples/tensorflow/image_recognition/README.md +++ b/examples/tensorflow/image_recognition/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document list steps of reproducing Intel Optimized TensorFlow image recognition models tuning results via LPOT. 
+This document lists the steps for reproducing Intel Optimized TensorFlow image recognition models tuning results via Neural Compressor. > **Note**: > Most of those models are both supported in Intel optimized TF 1.15.x and Intel optimized TF 2.x. @@ -37,7 +37,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni ``` ### 3. Prepare pre-trained model - In this version, Intel® Low Precision Optimization Tool just support PB file as input for TensorFlow backend, so we need prepared model pre-trained pb files. For some models pre-trained pb can be found in [IntelAI Models](https://github.com/IntelAI/models/tree/v1.6.0/benchmarks#tensorflow-use-cases), we can found the download link in README file of each model. And for others models in Google [models](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models), we can get the pb files by convert the checkpoint files. We will give a example with Inception_v1 to show how to get the pb file by a checkpoint file. + In this version, Intel® Neural Compressor only supports PB files as input for the TensorFlow backend, so we need to prepare the models' pre-trained pb files. For some models, the pre-trained pb can be found in [IntelAI Models](https://github.com/IntelAI/models/tree/v1.6.0/benchmarks#tensorflow-use-cases); the download link is in the README file of each model. For the other models in the Google [models](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models) repository, we can get the pb files by converting the checkpoint files. Below, we use Inception_v1 as an example to show how to get a pb file from a checkpoint file. 1. Download the checkpoint file from [here](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models) ```shell @@ -98,7 +98,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=resnet50_v1.yaml \ --input_model=/PATH/TO/resnet50_fp32_pretrained_model.pb \ - --output_model=./lpot_resnet50_v1.pb + --output_model=./nc_resnet50_v1.pb ``` ### 2. ResNet50 V1.5 @@ -111,7 +111,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni ```shell cd examples/tensorflow/image_recognition bash run_tuning.sh --config=resnet50_v1_5.yaml \ - --input_model=/PATH/TO/resnet50_v1.pb --output_model=./lpot_resnet50_v15.pb + --input_model=/PATH/TO/resnet50_v1.pb --output_model=./nc_resnet50_v15.pb ``` ### 3. ResNet101 @@ -125,7 +125,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=resnet101.yaml \ --input_model=/PATH/TO/resnet101_fp32_pretrained_model.pb \ - --output_model=./lpot_resnet101.pb + --output_model=./nc_resnet101.pb ``` ### 4. MobileNet V1 @@ -139,7 +139,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=mobilenet_v1.yaml \ --input_model=/PATH/TO/mobilenet_v1_1.0_224_frozen.pb \ - --output_model=./lpot_mobilenetv1.pb + --output_model=./nc_mobilenetv1.pb ``` ### 5. MobileNet V2* @@ -148,7 +148,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=mobilenet_v2.yaml \ --input_model=/PATH/TO/frozen_mobilenet_v2.pb \ - --output_model=./lpot_mobilenetv2.pb + --output_model=./nc_mobilenetv2.pb ``` ### 6. 
Inception V1* @@ -157,7 +157,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=inception_v1.yaml \ --input_model=/PATH/TO/frozen_inception_v1.pb \ - --output_model=./lpot_inceptionv1.pb + --output_model=./nc_inceptionv1.pb ``` ### 7. Inception V2* @@ -166,7 +166,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=inception_v2.yaml \ --input_model=/PATH/TO/frozen_inception_v2.pb \ - --output_model=./lpot_inceptionv2.pb + --output_model=./nc_inceptionv2.pb ``` ### 8. Inception V3 @@ -180,7 +180,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=inception_v3.yaml \ --input_model=/PATH/TO/inceptionv3_fp32_pretrained_model.pb \ - --output_model=./lpot_inceptionv3.pb + --output_model=./nc_inceptionv3.pb ``` ### 9. Inception V4 @@ -194,7 +194,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=inception_v4.yaml \ --input_model=/PATH/TO/inceptionv4_fp32_pretrained_model.pb \ - --output_model=./lpot_inceptionv4.pb + --output_model=./nc_inceptionv4.pb ``` ### 10. Inception ResNet V2* @@ -203,7 +203,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=inception_resnet_v2.yaml \ --input_model=/PATH/TO/frozen_inception_resnet_v2.pb \ - --output_model=./lpot_irv2.pb + --output_model=./nc_irv2.pb ``` ### 11. VGG 16* @@ -211,7 +211,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni ```shell cd examples/tensorflow/image_recognition bash run_tuning.sh --config=vgg16.yaml \ - --input_model=/PATH/TO/frozen_vgg16.pb --output_model=./lpot_vgg16.pb + --input_model=/PATH/TO/frozen_vgg16.pb --output_model=./nc_vgg16.pb ``` ### 12. VGG 19* @@ -219,7 +219,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni ```shell cd examples/tensorflow/image_recognition bash run_tuning.sh --config=vgg19.yaml \ - --input_model=/PATH/TO/frozen_vgg19.pb --output_model=./lpot_vgg19.pb + --input_model=/PATH/TO/frozen_vgg19.pb --output_model=./nc_vgg19.pb ``` ### 13. ResNet v2 50 @@ -227,7 +227,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni ```shell cd examples/tensorflow/image_recognition bash run_tuning.sh --config=resnet_v2_50.yaml \ - --input_model=/PATH/TO/frozen_resnet50v2_50.pb --output_model=./lpot_resnetv2_50.pb + --input_model=/PATH/TO/frozen_resnet50v2_50.pb --output_model=./nc_resnetv2_50.pb ``` ### 14. ResNet v2 101 @@ -235,7 +235,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni ```shell cd examples/tensorflow/image_recognition bash run_tuning.sh --config=resnet_v2_101.yaml \ - --input_model=/PATH/TO/frozen_resnetv2_101.pb --output_model=./lpot_resnetv2_101.pb + --input_model=/PATH/TO/frozen_resnetv2_101.pb --output_model=./nc_resnetv2_101.pb ``` ### 15. 
ResNet v2 152 @@ -244,7 +244,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=resnet_v2_152.yaml \ --input_model=/PATH/TO/frozen_resnetv2_152.pb \ - --output_model=./lpot_resnetv2_152.pb + --output_model=./nc_resnetv2_152.pb ``` ### 16. Densenet-121 @@ -252,7 +252,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni ```shell cd examples/tensorflow/image_recognition bash run_tuning.sh --config=densenet121.yaml \ - --input_model=/PATH/TO/densenet121.pb --output_model=./lpot_densenet121 + --input_model=/PATH/TO/densenet121.pb --output_model=./nc_densenet121 ``` ### 17. Densenet-161 @@ -260,7 +260,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni ```shell cd examples/tensorflow/image_recognition bash run_tuning.sh --config=densenet161.yaml \ - --input_model=/PATH/TO/densenet161.pb --output_model=./lpot_densenet161 + --input_model=/PATH/TO/densenet161.pb --output_model=./nc_densenet161 ``` ### 18. Densenet-169 @@ -268,7 +268,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni ```shell cd examples/tensorflow/image_recognition bash run_tuning.sh --config=densenet169.yaml \ - --input_model=/PATH/TO/densenet169.pb --output_model=./lpot_densenet169 + --input_model=/PATH/TO/densenet169.pb --output_model=./nc_densenet169 ``` ### 19. Nasnet-mobile* @@ -276,7 +276,7 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni ```shell cd examples/tensorflow/image_recognition bash run_tuning.sh --config=nasnet_mobile.yaml \ - --input_model=/PATH/TO/frozen_nasnet_mobile.pb --output_model=./lpot_nasnet_mobile + --input_model=/PATH/TO/frozen_nasnet_mobile.pb --output_model=./nc_nasnet_mobile ``` ### 20. EfficientNet-b0 @@ -291,17 +291,17 @@ This document list steps of reproducing Intel Optimized TensorFlow image recogni cd examples/tensorflow/image_recognition bash run_tuning.sh --config=efficientnet-b0.yaml \ --input_model=/PATH/TO/efficientnet-b0 \ - --output_model=./lpot_efficientnet-b0.pb + --output_model=./nc_efficientnet-b0.pb ``` -Examples of enabling Intel® Low Precision Optimization Tool auto tuning on TensorFlow ResNet50 V1.5 +Examples of enabling Intel® Neural Compressor auto tuning on TensorFlow ResNet50 V1.5 ======================================================= -This is a tutorial of how to enable a TensorFlow image recognition model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable a TensorFlow image recognition model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports two usages: +Intel® Neural Compressor supports two usages: 1. User specifies fp32 "model", yaml configured calibration dataloader in calibration field and evaluation dataloader in evaluation field, metric in tuning.metric field of model-specific yaml config file. @@ -310,7 +310,7 @@ Intel® Low Precision Optimization Tool supports two usages: 2. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -As ResNet50 V1.5 is a typical image recognition model, use Top-K as metric which is built-in supported by Intel® Low Precision Optimization Tool. 
So here we integrate Tensorflow [ResNet50 V1.5](https://github.com/IntelAI/models/tree/v1.6.0/models/image_recognition/tensorflow/resnet50v1_5/inference) in [IntelAI Models](https://github.com/IntelAI/models/tree/v1.6.0) with Intel® Low Precision Optimization Tool by the first use case for simplicity. +As ResNet50 V1.5 is a typical image recognition model, we use Top-K as the metric, which is built into Intel® Neural Compressor. So here we integrate the TensorFlow [ResNet50 V1.5](https://github.com/IntelAI/models/tree/v1.6.0/models/image_recognition/tensorflow/resnet50v1_5/inference) model in [IntelAI Models](https://github.com/IntelAI/models/tree/v1.6.0) with Intel® Neural Compressor via the first use case for simplicity. ### Write Yaml config file @@ -342,8 +342,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: @@ -387,7 +387,7 @@ Here we choose topk which is built-in metric and set accuracy criterion as toler There are three preparation steps in here: 1. Prepare environment ```shell -pip install intel-tensorflow==1.15.2 lpot +pip install intel-tensorflow==1.15.2 neural-compressor ``` 2. Get the model source code ```shell @@ -404,19 +404,19 @@ After completed preparation steps, we just need to add below tuning part in `eva ```python def auto_tune(self): - """This is Intel® Low Precision Optimization Tool tuning part to generate a quantized pb + """This is the Intel® Neural Compressor tuning part, which generates a quantized pb Returns: graph: it will return a quantized pb """ - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(self.args.config) quantizer.model = common.Model(self.args.input_graph) q_model = quantizer() return q_model ``` -Finally, add one line in `__main__` function of `eval_image_classifier_inference.py` to use Intel® Low Precision Optimization Tool by yourself as below. +Finally, add one line to the `__main__` function of `eval_image_classifier_inference.py` to invoke Intel® Neural Compressor, as below. ```python q_graph = evaluate_opt_graph.auto_tune() ``` diff --git a/examples/tensorflow/image_recognition/densenet121.yaml b/examples/tensorflow/image_recognition/densenet121.yaml index fe64e5c8f81..b62e4bae4a2 100644 --- a/examples/tensorflow/image_recognition/densenet121.yaml +++ b/examples/tensorflow/image_recognition/densenet121.yaml @@ -45,8 +45,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. 
dataloader: diff --git a/examples/tensorflow/image_recognition/densenet161.yaml b/examples/tensorflow/image_recognition/densenet161.yaml index 7f629064e6c..88af07e77d8 100644 --- a/examples/tensorflow/image_recognition/densenet161.yaml +++ b/examples/tensorflow/image_recognition/densenet161.yaml @@ -45,8 +45,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/densenet169.yaml b/examples/tensorflow/image_recognition/densenet169.yaml index f6116d732c3..f115bd440ef 100644 --- a/examples/tensorflow/image_recognition/densenet169.yaml +++ b/examples/tensorflow/image_recognition/densenet169.yaml @@ -45,8 +45,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/efficientnet-b0.yaml b/examples/tensorflow/image_recognition/efficientnet-b0.yaml index aa537423223..704e43f2d3f 100644 --- a/examples/tensorflow/image_recognition/efficientnet-b0.yaml +++ b/examples/tensorflow/image_recognition/efficientnet-b0.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -model: # mandatory. lpot uses this model name and framework name to decide where to save tuning history and deploy yaml. +model: # mandatory. neural_compressor uses this model name and framework name to decide where to save tuning history and deploy yaml. name: efficientnet-b0 framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. inputs: truediv @@ -38,8 +38,8 @@ quantization: # optional. tuning constrai mean: [123.675, 116.28, 103.53] std: [58.395, 57.12, 57.375] -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. 
dataloader: diff --git a/examples/tensorflow/image_recognition/inception_resnet_v2.yaml b/examples/tensorflow/image_recognition/inception_resnet_v2.yaml index bc024d371b8..347bd905105 100644 --- a/examples/tensorflow/image_recognition/inception_resnet_v2.yaml +++ b/examples/tensorflow/image_recognition/inception_resnet_v2.yaml @@ -33,8 +33,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/inception_v1.yaml b/examples/tensorflow/image_recognition/inception_v1.yaml index df63d41fab0..22a0947cf58 100644 --- a/examples/tensorflow/image_recognition/inception_v1.yaml +++ b/examples/tensorflow/image_recognition/inception_v1.yaml @@ -33,8 +33,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/inception_v2.yaml b/examples/tensorflow/image_recognition/inception_v2.yaml index acfa91039eb..fc4f739341c 100644 --- a/examples/tensorflow/image_recognition/inception_v2.yaml +++ b/examples/tensorflow/image_recognition/inception_v2.yaml @@ -33,8 +33,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. 
dataloader: diff --git a/examples/tensorflow/image_recognition/inception_v3.yaml b/examples/tensorflow/image_recognition/inception_v3.yaml index 081373f45a6..ed9f8154263 100644 --- a/examples/tensorflow/image_recognition/inception_v3.yaml +++ b/examples/tensorflow/image_recognition/inception_v3.yaml @@ -38,8 +38,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/inception_v4.yaml b/examples/tensorflow/image_recognition/inception_v4.yaml index b34fbf52fac..a247f2413ad 100644 --- a/examples/tensorflow/image_recognition/inception_v4.yaml +++ b/examples/tensorflow/image_recognition/inception_v4.yaml @@ -33,8 +33,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/main.py b/examples/tensorflow/image_recognition/main.py index 3e27bb4d765..d8139864bea 100644 --- a/examples/tensorflow/image_recognition/main.py +++ b/examples/tensorflow/image_recognition/main.py @@ -44,22 +44,22 @@ def __init__(self): arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode') - arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use lpot to tune.') + arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') self.args = arg_parser.parse_args() def run(self): - """ This is lpot function include tuning and benchmark option """ + """ This is neural_compressor function include tuning and benchmark option """ if self.args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(self.args.config) quantizer.model = common.Model(self.args.input_graph) q_model = quantizer() q_model.save(self.args.output_graph) if self.args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(self.args.config) evaluator.model = common.Model(self.args.input_graph) evaluator(self.args.mode) diff --git a/examples/tensorflow/image_recognition/mobilenet_v1.yaml b/examples/tensorflow/image_recognition/mobilenet_v1.yaml index e3ae5135e77..9eb2c3782e9 100644 --- a/examples/tensorflow/image_recognition/mobilenet_v1.yaml +++ b/examples/tensorflow/image_recognition/mobilenet_v1.yaml @@ -35,8 +35,8 @@ quantization: # optional. tuning constrai weight: granularity: per_channel -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. 
required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/mobilenet_v2.yaml b/examples/tensorflow/image_recognition/mobilenet_v2.yaml index e68cf365fae..b8d9b7bfd87 100644 --- a/examples/tensorflow/image_recognition/mobilenet_v2.yaml +++ b/examples/tensorflow/image_recognition/mobilenet_v2.yaml @@ -44,8 +44,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/mobilenet_v3.yaml b/examples/tensorflow/image_recognition/mobilenet_v3.yaml index 9980759b2f9..49bf418d6e7 100644 --- a/examples/tensorflow/image_recognition/mobilenet_v3.yaml +++ b/examples/tensorflow/image_recognition/mobilenet_v3.yaml @@ -30,7 +30,7 @@ quantization: # optional. tuning constrai BilinearImagenet: height: 224 width: 224 - recipes: # optional. used to switch lpot int8 receipts ON or OFF. + recipes: # optional. used to switch neural_compressor int8 recipes ON or OFF. scale_propagation_max_pooling: False model_wise: # optional. tuning constraints on model-wise for advance user to reduce tuning space. activation: @@ -52,8 +52,8 @@ quantization: # optional. tuning constrai }, } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/nasnet_mobile.yaml b/examples/tensorflow/image_recognition/nasnet_mobile.yaml index 64cf45ad716..3b20901e9c4 100644 --- a/examples/tensorflow/image_recognition/nasnet_mobile.yaml +++ b/examples/tensorflow/image_recognition/nasnet_mobile.yaml @@ -37,8 +37,8 @@ quantization: # optional. tuning constrai weight: granularity: per_channel -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. 
dataloader: diff --git a/examples/tensorflow/image_recognition/requirements.txt b/examples/tensorflow/image_recognition/requirements.txt index 98b3f30e17f..16ea87a7151 100644 --- a/examples/tensorflow/image_recognition/requirements.txt +++ b/examples/tensorflow/image_recognition/requirements.txt @@ -1,2 +1,2 @@ intel-tensorflow -lpot +neural-compressor diff --git a/examples/tensorflow/image_recognition/resnet101.yaml b/examples/tensorflow/image_recognition/resnet101.yaml index 4cf80c0bc8f..9209330d22a 100644 --- a/examples/tensorflow/image_recognition/resnet101.yaml +++ b/examples/tensorflow/image_recognition/resnet101.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/resnet50_v1.yaml b/examples/tensorflow/image_recognition/resnet50_v1.yaml index af74ca2b748..0cde9d1e551 100644 --- a/examples/tensorflow/image_recognition/resnet50_v1.yaml +++ b/examples/tensorflow/image_recognition/resnet50_v1.yaml @@ -33,8 +33,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/resnet50_v1_5.yaml b/examples/tensorflow/image_recognition/resnet50_v1_5.yaml index 64830355979..e2699a436c2 100644 --- a/examples/tensorflow/image_recognition/resnet50_v1_5.yaml +++ b/examples/tensorflow/image_recognition/resnet50_v1_5.yaml @@ -35,8 +35,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/resnet_v2_101.yaml b/examples/tensorflow/image_recognition/resnet_v2_101.yaml index 5cfa41612e0..830cadb4e6a 100644 --- a/examples/tensorflow/image_recognition/resnet_v2_101.yaml +++ b/examples/tensorflow/image_recognition/resnet_v2_101.yaml @@ -33,8 +33,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. 
required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/resnet_v2_152.yaml b/examples/tensorflow/image_recognition/resnet_v2_152.yaml index 1dd81368002..32b8929b104 100644 --- a/examples/tensorflow/image_recognition/resnet_v2_152.yaml +++ b/examples/tensorflow/image_recognition/resnet_v2_152.yaml @@ -33,8 +33,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/resnet_v2_50.yaml b/examples/tensorflow/image_recognition/resnet_v2_50.yaml index 01cc5f22962..2bbd781babc 100644 --- a/examples/tensorflow/image_recognition/resnet_v2_50.yaml +++ b/examples/tensorflow/image_recognition/resnet_v2_50.yaml @@ -33,8 +33,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/slim/README.md b/examples/tensorflow/image_recognition/slim/README.md index b00dffd6049..dd988e8fd69 100644 --- a/examples/tensorflow/image_recognition/slim/README.md +++ b/examples/tensorflow/image_recognition/slim/README.md @@ -50,10 +50,10 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl # Run ## tune - ./run_tuning.sh --config=model.yaml --input_model=/path/to/input_model.ckpt --output=/path/to/save/lpot_tuned.pb + ./run_tuning.sh --config=model.yaml --input_model=/path/to/input_model.ckpt --output=/path/to/save/nc_tuned.pb ## benchmark - ./run_tuning.sh --config=model.yaml --input_model=/path/to/lpot_tuned.pb + ./run_tuning.sh --config=model.yaml --input_model=/path/to/nc_tuned.pb ### 1. resnet_v1_50 @@ -61,7 +61,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=resnet_v1_50.yaml \ --input_model=/PATH/TO/resnet_v1_50.ckpt \ - --output_model=./lpot_resnet_v1_50.pb + --output_model=./nc_resnet_v1_50.pb ``` ### 2. resnet_v1_101 @@ -70,7 +70,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=../resnet101.yaml \ --input_model=/PATH/TO/resnet_v1_101.ckpt \ - --output_model=./lpot_resnet_v1_101.pb + --output_model=./nc_resnet_v1_101.pb ``` ### 3. 
resnet_v1_152 @@ -79,7 +79,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=resnet_v1_152.yaml \ --input_model=/PATH/TO/resnet_v1_152.ckpt \ - --output_model=./lpot_resnet_v1_152.pb + --output_model=./nc_resnet_v1_152.pb ``` @@ -89,7 +89,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=../resnet_v2_50.yaml \ --input_model=/PATH/TO/resnet_v2_50.ckpt \ - --output_model=./lpot_resnet_v2_50.pb + --output_model=./nc_resnet_v2_50.pb ``` @@ -99,7 +99,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=../resnet_v2_101.yaml \ --input_model=/PATH/TO/resnet_v2_101.ckpt \ - --output_model=./lpot_resnet_v2_101.pb + --output_model=./nc_resnet_v2_101.pb ``` @@ -109,7 +109,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=../resnet_v2_152.yaml \ --input_model=/PATH/TO/resnet_v2_152.ckpt \ - --output_model=./lpot_resnet_v2_152.pb + --output_model=./nc_resnet_v2_152.pb ``` @@ -119,7 +119,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=../inception_v1.yaml \ --input_model=/PATH/TO/inception_v1.ckpt \ - --output_model=./lpot_inception_v1.pb + --output_model=./nc_inception_v1.pb ``` @@ -129,7 +129,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=../inception_v2.yaml \ --input_model=/PATH/TO/inception_v2.ckpt \ - --output_model=./lpot_inception_v2.pb + --output_model=./nc_inception_v2.pb ``` ### 9. inception_v3 @@ -138,7 +138,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=inception_v3.yaml \ --input_model=/PATH/TO/inception_v3.ckpt \ - --output_model=./lpot_inception_v3.pb + --output_model=./nc_inception_v3.pb ``` ### 10. inception_v4 @@ -147,7 +147,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=../inception_v4.yaml \ --input_model=/PATH/TO/inception_v4.ckpt \ - --output_model=./lpot_inception_v4.pb + --output_model=./nc_inception_v4.pb ``` ### 11. vgg16 @@ -156,7 +156,7 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=../vgg16.yaml \ --input_model=/PATH/TO/vgg_16.ckpt \ - --output_model=./lpot_vgg_16.pb + --output_model=./nc_vgg_16.pb ``` ### 12. 
vgg19 @@ -165,17 +165,17 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow sl cd examples/tensorflow/image_recognition/slim bash run_tuning.sh --config=../vgg19.yaml \ --input_model=/PATH/TO/vgg_19.ckpt \ - --output_model=./lpot_vgg_19.pb + --output_model=./nc_vgg_19.pb ``` -Examples of enabling Intel® Low Precision Optimization Tool auto tuning on TensorFlow Inception V1 +Examples of enabling Intel® Neural Compressor auto tuning on TensorFlow Inception V1 ======================================================= -This is a tutorial of how to enable a TensorFlow slim model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable a TensorFlow slim model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports two usages: +Intel® Neural Compressor supports two usages: 1. User specifies fp32 "model", yaml configured calibration dataloader in calibration field and evaluation dataloader in evaluation field, metric in tuning.metric field of model-specific yaml config file. @@ -184,7 +184,7 @@ Intel® Low Precision Optimization Tool supports two usages: 2. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -As Inception V1 is a typical image recognition model, use Top-K as metric which is built-in supported by Intel® Low Precision Optimization Tool. It's easy to directly use 1 method that to configure a yaml file. +As Inception V1 is a typical image recognition model, we use the built-in Top-K metric supported by Intel® Neural Compressor. The easiest way is usage 1: simply configure a yaml file. ### Write Yaml config file @@ -216,8 +216,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: @@ -261,7 +261,7 @@ Here we choose topk built-in metric and set accuracy target as tolerating 0.01 r There are three preparation steps in here: 1. Prepare environment ```shell -pip install intel-tensorflow==1.15.2 lpot +pip install intel-tensorflow==1.15.2 neural-compressor ``` 2.
Prepare the ImageNet dataset and pretrained ckpt file ```shell @@ -274,8 +274,8 @@ This tool support tune and benchmark the model, when in the tune phase, make sur ```python - from lpot.experimental import Quantization - from lpot.adaptor.tf_utils.util import get_slim_graph + from neural_compressor.experimental import Quantization + from neural_compressor.adaptor.tf_utils.util import get_slim_graph quantizer = Quantization(self.args.config) slim_graph = get_slim_graph(args.input_graph, model_func, arg_scope, images, **kwargs) q_model = quantizer(slim_graph) @@ -286,7 +286,7 @@ when in benchmark phase: ```python - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(args.config) results = evaluator(model=args.input_graph) ``` diff --git a/examples/tensorflow/image_recognition/slim/inception_v3.yaml b/examples/tensorflow/image_recognition/slim/inception_v3.yaml index 081373f45a6..ed9f8154263 100644 --- a/examples/tensorflow/image_recognition/slim/inception_v3.yaml +++ b/examples/tensorflow/image_recognition/slim/inception_v3.yaml @@ -38,8 +38,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/slim/main.py b/examples/tensorflow/image_recognition/slim/main.py index c4de32609cd..c185c1994b2 100644 --- a/examples/tensorflow/image_recognition/slim/main.py +++ b/examples/tensorflow/image_recognition/slim/main.py @@ -20,7 +20,7 @@ import numpy as np from argparse import ArgumentParser import tensorflow as tf -from lpot.model.nets_factory import TFSlimNetsFactory +from neural_compressor.model.nets_factory import TFSlimNetsFactory import copy tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) @@ -55,7 +55,7 @@ def main(_): arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode') - arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use lpot to tune.') + arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') args = arg_parser.parse_args() @@ -65,14 +65,14 @@ def main(_): factory.register('inception_v4', inception_v4, input_shape, inception_v4_arg_scope) if args.tune: - from lpot.experimental import Quantization + from neural_compressor.experimental import Quantization quantizer = Quantization(args.config) quantizer.model = args.input_graph q_model = quantizer() q_model.save(args.output_graph) if args.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(args.config) evaluator.model = args.input_graph evaluator(args.mode) diff --git a/examples/tensorflow/image_recognition/slim/requirements.txt b/examples/tensorflow/image_recognition/slim/requirements.txt index b252fbffcc3..6c690dfa4d7 100644 --- a/examples/tensorflow/image_recognition/slim/requirements.txt +++ b/examples/tensorflow/image_recognition/slim/requirements.txt @@ -1,3 +1,3 @@ intel-tensorflow==1.15.2 -lpot +neural-compressor tf_slim diff --git 
a/examples/tensorflow/image_recognition/slim/resnet_v1_152.yaml b/examples/tensorflow/image_recognition/slim/resnet_v1_152.yaml index 2c734ef0c72..ebf3c215306 100644 --- a/examples/tensorflow/image_recognition/slim/resnet_v1_152.yaml +++ b/examples/tensorflow/image_recognition/slim/resnet_v1_152.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/slim/resnet_v1_50.yaml b/examples/tensorflow/image_recognition/slim/resnet_v1_50.yaml index 6b61ed3f538..cb3e3768e2d 100644 --- a/examples/tensorflow/image_recognition/slim/resnet_v1_50.yaml +++ b/examples/tensorflow/image_recognition/slim/resnet_v1_50.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/vgg16.yaml b/examples/tensorflow/image_recognition/vgg16.yaml index 827c9a1f39b..c1f414f5971 100644 --- a/examples/tensorflow/image_recognition/vgg16.yaml +++ b/examples/tensorflow/image_recognition/vgg16.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/image_recognition/vgg19.yaml b/examples/tensorflow/image_recognition/vgg19.yaml index b53a213fde0..bf298bd1bcb 100644 --- a/examples/tensorflow/image_recognition/vgg19.yaml +++ b/examples/tensorflow/image_recognition/vgg19.yaml @@ -34,8 +34,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. 
dataloader: diff --git a/examples/tensorflow/keras/README.md b/examples/tensorflow/keras/README.md index e82b9f2668b..67c92053054 100644 --- a/examples/tensorflow/keras/README.md +++ b/examples/tensorflow/keras/README.md @@ -1,15 +1,15 @@ Step-by-Step ============ -This document is used to list steps of reproducing TensorFlow keras Intel® Low Precision Optimization Tool tuning zoo result. +This document lists the steps to reproduce the TensorFlow Keras Intel® Neural Compressor tuning zoo result. ## Prerequisite ### 1. Installation ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2. Install Intel Tensorflow ```shell diff --git a/examples/tensorflow/keras/main.py b/examples/tensorflow/keras/main.py index 9945a0df23d..8c9a3b7244a 100644 --- a/examples/tensorflow/keras/main.py +++ b/examples/tensorflow/keras/main.py @@ -41,23 +41,23 @@ def __init__(self): arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark') - arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use lpot to tune.') + arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode, support performance and accuracy') self.args = arg_parser.parse_args() def run(self): - """ This is lpot function include tuning and benchmark option """ + """ This is the neural_compressor function, including the tuning and benchmark options """ if self.args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(self.args.config) quantizer.model = common.Model(self.args.input_graph) q_model = quantizer() q_model.save(self.args.output_graph) if self.args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(self.args.config) evaluator.model = common.Model(self.args.input_graph) evaluator(self.args.mode) diff --git a/examples/tensorflow/keras/resnet50_fashion.yaml b/examples/tensorflow/keras/resnet50_fashion.yaml index e746d0c6c57..f8238eebf27 100644 --- a/examples/tensorflow/keras/resnet50_fashion.yaml +++ b/examples/tensorflow/keras/resnet50_fashion.yaml @@ -30,8 +30,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: Accuracy: {} # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/examples/tensorflow/nlp/bert_base_mrpc/README.md b/examples/tensorflow/nlp/bert_base_mrpc/README.md index 970de11d080..f7764cd7b70 100644 --- a/examples/tensorflow/nlp/bert_base_mrpc/README.md +++ b/examples/tensorflow/nlp/bert_base_mrpc/README.md @@ -1,15 +1,15 @@ Step-by-Step ============ -This document is used to list steps of reproducing TensorFlow Intel® Low Precision Optimization Tool tuning zoo result of bert base model on mrpc task.
+This document lists the steps to reproduce the TensorFlow Intel® Neural Compressor tuning zoo result for the bert base model on the mrpc task. ## Prerequisite ### 1. Installation ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2. Install Intel Tensorflow 1.15 up2 Check your python version and use pip install 1.15.0 up2 from links below: @@ -81,10 +81,10 @@ If you want the model without iterator inside the graph, you can add --strip_ite python run_classifier.py --task_name=MRPC --data_dir=data/MRPC --vocab_file=model/vocab.txt --bert_config_file=model/bert_config.json --init_checkpoint=model/model.ckpt-343 --max_seq_length=128 --train_batch_size=32 --learning_rate=2e-5 --num_train_epochs=3.0 --output_dir=model --output_model=output_model --config=bert.yaml --tune -Details of enabling Intel® Low Precision Optimization Tool on bert model for Tensorflow. +Details of enabling Intel® Neural Compressor on bert model for Tensorflow. ========================= -This is a tutorial of how to enable bert model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable bert model with Intel® Neural Compressor. ## User Code Analysis 1. User specifies fp32 *model*, calibration dataset *q_dataloader*, evaluation dataset *eval_dataloader* and metric in tuning.metric field of model-specific yaml config file. @@ -93,7 +93,7 @@ This is a tutorial of how to enable bert model with Intel® Low Precision Optimization Tool. For bert, we applied the first one as we already have write dataset and metric for bert mrpc task. ### Write Yaml config file -In examples directory, there is a mrpc.yaml. We could remove most of items and only keep mandatory item for tuning. We also implement a calibration dataloader and have evaluation field for creation of evaluation function at internal lpot. +In the examples directory there is an mrpc.yaml. We can remove most of the items and keep only the mandatory items for tuning. We also implement a calibration dataloader and fill in the evaluation field so that neural_compressor can create the evaluation function internally. ```yaml model: @@ -141,7 +141,7 @@ After prepare step is done, we add tune and benchmark code to generate quantized #### Tune ```python - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(FLAGS.config) dataset = Dataset(eval_file, FLAGS.eval_batch_size) quantizer.model = common.Model(estimator, input_fn=estimator_input_fn) @@ -153,8 +153,8 @@ After prepare step is done, we add tune and benchmark code to generate quantized ``` #### Benchmark ```python - from lpot.experimental import Benchmark, common - from lpot.model.model import get_model_type + from neural_compressor.experimental import Benchmark, common + from neural_compressor.model.model import get_model_type evaluator = Benchmark(FLAGS.config) dataset = Dataset(eval_file, FLAGS.eval_batch_size) evaluator.b_dataloader = common.DataLoader(\ @@ -167,5 +167,5 @@ After prepare step is done, we add tune and benchmark code to generate quantized evaluator.model = common.Model(estimator, input_fn=estimator_input_fn) evaluator(FLAGS.mode) ``` -The Intel® Low Precision Optimization Tool quantizer() function will return a best quantized model under time constraint. +The Intel® Neural Compressor quantizer() function will return a best quantized model under time constraint.
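The MRPC README above registers its metric through `common.Metric(metric_cls=Accuracy)`. For readers skimming the hunks, here is a minimal standalone sketch of the metric protocol that registration relies on, assuming the v1.x `neural_compressor` API shown in this diff; it wraps the built-in TensorFlow `Accuracy` metric exactly as the run_classifier.py hunk below does, and the `reset` method is assumed to belong to the same update/reset/result protocol.

```python
# Minimal sketch of a user metric, assuming the v1.x neural_compressor API.
from neural_compressor.metric import METRICS

class Accuracy(object):
    def __init__(self):
        # Look up the built-in 'Accuracy' metric for the tensorflow backend.
        self.metric = METRICS('tensorflow')['Accuracy']()

    def update(self, preds, labels):
        # Accumulate one batch of predictions and ground-truth labels.
        self.metric.update(preds, labels)

    def reset(self):
        # Clear accumulated state between evaluation runs (assumed protocol).
        self.metric.reset()

    def result(self):
        # Return a single float the tuner compares against the FP32 baseline.
        return self.metric.result()
```

Note that the class itself is handed over, not an instance: `evaluator.metric = common.Metric(metric_cls=Accuracy)`, as shown in the run_classifier.py hunk that follows.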
diff --git a/examples/tensorflow/nlp/bert_base_mrpc/run_classifier.py b/examples/tensorflow/nlp/bert_base_mrpc/run_classifier.py index 5821274c7f5..ba7b243b379 100644 --- a/examples/tensorflow/nlp/bert_base_mrpc/run_classifier.py +++ b/examples/tensorflow/nlp/bert_base_mrpc/run_classifier.py @@ -35,30 +35,30 @@ FLAGS = flags.FLAGS -# lpot tune or benchmark +# neural_compressor tune or benchmark flags.DEFINE_bool( "tune", False, - "lpot tune the model.") + "neural_compressor tune the model.") flags.DEFINE_string( "config", None, - "lpot config for the model.") + "neural_compressor config for the model.") flags.DEFINE_string( "input_model", None, - "lpot input model path.") + "neural_compressor input model path.") flags.DEFINE_string( "output_model", None, - "lpot output model path.") + "neural_compressor output model path.") flags.DEFINE_bool( "benchmark", False, - "lpot benchmark the model.") + "neural_compressor benchmark the model.") flags.DEFINE_string( "mode", None, - "lpot benchmark performance or accuracy of the model.") + "neural_compressor benchmark performance or accuracy of the model.") ## Required parameters @@ -163,7 +163,7 @@ 'strip_iterator', False, 'whether to strip the iterator of the model') def strip_iterator(graph_def): - from lpot.adaptor.tf_utils.util import strip_unused_nodes + from neural_compressor.adaptor.tf_utils.util import strip_unused_nodes input_node_names = ['input_ids', 'input_mask', 'segment_ids'] output_node_names = ['loss/Softmax'] # create the placeholder and merge with the graph @@ -772,7 +772,7 @@ def tpu_scaffold(): loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) - # if use lpot reuse the eval metric + # if using neural_compressor, reuse the eval metric elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits, is_real_example): @@ -1033,7 +1033,7 @@ def main(_): tf.compat.v1.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) - # BELOW IS LPOT TUNING AND BENCHMARK CODE + # BELOW IS NEURAL COMPRESSOR TUNING AND BENCHMARK CODE class Dataset(object): def __init__(self, file_name, batch_size): @@ -1051,7 +1051,7 @@ def collate_fn(batch): elem = batch[0] return elem - from lpot.metric import METRICS + from neural_compressor.metric import METRICS class Accuracy(object): def __init__(self): self.metric = METRICS('tensorflow')['Accuracy']() @@ -1085,7 +1085,7 @@ def result(self): is_training=False, drop_remainder=False) - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(FLAGS.config) dataset = Dataset(eval_file, FLAGS.eval_batch_size) quantizer.model = common.Model(estimator, input_fn=estimator_input_fn) @@ -1101,7 +1101,7 @@ def result(self): eval_examples = processor.get_dev_examples(FLAGS.data_dir) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(FLAGS.config) dataset = Dataset(eval_file, FLAGS.eval_batch_size) evaluator.b_dataloader = common.DataLoader(\ @@ -1109,7 +1109,7 @@ def result(self): evaluator.metric = common.Metric(metric_cls=Accuracy) - from lpot.model.model import get_model_type + from neural_compressor.model.model import get_model_type model_type = get_model_type(FLAGS.input_model) if model_type == 'frozen_pb': evaluator.model = FLAGS.input_model diff --git a/examples/tensorflow/nlp/bert_large_squad/README.md
b/examples/tensorflow/nlp/bert_large_squad/README.md index ca25cf3bed7..f76d3c7f189 100644 --- a/examples/tensorflow/nlp/bert_large_squad/README.md +++ b/examples/tensorflow/nlp/bert_large_squad/README.md @@ -1,15 +1,15 @@ Step-by-Step ============ -This document is used to list steps of reproducing TensorFlow Intel® Low Precision Optimization Tool tuning zoo result of bert large model on squad v1.1 task. +This document lists the steps to reproduce the TensorFlow Intel® Neural Compressor tuning zoo result for the bert large model on the squad v1.1 task. ## Prerequisite ### 1. Installation ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2. Install Intel Tensorflow 1.15 up2 Check your python version and use pip install 1.15.0 up2 from links below: @@ -17,7 +17,7 @@ https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15. https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp37-cp37m-manylinux2010_x86_64.whl https://storage.googleapis.com/intel-optimized-tensorflow/intel_tensorflow-1.15.0up2-cp35-cp35m-manylinux2010_x86_64.whl -Intel Tensorflow 2.5.0 also supports since LPOT 1.6 release. +Intel Tensorflow 2.5.0 is also supported since the Neural Compressor 1.6 release. ```python pip install intel-tensorflow==2.5.0 ``` @@ -85,10 +85,10 @@ Now the tool will generate an int8 model with iterator inside the graph if you w ``` -Details of enabling Intel® Low Precision Optimization Tool on bert model for Tensorflow. +Details of enabling Intel® Neural Compressor on bert model for Tensorflow. ========================= -This is a tutorial of how to enable bert model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable bert model with Intel® Neural Compressor. ## User Code Analysis 1. User specifies fp32 *model*, calibration dataset *q_dataloader*, evaluation dataset *eval_dataloader* and metric in tuning.metric field of model-specific yaml config file. @@ -97,7 +97,7 @@ This is a tutorial of how to enable bert model with Intel® Low Precision Optimization Tool. For bert, we applied the first one as we already have built-in dataset and metric for bert squad task. ### Write Yaml config file -In examples directory, there is a bert.yaml. We could remove most of items and only keep mandatory item for tuning. We also implement a calibration dataloader and have evaluation field for creation of evaluation function at internal lpot. +In the examples directory there is a bert.yaml. We can remove most of the items and keep only the mandatory items for tuning. We also implement a calibration dataloader and fill in the evaluation field so that neural_compressor can create the evaluation function internally.
```yaml model: @@ -162,7 +162,7 @@ After prepare step is done, we add tune and benchmark code to generate quantized #### Tune ```python - from lpot.quantization import Quantization + from neural_compressor.quantization import Quantization quantizer = Quantization('./bert.yaml') quantizer.model = FLAGS.input_model q_model = quantizer() @@ -171,7 +171,7 @@ After prepare step is done, we add tune and benchmark code to generate quantized ``` #### Benchmark ```python - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark('./bert.yaml') evaluator.model = FLAGS.input_model results = evaluator() @@ -184,4 +184,4 @@ After prepare step is done, we add tune and benchmark code to generate quantized print('Latency: {:.3f} ms'.format(latency * 1000)) print('Throughput: {:.3f} images/sec'.format(1./ latency)) ``` -The Intel® Low Precision Optimization Tool quantizer() function will return a best quantized model under time constraint. +The Intel® Neural Compressor quantizer() function will return a best quantized model under time constraint. diff --git a/examples/tensorflow/nlp/bert_large_squad/create_pretraining_data.py b/examples/tensorflow/nlp/bert_large_squad/create_pretraining_data.py index fbcc250b274..3b3dbd4bc59 100644 --- a/examples/tensorflow/nlp/bert_large_squad/create_pretraining_data.py +++ b/examples/tensorflow/nlp/bert_large_squad/create_pretraining_data.py @@ -60,11 +60,11 @@ "dupe_factor", 10, "Number of times to duplicate the input data (with different masks).") -flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probablpoty.") +flags.DEFINE_float("masked_lm_prob", 0.15, "Masked LM probability.") flags.DEFINE_float( "short_seq_prob", 0.1, - "Probablpoty of creating sequences which are shorter than the " + "Probability of creating sequences which are shorter than the " "maximum length.") diff --git a/examples/tensorflow/nlp/bert_large_squad/export_classifier.py b/examples/tensorflow/nlp/bert_large_squad/export_classifier.py index 48c320cc455..f85e3c7e805 100644 --- a/examples/tensorflow/nlp/bert_large_squad/export_classifier.py +++ b/examples/tensorflow/nlp/bert_large_squad/export_classifier.py @@ -62,7 +62,7 @@ def __init__(self, self.input_mask = placeholder(tf.int32, input_shape, name='input_mask') self.segment_ids = placeholder(tf.int32, input_shape, name='segment_ids') - self.loss, self.per_example_loss, self.logits, self.probablpoties = \ + self.loss, self.per_example_loss, self.logits, self.probabilities = \ create_model_top(bert_config, False, # is training self.input_ids, self.input_mask, self.segment_ids, self.label_ids, num_labels, use_one_hot_embeddings, @@ -105,7 +105,7 @@ def export_saved_model(self, "loss": build_tensor_info(self.loss), "per_example_loss": build_tensor_info(self.per_example_loss), "logits": build_tensor_info(self.logits), - "probablpoties": build_tensor_info(self.probablpoties) + "probabilities": build_tensor_info(self.probabilities) } signature = signature_def_utils.build_signature_def(inputs, outputs) diff --git a/examples/tensorflow/nlp/bert_large_squad/freeze_estimator_to_pb.py b/examples/tensorflow/nlp/bert_large_squad/freeze_estimator_to_pb.py index 1304494e67e..f43def945e0 100755 --- a/examples/tensorflow/nlp/bert_large_squad/freeze_estimator_to_pb.py +++ b/examples/tensorflow/nlp/bert_large_squad/freeze_estimator_to_pb.py @@ -310,7 +310,7 @@ def main(_): is_training=False, drop_remainder=False) - from lpot.adaptor.tf_utils.util import is_ckpt_format + from 
neural_compressor.adaptor.tf_utils.util import is_ckpt_format assert is_ckpt_format(FLAGS.input_model), 'invalid chekpoint path....' ckpt_model = [os.path.splitext(i)[0] for i in os.listdir(FLAGS.input_model) \ if i.endswith('.meta')][0] @@ -332,7 +332,7 @@ def main(_): train_batch_size=32, predict_batch_size=8) - from lpot.adaptor.tf_utils.util import get_estimator_graph + from neural_compressor.adaptor.tf_utils.util import get_estimator_graph graph = get_estimator_graph(estimator, predict_input_fn) write_graph(graph.as_graph_def(), FLAGS.output_model) diff --git a/examples/tensorflow/nlp/bert_large_squad/modeling.py b/examples/tensorflow/nlp/bert_large_squad/modeling.py index b69ede6e9ba..226c5f331a4 100644 --- a/examples/tensorflow/nlp/bert_large_squad/modeling.py +++ b/examples/tensorflow/nlp/bert_large_squad/modeling.py @@ -58,10 +58,10 @@ def __init__(self, layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. - hidden_dropout_prob: The dropout probablpoty for all fully connected + hidden_dropout_prob: The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention - probablpoties. + probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). @@ -410,7 +410,7 @@ def dropout(input_tensor, dropout_prob): Args: input_tensor: float Tensor. - dropout_prob: Python float. The probablpoty of dropping out a value (NOT of + dropout_prob: Python float. The probability of dropping out a value (NOT of *keeping* a dimension as in `tf.nn.dropout`). Returns: @@ -518,7 +518,7 @@ def embedding_postprocessor(input_tensor, max_position_embeddings: int. Maximum sequence length that might ever be used with this model. This can be longer than the sequence length of input_tensor, but cannot be shorter. - dropout_prob: float. Dropout probablpoty applied to the final output tensor. + dropout_prob: float. Dropout probability applied to the final output tensor. Returns: float tensor with same shape as `input_tensor`. @@ -646,8 +646,8 @@ def attention_layer(from_tensor, [batch_size, seq_length, size_per_head]. Then, the query and key tensors are dot-producted and scaled. These are - softmaxed to obtain attention probablpoties. The value tensors are then - interpolated by these probablpoties, then concatenated back to a single + softmaxed to obtain attention probabilities. The value tensors are then + interpolated by these probabilities, then concatenated back to a single tensor and returned. In practice, the multi-headed attention are done with transposes and @@ -666,8 +666,8 @@ def attention_layer(from_tensor, query_act: (optional) Activation function for the query transform. key_act: (optional) Activation function for the key transform. value_act: (optional) Activation function for the value transform. - attention_probs_dropout_prob: (optional) float. Dropout probablpoty of the - attention probablpoties. + attention_probs_dropout_prob: (optional) float. Dropout probability of the + attention probabilities. initializer_range: float. Range of the weight initializer. do_return_2d_tensor: bool. If True, the output will be of shape [batch_size * from_seq_length, num_attention_heads * size_per_head]. 
If False, the @@ -779,7 +779,7 @@ def transpose_for_scores(input_tensor, batch_size, num_attention_heads, # effectively the same as removing these entirely. attention_scores += adder - # Normalize the attention scores to probablpoties. + # Normalize the attention scores to probabilities. # `attention_probs` = [B, N, F, T] attention_probs = bf.softmax(attention_scores) @@ -848,9 +848,9 @@ def transformer_model(input_tensor, forward) layer. intermediate_act_fn: function. The non-linear activation function to apply to the output of the intermediate/feed-forward layer. - hidden_dropout_prob: float. Dropout probablpoty for the hidden layers. - attention_probs_dropout_prob: float. Dropout probablpoty of the attention - probablpoties. + hidden_dropout_prob: float. Dropout probability for the hidden layers. + attention_probs_dropout_prob: float. Dropout probability of the attention + probabilities. initializer_range: float. Range of the initializer (stddev of truncated normal). do_return_all_layers: Whether to also return all layers or just the final diff --git a/examples/tensorflow/nlp/bert_large_squad/tune_squad.py b/examples/tensorflow/nlp/bert_large_squad/tune_squad.py index b46689847bc..cf12d221f01 100755 --- a/examples/tensorflow/nlp/bert_large_squad/tune_squad.py +++ b/examples/tensorflow/nlp/bert_large_squad/tune_squad.py @@ -45,7 +45,7 @@ 'strip_iterator', False, 'whether to strip the iterator of the model') def strip_iterator(graph_def): - from lpot.adaptor.tf_utils.util import strip_unused_nodes + from neural_compressor.adaptor.tf_utils.util import strip_unused_nodes input_node_names = ['input_ids', 'input_mask', 'segment_ids'] output_node_names = ['unstack'] # create the placeholder and merge with the graph @@ -72,13 +72,13 @@ def strip_iterator(graph_def): def main(_): tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) if FLAGS.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(FLAGS.config) evaluator.model = FLAGS.input_model evaluator(FLAGS.mode) elif FLAGS.tune: - from lpot.experimental import Quantization + from neural_compressor.experimental import Quantization quantizer = Quantization(FLAGS.config) quantizer.model = FLAGS.input_model q_model = quantizer() diff --git a/examples/tensorflow/nlp/transformer_lt/README.md b/examples/tensorflow/nlp/transformer_lt/README.md index d52d6dc9a25..bd38da82493 100644 --- a/examples/tensorflow/nlp/transformer_lt/README.md +++ b/examples/tensorflow/nlp/transformer_lt/README.md @@ -1,14 +1,14 @@ Step-by-Step ============ -This document is used to list steps of reproducing TensorFlow Intel® Low Precision Optimization Tool tuning zoo result of Transformer-LT. +This document is used to list steps of reproducing TensorFlow Intel® Neural Compressor tuning zoo result of Transformer-LT. ## Prerequisite ### 1. Installation ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2. Install Intel Tensorflow @@ -42,10 +42,10 @@ bash prepare_dataset_model.sh python main.py --input_graph=/path/to/fp32_graphdef.pb --inputs_file=/path/to/newstest2014.en --reference_file=/path/to/newstest2014.de --vocab_file=/path/to/vocab.txt --config=./transformer_lt.yaml --tune ``` -Details of enabling Intel® Low Precision Optimization Tool on transformer-lt for Tensorflow. +Details of enabling Intel® Neural Compressor on transformer-lt for Tensorflow. 
========================= -This is a tutorial of how to enable transformer-lt model with Intel® Low Precision Optimization Tool. +This is a tutorial of how to enable transformer-lt model with Intel® Neural Compressor. ## User Code Analysis 1. User specifies fp32 *model*, calibration dataset *q_dataloader*, evaluation dataset *eval_dataloader* and metric in tuning.metric field of model-specific yaml config file. @@ -108,8 +108,8 @@ In this case we calibrate and quantize the model, and use our calibration datalo After prepare step is done, we add tune code to generate quantized model. ```python - from lpot.experimental import Quantization - from lpot.adaptor.tf_utils.util import write_graph + from neural_compressor.experimental import Quantization + from neural_compressor.adaptor.tf_utils.util import write_graph quantizer = Quantization(FLAGS.config) ds = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file) quantizer.calib_dataloader = common.DataLoader(ds, collate_fn=collate_fn, batch_size=FLAGS.batch_size) @@ -119,4 +119,4 @@ After prepare step is done, we add tune code to generate quantized model. q_model.save(FLAGS.output_model) ``` -The Intel® Low Precision Optimization Tool quantizer() function will return a best quantized model under time constraint. +The Intel® Neural Compressor quantizer() function will return a best quantized model under time constraint. diff --git a/examples/tensorflow/nlp/transformer_lt/main.py b/examples/tensorflow/nlp/transformer_lt/main.py index cebbf9cfc63..f4ad76c425a 100644 --- a/examples/tensorflow/nlp/transformer_lt/main.py +++ b/examples/tensorflow/nlp/transformer_lt/main.py @@ -138,7 +138,7 @@ def eval_func(infer_graph, iteration=-1): output_tensor = infer_graph.get_tensor_by_name(\ 'model/Transformer/strided_slice_19:0') ds = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file) - from lpot.data import DATALOADERS + from neural_compressor.data import DATALOADERS dataloader = DATALOADERS['tensorflow'](ds, batch_size=FLAGS.batch_size, collate_fn=collate_fn) config = tf.compat.v1.ConfigProto() @@ -225,7 +225,7 @@ def __len__(self): def main(_): graph = load_graph(FLAGS.input_graph) if FLAGS.mode == 'tune': - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(FLAGS.config) ds = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file) quantizer.calib_dataloader = common.DataLoader(ds, collate_fn=collate_fn, \ diff --git a/examples/tensorflow/object_detection/README.md b/examples/tensorflow/object_detection/README.md index 10015082490..4811f463444 100644 --- a/examples/tensorflow/object_detection/README.md +++ b/examples/tensorflow/object_detection/README.md @@ -17,8 +17,8 @@ Currently, we've enabled below models. Recommend python 3.6 or higher version. ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2. Install Intel Tensorflow ```shell @@ -140,10 +140,10 @@ Now we support both pb and ckpt formats. > > 2. For ssd_resnet34 model, anno_path of evaluation/accuracy/metric/COCOmAP in config file should be "label_map.yaml" -Details of enabling Intel® Low Precision Optimization Tool on ssd_resnet50_v1 for Tensorflow. +Details of enabling Intel® Neural Compressor on ssd_resnet50_v1 for Tensorflow. ========================= -This is a tutorial of how to enable ssd_resnet50_v1 model with Intel® Low Precision Optimization Tool. 
+This is a tutorial of how to enable the ssd_resnet50_v1 model with Intel® Neural Compressor. ## User Code Analysis 1. User specifies fp32 *model*, calibration dataset *q_dataloader*, evaluation dataset *eval_dataloader* and metric in tuning.metric field of model-specific yaml config file. @@ -153,7 +153,7 @@ For ssd_resnet50_v1, we applied the latter one because our philosophy is to enab ### q_dataloader Part Adaption -Specifically, we need to add one generator to iterate the dataset per Intel® Low Precision Optimization Tool requirements. The easiest way is to implement *__iter__* interface. Below function will yield the images to feed the model as input. +Specifically, we need to add a generator to iterate the dataset, per Intel® Neural Compressor requirements. The easiest way is to implement the *__iter__* interface. The function below will yield the images to feed the model as input. ```python def __iter__(self): @@ -177,7 +177,7 @@ def __iter__(self): ### Evaluation Part Adaption The Class model_infer has the run_accuracy function which actually could be re-used as the eval_func. -Compare with the original version, we added the additional parameter **input_graph** as the Intel® Low Precision Optimization Tool would call this interface with the graph to be evaluated. The following code snippet also need to be added into the run_accuracy function to update the class members like self.input_tensor and self.output_tensors. +Compared with the original version, we added the additional parameter **input_graph**, as Intel® Neural Compressor will call this interface with the graph to be evaluated. The following code snippet also needs to be added into the run_accuracy function to update class members like self.input_tensor and self.output_tensors. ```python if input_graph: graph_def = get_graph_def(self.args.input_graph, self.output_layers) @@ -236,7 +236,7 @@ Here we set the input tensor and output tensors name into *inputs* and *outputs* After prepare step is done, we just need update infer_detections.py like below. ```python -from lpot.experimental import Quantization,common +from neural_compressor.experimental import Quantization,common quantizer = Quantization(args.config) quantizer.model = common.Model(args.input_graph) diff --git a/examples/tensorflow/object_detection/faster_rcnn_inception_resnet_v2.yaml b/examples/tensorflow/object_detection/faster_rcnn_inception_resnet_v2.yaml index 48b7a377b85..66cd9a22f09 100644 --- a/examples/tensorflow/object_detection/faster_rcnn_inception_resnet_v2.yaml +++ b/examples/tensorflow/object_detection/faster_rcnn_inception_resnet_v2.yaml @@ -22,7 +22,7 @@ model: # mandatory. used to specif quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. calibration: sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. dataset: COCORecord: root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord @@ -37,10 +37,10 @@ quantization: # optional. tuning constrai algorithm: minmax evaluation: # optional. used to config evaluation process. - accuracy: # optional.
required if user doesn't provide eval_func in neural_compressor.Quantization. metric: COCOmAP: {} - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 10 dataset: COCORecord: diff --git a/examples/tensorflow/object_detection/faster_rcnn_resnet101.yaml b/examples/tensorflow/object_detection/faster_rcnn_resnet101.yaml index 683c3d2b79a..08ca7fff348 100644 --- a/examples/tensorflow/object_detection/faster_rcnn_resnet101.yaml +++ b/examples/tensorflow/object_detection/faster_rcnn_resnet101.yaml @@ -22,7 +22,7 @@ model: # mandatory. used to specif quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. calibration: sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. dataset: COCORecord: root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord @@ -31,10 +31,10 @@ quantization: # optional. tuning constrai size: 600 evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: COCOmAP: {} - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 10 dataset: COCORecord: diff --git a/examples/tensorflow/object_detection/main.py b/examples/tensorflow/object_detection/main.py index 8b531f82cc3..23f43511c0f 100644 --- a/examples/tensorflow/object_detection/main.py +++ b/examples/tensorflow/object_detection/main.py @@ -43,14 +43,14 @@ def __init__(self): def run(self): if self.args.tune: - from lpot.experimental import Quantization + from neural_compressor.experimental import Quantization quantizer = Quantization(self.args.config) quantizer.model = self.args.input_graph q_model = quantizer() q_model.save(self.args.output_model) if self.args.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(self.args.config) evaluator.model = self.args.input_graph evaluator(self.args.mode) diff --git a/examples/tensorflow/object_detection/mask_rcnn_inception_v2.yaml b/examples/tensorflow/object_detection/mask_rcnn_inception_v2.yaml index d3613d913b4..04b81096147 100644 --- a/examples/tensorflow/object_detection/mask_rcnn_inception_v2.yaml +++ b/examples/tensorflow/object_detection/mask_rcnn_inception_v2.yaml @@ -22,7 +22,7 @@ model: # mandatory. used to specif quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. calibration: sampling_size: 50 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. 
if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. dataset: COCORecord: root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord @@ -36,10 +36,10 @@ quantization: # optional. tuning constrai # } evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: COCOmAP: {} - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 1 dataset: COCORecord: diff --git a/examples/tensorflow/object_detection/ssd_mobilenet_v1.yaml b/examples/tensorflow/object_detection/ssd_mobilenet_v1.yaml index 2fcf0a3bef4..3338f19c1fd 100644 --- a/examples/tensorflow/object_detection/ssd_mobilenet_v1.yaml +++ b/examples/tensorflow/object_detection/ssd_mobilenet_v1.yaml @@ -22,7 +22,7 @@ model: # mandatory. used to specif quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. calibration: sampling_size: 10, 50, 100, 200 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. dataset: COCORecord: root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord @@ -34,10 +34,10 @@ quantization: # optional. tuning constrai algorithm: minmax evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: COCOmAP: {} - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 10 dataset: COCORecord: diff --git a/examples/tensorflow/object_detection/ssd_resnet34.yaml b/examples/tensorflow/object_detection/ssd_resnet34.yaml index c588ed1cf84..9cc3c9cab67 100644 --- a/examples/tensorflow/object_detection/ssd_resnet34.yaml +++ b/examples/tensorflow/object_detection/ssd_resnet34.yaml @@ -22,7 +22,7 @@ model: # mandatory. used to specif quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. calibration: sampling_size: 100 # optional. default value is the size of whole dataset. used to set how many portions of calibration dataset is used. exclusive with iterations field. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 1 dataset: COCORaw: @@ -47,12 +47,12 @@ quantization: # optional. tuning constrai }, } evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. 
required if user doesn't provide eval_func in neural_compressor.Quantization. metric: COCOmAP: anno_path: /path/to/annotation - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 1 dataset: COCORaw: diff --git a/examples/tensorflow/object_detection/ssd_resnet50_v1.yaml b/examples/tensorflow/object_detection/ssd_resnet50_v1.yaml index e4576da90bf..6469362b8b2 100644 --- a/examples/tensorflow/object_detection/ssd_resnet50_v1.yaml +++ b/examples/tensorflow/object_detection/ssd_resnet50_v1.yaml @@ -22,7 +22,7 @@ model: # mandatory. used to specif quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. calibration: sampling_size: 100 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. dataset: COCORecord: root: /path/to/calibration/dataset # NOTE: modify to coco2017 validation dataset TFRecord @@ -36,10 +36,10 @@ quantization: # optional. tuning constrai algorithm: minmax evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: COCOmAP: {} - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 10 dataset: COCORecord: diff --git a/examples/tensorflow/object_detection/yolo_v3/README.md b/examples/tensorflow/object_detection/yolo_v3/README.md index dc7b5d22937..9436b384e1b 100644 --- a/examples/tensorflow/object_detection/yolo_v3/README.md +++ b/examples/tensorflow/object_detection/yolo_v3/README.md @@ -1,4 +1,4 @@ -This document describes the step-by-step to reproduce Yolo-v3 tuning result with LPOT. +This document describes the step-by-step process to reproduce the Yolo-v3 tuning result with Neural Compressor. ## Prerequisite @@ -7,14 +7,14 @@ This document describes the step-by-step to reproduce Yolo-v3 tuning result with Recommend python 3.6 or higher version. ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2. Install Intel Tensorflow ```shell pip install intel-tensorflow==1.15.0up3 ``` -> Note: Supported Tensorflow versions please refer to LPOT readme file. +> Note: For supported Tensorflow versions, please refer to the Neural Compressor readme file. ### 3. Installation Dependency packages ```shell @@ -63,7 +63,7 @@ tensorflow records using the `https://github.com/tensorflow/models.git` dedicate #### Manual dataset download Download CoCo Dataset from [Official Website](https://cocodataset.org/#download). -## Get Quantized Yolo-v3 model with LPOT +## Get Quantized Yolo-v3 model with Neural Compressor ### 1.Config the yolo_v3.yaml with the valid cocoraw data path.
@@ -76,4 +76,4 @@ cd examples/tensorflow/object_detection/yolo_v3 python infer_detections.py --input_graph /path/to/yolov3_fp32.pb --config ./yolo_v3.yaml --output_graph /path/to/save/yolov3_tuned3.pb ``` -Finally, the LPOT will generate the quantized Yolo-v3 model with relative 1% loss. +Finally, the program will generate a quantized Yolo-v3 model with at most 1% relative accuracy loss. diff --git a/examples/tensorflow/object_detection/yolo_v3/infer_detections.py b/examples/tensorflow/object_detection/yolo_v3/infer_detections.py index 5efe3329fc5..1deb3622c2a 100644 --- a/examples/tensorflow/object_detection/yolo_v3/infer_detections.py +++ b/examples/tensorflow/object_detection/yolo_v3/infer_detections.py @@ -16,7 +16,7 @@ flags.DEFINE_string("output_graph", None, "input graph") -flags.DEFINE_string("config", None, "LPOT config file") +flags.DEFINE_string("config", None, "Neural Compressor config file") flags.DEFINE_float("conf_threshold", 0.5, "confidence threshold") @@ -140,7 +140,7 @@ def main(_): run_benchmark() else: FLAGS.batch_size = 1 - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(FLAGS.config) quantizer.model = common.Model(FLAGS.input_graph) kwargs = {'conf_threshold': FLAGS.conf_threshold, diff --git a/examples/tensorflow/object_detection/yolo_v3/yolo_v3.yaml b/examples/tensorflow/object_detection/yolo_v3/yolo_v3.yaml index cd75478418f..6d79f16ce52 100644 --- a/examples/tensorflow/object_detection/yolo_v3/yolo_v3.yaml +++ b/examples/tensorflow/object_detection/yolo_v3/yolo_v3.yaml @@ -1,4 +1,4 @@ -model: # mandatory. lpot uses this model name and framework name to decide where to save tuning history and deploy yaml. +model: # mandatory. neural_compressor uses this model name and framework name to decide where to save tuning history and deploy yaml. name: yolo_v3 framework: tensorflow # mandatory. supported values are tensorflow, pytorch, or mxnet; allow new framework backend extension. inputs: inputs @@ -7,7 +7,7 @@ model: # mandatory. lpot uses this quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. calibration: sampling_size: 100 # optional. default value is the size of whole dataset. used to set how many portions of calibration dataset is used. exclusive with iterations field. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 1 dataset: COCORecord: diff --git a/examples/tensorflow/oob_models/README.md b/examples/tensorflow/oob_models/README.md index c211fb9bd6b..ab836ed096b 100644 --- a/examples/tensorflow/oob_models/README.md +++ b/examples/tensorflow/oob_models/README.md @@ -9,8 +9,8 @@ This document is used to list steps of reproducing Intel Optimized TensorFlow OO Recommend python 3.6 or higher version. ```bash - # Install Intel® Low Precision Optimization Tool - pip install lpot + # Install Intel® Neural Compressor + pip install neural-compressor pip install intel-tensorflow ``` > Note: Supported Tensorflow [Version](../../../README.md).
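The renamed tuning entry points above (infer_detections.py here, and the other example drivers below) all reduce to the same experimental-API pattern. A minimal sketch of that flow, assuming placeholder paths and the yolo_v3.yaml shown earlier:

```python
# Condensed sketch of the tuning flow these examples implement.
# The yaml name and graph paths are placeholders, not part of the patch.
from neural_compressor.experimental import Quantization, common

quantizer = Quantization('yolo_v3.yaml')                   # yaml drives calibration and evaluation
quantizer.model = common.Model('/path/to/yolov3_fp32.pb')  # wrap the fp32 frozen graph
q_model = quantizer()                                      # run the accuracy-driven tuning loop
q_model.save('/path/to/save/yolov3_tuned3.pb')             # write out the quantized graph
```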
diff --git a/examples/tensorflow/oob_models/find_outputs.py b/examples/tensorflow/oob_models/find_outputs.py index 3fc90a14543..a4a01bdac50 100644 --- a/examples/tensorflow/oob_models/find_outputs.py +++ b/examples/tensorflow/oob_models/find_outputs.py @@ -135,8 +135,8 @@ def get_input_output(graph_path, args): # give a fix shape if not get input shape fix_dynamic_shape = 300 - if args.use_lpot: - from lpot.experimental import common + if args.use_nc: + from neural_compressor.experimental import common model = common.Model(graph_path) graph_def = model.graph_def output_nodes = summarize_graph(graph_def, fix_dynamic_shape) diff --git a/examples/tensorflow/oob_models/run_benchmark.sh b/examples/tensorflow/oob_models/run_benchmark.sh index a2608fe2641..d1fd2d23dc4 100755 --- a/examples/tensorflow/oob_models/run_benchmark.sh +++ b/examples/tensorflow/oob_models/run_benchmark.sh @@ -107,8 +107,8 @@ vggvox -------- ) -# lpot graph_def -models_need_lpot_graphdef=( +# neural_compressor graph_def +models_need_nc_graphdef=( -------- pose-ae-multiperson pose-ae-refinement @@ -146,9 +146,9 @@ function run_benchmark { echo "$topology need to disable optimize_for_inference!" extra_cmd+=" --disable_optimize " fi - if [[ "${models_need_lpot_graphdef[@]}" =~ " ${topology} " ]]; then - echo "$topology need lpot graph_def!" - extra_cmd+=" --use_lpot " + if [[ "${models_need_nc_graphdef[@]}" =~ " ${topology} " ]]; then + echo "$topology need neural_compressor graph_def!" + extra_cmd+=" --use_nc " fi python tf_benchmark.py \ diff --git a/examples/tensorflow/oob_models/run_tuning.sh b/examples/tensorflow/oob_models/run_tuning.sh index c67fc5e99f3..1c71aa9dbaf 100755 --- a/examples/tensorflow/oob_models/run_tuning.sh +++ b/examples/tensorflow/oob_models/run_tuning.sh @@ -102,8 +102,8 @@ ens3_adv_inception_v3 -------- ) -# lpot graph_def -models_need_lpot_graphdef=( +# neural_compressor graph_def +models_need_nc_graphdef=( -------- pose-ae-multiperson pose-ae-refinement @@ -152,9 +152,9 @@ function run_tuning { echo "$topology need to set bs = 32!" extra_cmd+=" -b 32 " fi - if [[ "${models_need_lpot_graphdef[@]}" =~ " ${topology} " ]]; then - echo "$topology need lpot graph_def!" - extra_cmd+=" --use_lpot " + if [[ "${models_need_nc_graphdef[@]}" =~ " ${topology} " ]]; then + echo "$topology need neural_compressor graph_def!" 
+ extra_cmd+=" --use_nc " fi python tf_benchmark.py \ diff --git a/examples/tensorflow/oob_models/tf_benchmark.py b/examples/tensorflow/oob_models/tf_benchmark.py index 01f7fb8be11..dc4b25d598c 100644 --- a/examples/tensorflow/oob_models/tf_benchmark.py +++ b/examples/tensorflow/oob_models/tf_benchmark.py @@ -34,8 +34,8 @@ def initialize_graph(model_details, args): in_name + ":0": tf_v1.Variable(val) for in_name, val in model_details['input'].items()} - if args.use_lpot: - from lpot.experimental import common + if args.use_nc: + from neural_compressor.experimental import common model = common.Model(model_details['model_dir']) od_graph_def = model.graph_def else: @@ -210,11 +210,11 @@ def __iter__(self): parser.add_argument("--is_meta", action='store_true', help="input a meta file") parser.add_argument("--save_graph", action='store_true', help="save_graph") parser.add_argument("--benchmark", action='store_true', help="Benchmark.") - parser.add_argument("--use_lpot", action='store_true', help="Find input/output via lpot.") + parser.add_argument("--use_nc", action='store_true', help="Find input/output via neural_compressor.") # tuning - parser.add_argument("--yaml", type=str, help="config yaml file of lpot.", default='./config.yaml') - parser.add_argument("--tune", action='store_true', help="Do lpot optimize.") - parser.add_argument("--output_path", help="path of lpot convert model", default='./lpot-tune.pb') + parser.add_argument("--yaml", type=str, help="config yaml file of neural_compressor.", default='./config.yaml') + parser.add_argument("--tune", action='store_true', help="Do neural_compressor optimize.") + parser.add_argument("--output_path", help="path of neural_compressor convert model", default='./nc-tune.pb') # args args = parser.parse_args() @@ -281,7 +281,7 @@ def __iter__(self): # tune if args.tune: # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common inputs = model_detail['input'] outputs = model_detail['output'] _write_inputs_outputs_to_yaml(args.yaml, "./config_tmp.yaml", list(inputs.keys()), outputs) diff --git a/examples/tensorflow/pruning/resnet_v2/README.md b/examples/tensorflow/pruning/resnet_v2/README.md index 0ac0f2628b5..53f9cfa1324 100644 --- a/examples/tensorflow/pruning/resnet_v2/README.md +++ b/examples/tensorflow/pruning/resnet_v2/README.md @@ -1,15 +1,15 @@ Step-by-Step ============ -This document is used to list steps of reproducing Intel® Low Precision Optimization Tool magnitude pruning feature. +This document is used to list steps of reproducing Intel® Neural Compressor magnitude pruning feature. ## Prerequisite ### 1. Installation ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2. Install Intel Tensorflow 2.4.0 or above. 
```shell diff --git a/examples/tensorflow/pruning/resnet_v2/benchmark.py b/examples/tensorflow/pruning/resnet_v2/benchmark.py index b89b9dedb61..28d47de5c95 100644 --- a/examples/tensorflow/pruning/resnet_v2/benchmark.py +++ b/examples/tensorflow/pruning/resnet_v2/benchmark.py @@ -30,7 +30,7 @@ def __getitem__(self, idx): return self.test_images[idx], self.test_labels[idx] -from lpot.experimental import Benchmark, common +from neural_compressor.experimental import Benchmark, common evaluator = Benchmark('benchmark.yaml') evaluator.model = common.Model('./pruned_model') evaluator.b_dataloader = common.DataLoader(EvalDataset()) diff --git a/examples/tensorflow/pruning/resnet_v2/benchmark.yaml b/examples/tensorflow/pruning/resnet_v2/benchmark.yaml index 3a1a7470326..f58a5c6d404 100644 --- a/examples/tensorflow/pruning/resnet_v2/benchmark.yaml +++ b/examples/tensorflow/pruning/resnet_v2/benchmark.yaml @@ -19,7 +19,7 @@ model: # mandatory. used to specif name: resnet_v2_prune framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. performance: # optional. used to benchmark performance of passing model. iteration: 100 configs: diff --git a/examples/tensorflow/pruning/resnet_v2/main.py b/examples/tensorflow/pruning/resnet_v2/main.py index b0b0a218097..42334d3d6eb 100644 --- a/examples/tensorflow/pruning/resnet_v2/main.py +++ b/examples/tensorflow/pruning/resnet_v2/main.py @@ -318,8 +318,8 @@ def __getitem__(self, idx): if __name__ == '__main__': train() - from lpot.experimental import Pruning, common - from lpot.utils import logger + from neural_compressor.experimental import Pruning, common + from neural_compressor.utils import logger prune = Pruning("./prune.yaml") prune.eval_dataloader = common.DataLoader(EvalDataset()) prune.train_dataloader = common.DataLoader(TrainDataset()) diff --git a/examples/tensorflow/qat/resnet_v2.py b/examples/tensorflow/qat/resnet_v2.py index d394d73e54d..2c749b990b8 100644 --- a/examples/tensorflow/qat/resnet_v2.py +++ b/examples/tensorflow/qat/resnet_v2.py @@ -367,7 +367,7 @@ def __getitem__(self, idx): if __name__ == '__main__': build_fake_yaml() train() - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization('fake_yaml.yaml') quantizer.eval_dataloader = common.DataLoader(Dataset()) quantizer.model = './baseline_model' diff --git a/examples/tensorflow/qat_conversion/README.md b/examples/tensorflow/qat_conversion/README.md index 58760119dd1..80001e08ee2 100644 --- a/examples/tensorflow/qat_conversion/README.md +++ b/examples/tensorflow/qat_conversion/README.md @@ -1,15 +1,15 @@ Step-by-Step ============ -This document is used to list steps of reproducing TensorFlow keras Intel® Low Precision Optimization Tool QAT conversion. +This document is used to list steps of reproducing TensorFlow keras Intel® Neural Compressor QAT conversion. ## Prerequisite ### 1. Installation ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2. 
Install Intel Tensorflow and TensorFlow Model Optimization ```shell diff --git a/examples/tensorflow/qat_conversion/benchmark.py b/examples/tensorflow/qat_conversion/benchmark.py index e31fb0226a4..df49ab3b075 100644 --- a/examples/tensorflow/qat_conversion/benchmark.py +++ b/examples/tensorflow/qat_conversion/benchmark.py @@ -21,7 +21,7 @@ def __iter__(self): yield self.test_images[self.i: self.i + self.batch_size], self.test_labels[self.i: self.i + self.batch_size] self.i = self.i + self.batch_size -from lpot.experimental import Benchmark, common +from neural_compressor.experimental import Benchmark, common evaluator = Benchmark('mnist.yaml') evaluator.model = common.Model('quantized_model') evaluator.b_dataloader = dataloader() diff --git a/examples/tensorflow/qat_conversion/convert.py b/examples/tensorflow/qat_conversion/convert.py index 5db63d8fc8a..f1b8c7054b3 100644 --- a/examples/tensorflow/qat_conversion/convert.py +++ b/examples/tensorflow/qat_conversion/convert.py @@ -1,4 +1,4 @@ -from lpot.experimental import ModelConversion, common +from neural_compressor.experimental import ModelConversion, common conversion = ModelConversion() conversion.source = 'QAT' conversion.destination = 'default' diff --git a/examples/tensorflow/qat_conversion/mnist.yaml b/examples/tensorflow/qat_conversion/mnist.yaml index c79a01b6b19..e27b54a0044 100644 --- a/examples/tensorflow/qat_conversion/mnist.yaml +++ b/examples/tensorflow/qat_conversion/mnist.yaml @@ -17,8 +17,8 @@ model: # mandatory. used to specif name: mnist framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: Accuracy: {} # built-in metrics are topk, map, f1, allow user to register new metric. diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/README.md b/examples/tensorflow/recommendation/wide_deep_large_ds/README.md index 65207b566d7..1955769b7cc 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/README.md +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/README.md @@ -7,8 +7,8 @@ This document is used to list steps of reproducing TensorFlow Wide & Deep tuning ### 1. Installation ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2. 
Install Intel Tensorflow ```shell diff --git a/examples/tensorflow/recommendation/wide_deep_large_ds/inference.py b/examples/tensorflow/recommendation/wide_deep_large_ds/inference.py index 7f7182c78d6..c164df83cfa 100644 --- a/examples/tensorflow/recommendation/wide_deep_large_ds/inference.py +++ b/examples/tensorflow/recommendation/wide_deep_large_ds/inference.py @@ -159,7 +159,7 @@ def __init__(self): arg_parser.add_argument('--tune', dest='tune', action='store_true', - help='use lpot to tune.') + help='use neural_compressor to tune.') arg_parser.add_argument("--warmup-steps", type=int, default=50, help="number of warmup steps") @@ -176,11 +176,11 @@ def __init__(self): self.args = arg_parser.parse_args() def auto_tune(self): - """This is lpot tuning part to generate a quantized pb + """This is the neural_compressor tuning part to generate a quantized pb Returns: graph: it will return a quantized pb """ - from lpot.experimental import Quantization + from neural_compressor.experimental import Quantization infer_graph = load_graph(self.args.input_graph) quantizer = Quantization(self.args.config) if self.args.calib_data: @@ -292,7 +292,7 @@ def eval_inference(self, infer_graph): return accuracy def run(self): - """ This is lpot function include tuning and benchmark option """ + """ This is the neural_compressor function that includes the tuning and benchmark options """ if self.args.tune: q_model = evaluate_opt_graph.auto_tune() diff --git a/examples/tensorflow/saved_model/README.md b/examples/tensorflow/saved_model/README.md index c5bb3577477..9f9bc12d88d 100644 --- a/examples/tensorflow/saved_model/README.md +++ b/examples/tensorflow/saved_model/README.md @@ -1,15 +1,15 @@ Step-by-Step ============ -This document is used to list steps of reproducing TensorFlow saved model Intel® Low Precision Optimization Tool tuning zoo result. +This document is used to list steps of reproducing TensorFlow saved model Intel® Neural Compressor tuning zoo result. ## Prerequisite ### 1. Installation ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor +pip install neural-compressor ``` ### 2.
Install Intel Tensorflow ```shell @@ -37,5 +37,5 @@ object detection ## Run Command ```shell - bash run_tuning.sh --config=./config.yaml --input_model=./ssd_resnet50_v1 --output_model=./lpot_ssd_resnet50_v1 + bash run_tuning.sh --config=./config.yaml --input_model=./ssd_resnet50_v1 --output_model=./nc_ssd_resnet50_v1 ``` diff --git a/examples/tensorflow/saved_model/main.py b/examples/tensorflow/saved_model/main.py index 42177b131be..4bdc1db9210 100644 --- a/examples/tensorflow/saved_model/main.py +++ b/examples/tensorflow/saved_model/main.py @@ -42,14 +42,14 @@ def __init__(self): def run(self): if self.args.tune: - from lpot.experimental import Quantization + from neural_compressor.experimental import Quantization quantizer = Quantization(self.args.config) quantizer.model = self.args.input_graph q_model = quantizer() q_model.save(self.args.output_model) if self.args.benchmark: - from lpot.experimental import Benchmark + from neural_compressor.experimental import Benchmark evaluator = Benchmark(self.args.config) evaluator.model = self.args.input_graph evaluator(self.args.mode) diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/README.md b/examples/tensorflow/semantic_image_segmentation/deeplab/README.md index bb4bc161cf7..ccc5223ea13 100644 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/README.md +++ b/examples/tensorflow/semantic_image_segmentation/deeplab/README.md @@ -1,7 +1,7 @@ Step-by-Step ============ -This document list steps of reproducing Intel Optimized TensorFlow image recognition models tuning results via LPOT. +This document lists steps of reproducing Intel Optimized TensorFlow image recognition models tuning results via Neural Compressor. > **Note**: > Most of those models are both supported in Intel optimized TF 1.15.x and Intel optimized TF 2.x. @@ -56,18 +56,18 @@ python deeplab/export_model.py \ # Run ```shell cd examples/tensorflow/semantic_image_segmentation/deeplab -bash run_tuning.sh --config=deeplab.yaml --input_model=/PATH/TO/deeplab_export.pb --output_model=./lpot_deeplab.pb +bash run_tuning.sh --config=deeplab.yaml --input_model=/PATH/TO/deeplab_export.pb --output_model=./nc_deeplab.pb ``` -Examples of enabling Intel® Low Precision Optimization Tool auto tuning on Deeplab model for tensorflow +Examples of enabling Intel® Neural Compressor auto tuning on Deeplab model for tensorflow ======================================================= -This is a tutorial of how to enable deeplab model with Intel® Low Precision Optimization Tool. +This is a tutorial on how to enable the deeplab model with Intel® Neural Compressor. # User Code Analysis -Intel® Low Precision Optimization Tool supports two usages: +Intel® Neural Compressor supports two usages: 1. User specifies fp32 "model", yaml configured calibration dataloader in calibration field and evaluation dataloader in evaluation field, metric in tuning.metric field of model-specific yaml config file. @@ -76,7 +76,7 @@ Intel® Low Precision Optimization Tool supports two usages: 2. User specifies fp32 "model", calibration dataset "q_dataloader" and a custom "eval_func" which encapsulates the evaluation dataset and metric by itself. -We provide Deeplab model pretrained on PASCAL VOC 2012, Using mIOU as metric which is built-in supported by Intel® Low Precision Optimization Tool. +We provide a Deeplab model pretrained on PASCAL VOC 2012, using mIOU as the metric, which is built-in supported by Intel® Neural Compressor.
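The second usage is easiest to see in code. A minimal sketch, assuming a hypothetical compute_miou helper (not part of this patch) standing in for the real PASCAL VOC evaluation loop:

```python
from neural_compressor.experimental import Quantization, common

def compute_miou(model):
    # Hypothetical stand-in: a real implementation would run inference
    # over the PASCAL VOC validation set and return mIOU as a float.
    return 0.0

def eval_func(model):
    # Must return a single higher-is-better accuracy number.
    return compute_miou(model)

quantizer = Quantization('deeplab.yaml')
quantizer.model = common.Model('/PATH/TO/deeplab_export.pb')
quantizer.eval_func = eval_func   # overrides the yaml-configured metric/dataloader
q_model = quantizer()
```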
### Write Yaml config file @@ -86,7 +86,7 @@ In examples directory, there is a template.yaml. We could remove most of the ite ```yaml # deeplab.yaml -model: # mandatory. lpot uses this model name and framework name to decide where to save tuning history and deploy yaml. +model: # mandatory. neural_compressor uses this model name and framework name to decide where to save tuning history and deploy yaml. name: deeplab framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. @@ -102,8 +102,8 @@ quantization: # optional. tuning constrai ParseDecodeVoc: {} -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: mIOU: num_classes: 21 # built-in metrics are topk, map, f1, allow user to register new metric. @@ -144,7 +144,7 @@ Here we choose topk which is built-in metric and set accuracy criterion as toler After completed preparation steps, we just need to add below tuning part in `eval_classifier_optimized_graph` class. ```python -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common quantizer = Quantization(self.args.config) quantizer.model = common.Model(self.args.input_graph) q_model = quantizer() @@ -153,7 +153,7 @@ q_model.save(self.args.output_graph) ### Benchmark ```python -from lpot.experimental import Benchmark, common +from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(self.args.config) evaluator.model = common.Model(self.args.input_graph) evaluator(self.args.mode) diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/deeplab.yaml b/examples/tensorflow/semantic_image_segmentation/deeplab/deeplab.yaml index d663901b832..ad98faedd8e 100644 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/deeplab.yaml +++ b/examples/tensorflow/semantic_image_segmentation/deeplab/deeplab.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -model: # mandatory. lpot uses this model name and framework name to decide where to save tuning history and deploy yaml. +model: # mandatory. neural_compressor uses this model name and framework name to decide where to save tuning history and deploy yaml. name: deeplab framework: tensorflow # mandatory. supported values are tensorflow, pytorch, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. inputs: ImageTensor @@ -30,8 +30,8 @@ quantization: # optional. tuning constrai ParseDecodeVoc: {} -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: mIOU: num_classes: 21 # built-in metrics are topk, map, f1, allow user to register new metric. 
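The yaml comment above notes that users may register new metrics. One plausible shape for that, assuming the experimental common.Metric wrapper and the update/reset/result contract the built-in metrics follow:

```python
from neural_compressor.experimental import Quantization, common

class PixelAccuracy:
    """Hypothetical user-defined metric implementing update/reset/result."""
    def __init__(self):
        self.correct, self.total = 0, 0

    def update(self, preds, labels):
        # Accumulate per-batch statistics; shapes are example-specific.
        for p, l in zip(preds, labels):
            self.correct += int(p == l)
            self.total += 1

    def reset(self):
        self.correct, self.total = 0, 0

    def result(self):
        return self.correct / max(self.total, 1)

quantizer = Quantization('deeplab.yaml')
quantizer.metric = common.Metric(PixelAccuracy)  # registered in place of the yaml metric
```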
diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/main.py b/examples/tensorflow/semantic_image_segmentation/deeplab/main.py index 7232d8a11f5..b151ebbe0f2 100644 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/main.py +++ b/examples/tensorflow/semantic_image_segmentation/deeplab/main.py @@ -44,22 +44,22 @@ def __init__(self): arg_parser.add_argument('--mode', dest='mode', default='performance', help='benchmark mode') - arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use lpot to tune.') + arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use neural_compressor to tune.') self.args = arg_parser.parse_args() def run(self): - """ This is lpot function include tuning and benchmark option """ + """ This is the neural_compressor function that includes the tuning and benchmark options """ if self.args.tune: - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common quantizer = Quantization(self.args.config) quantizer.model = common.Model(self.args.input_graph) q_model = quantizer() q_model.save(self.args.output_graph) if self.args.benchmark: - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common evaluator = Benchmark(self.args.config) evaluator.model = common.Model(self.args.input_graph) evaluator(self.args.mode) diff --git a/examples/tensorflow/semantic_image_segmentation/deeplab/requirements.txt b/examples/tensorflow/semantic_image_segmentation/deeplab/requirements.txt index 98b3f30e17f..16ea87a7151 100644 --- a/examples/tensorflow/semantic_image_segmentation/deeplab/requirements.txt +++ b/examples/tensorflow/semantic_image_segmentation/deeplab/requirements.txt @@ -1,2 +1,2 @@ intel-tensorflow -lpot +neural-compressor diff --git a/examples/tensorflow/style_transfer/README.md b/examples/tensorflow/style_transfer/README.md index 6329a244fc7..51e82a0ce13 100644 --- a/examples/tensorflow/style_transfer/README.md +++ b/examples/tensorflow/style_transfer/README.md @@ -1,15 +1,15 @@ Step-by-Step ============ -This document is used to list steps of reproducing TensorFlow style transfer Intel® Low Precision Optimization Tool tuning zoo result. +This document is used to list steps of reproducing TensorFlow style transfer Intel® Neural Compressor tuning zoo result. ## Prerequisite ### 1. Installation ```shell -# Install Intel® Low Precision Optimization Tool -pip install lpot +# Install Intel® Neural Compressor
pip install neural-compressor ``` ### 2. Install Intel Tensorflow ```shell @@ -51,8 +51,8 @@ tar -xvzf arbitrary_style_transfer.tar.gz ./model ```shell python style_tune.py --output_dir=./result --style_images_paths=./style_images --content_images_paths=./content_images --input_model=./model/model.ckpt ``` -### Quantize with lpot -#### 1. Tune model with lpot +### Quantize with neural_compressor +#### 1. Tune model with neural_compressor ```shell bash run_tuning.sh --dataset_location=style_images/,content_images/ --input_model=./model/model.ckpt --output_model=saved_model ``` @@ -61,16 +61,16 @@ tar -xvzf arbitrary_style_transfer.tar.gz ./model bash run_benchmark.sh --dataset_location=style_images/,content_images/ --input_model=saved_model.pb --batch_size=1 ``` -Details of enabling Intel® Low Precision Optimization Tool on style transfer for Tensorflow. +Details of enabling Intel® Neural Compressor on style transfer for Tensorflow.
========================= -This is a tutorial of how to enable style_transfer model with Intel® Low Precision Optimization Tool. +This is a tutorial on how to enable the style_transfer model with Intel® Neural Compressor. ## User Code Analysis 1. User specifies fp32 *model*, calibration dataset *q_dataloader*, evaluation dataset *eval_dataloader* and metric in tuning.metric field of model-specific yaml config file. 2. User specifies fp32 *model*, calibration dataset *q_dataloader* and a custom *eval_func* which encapsulates the evaluation dataset and metric by itself. -For style_transfer, we applied the latter one because we don't have metric for style transfer model.The first one is to implement the q_dataloader and implement a fake *eval_func*. As lpot have implement a style_transfer dataset, so only eval_func should be prepared after load the graph +For style_transfer, we apply the latter approach because there is no metric for the style transfer model; the former would require implementing the q_dataloader as well as a fake *eval_func*. Since neural_compressor already implements a style_transfer dataset, only the eval_func needs to be prepared after loading the graph. ### Evaluation Part Adaption As style transfer doesn't have a metric to measure the accuracy, we only implement a fake eval_func @@ -120,7 +120,7 @@ Here we set the input tensor and output tensors name into *inputs* and *outputs* After the preparation step is done, we just need to add a few lines to get the quantized model. ```python -from lpot.experimental import Quantization +from neural_compressor.experimental import Quantization quantizer = Quantization(args.config) quantizer.model = graph @@ -128,4 +128,4 @@ quantizer.eval_func = eval_func q_model = quantizer() ``` -The Intel® Low Precision Optimization Tool quantizer() function will return a best quantized model during timeout constrain. +The Intel® Neural Compressor quantizer() function will return the best quantized model found within the timeout constraint. diff --git a/examples/tensorflow/style_transfer/conf.yaml b/examples/tensorflow/style_transfer/conf.yaml index a07f64a97ce..68160b1e254 100644 --- a/examples/tensorflow/style_transfer/conf.yaml +++ b/examples/tensorflow/style_transfer/conf.yaml @@ -21,7 +21,7 @@ model: # mandatory. used to specif quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. calibration: - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 2 dataset: style_transfer: @@ -30,7 +30,7 @@ quantization: # optional. tuning constrai evaluation: performance: - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization.
batch_size: 2 dataset: style_transfer: diff --git a/examples/tensorflow/style_transfer/style_tune.py b/examples/tensorflow/style_transfer/style_tune.py index ce14402178e..285dcd3a33d 100644 --- a/examples/tensorflow/style_transfer/style_tune.py +++ b/examples/tensorflow/style_transfer/style_tune.py @@ -25,9 +25,9 @@ import tensorflow.compat.v1 as tf from PIL import Image import time -from lpot.experimental import Quantization -from lpot.data import DATALOADERS, DATASETS -from lpot.adaptor.tf_utils.util import _parse_ckpt_bn_input +from neural_compressor.experimental import Quantization +from neural_compressor.data import DATALOADERS, DATASETS +from neural_compressor.adaptor.tf_utils.util import _parse_ckpt_bn_input flags = tf.flags flags.DEFINE_string('style_images_paths', None, 'Paths to the style images' diff --git a/examples_readme.md b/examples_readme.md index 2449fb2b1d5..f46f3d55d4e 100644 --- a/examples_readme.md +++ b/examples_readme.md @@ -1,6 +1,6 @@ Examples ======== -A wide variety of examples are provided to demonstrate the usage of Intel® Low Precision Optimization Tool in different frameworks: TensorFlow, PyTorch, MXNet, and ONNX Runtime. +A wide variety of examples are provided to demonstrate the usage of Intel® Neural Compressor in different frameworks: TensorFlow, PyTorch, MXNet, and ONNX Runtime. -View the examples from the [LPOT GitHub repo](https://github.com/intel/lpot/tree/master/examples). \ No newline at end of file +View the examples from the [Neural Compressor GitHub repo](https://github.com/intel/neural-compressor/tree/master/examples). \ No newline at end of file diff --git a/getting_started.md b/getting_started.md index 1cfe6b05727..eeb5af09933 100644 --- a/getting_started.md +++ b/getting_started.md @@ -3,43 +3,43 @@ Getting Started ## Installation -The Intel® LPOT library is released as part of the +The Intel® Neural Compressor library is released as part of the [Intel® oneAPI AI Analytics Toolkit](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit.html) (AI Kit). The AI Kit provides a consolidated package of Intel's latest deep learning and machine optimizations all in one place for ease of development. Along with -LPOT, the AI Kit includes Intel-optimized versions of deep learning frameworks +Neural Compressor, the AI Kit includes Intel-optimized versions of deep learning frameworks (such as TensorFlow and PyTorch) and high-performing Python libraries to streamline end-to-end data science and AI workflows on Intel architectures. ### Linux Installation -You can install just the LPOT library from binary or source, or you can get -the Intel-optimized framework together with the LPOT library by installing the +You can install just the library from binary or source, or you can get +the Intel-optimized framework together with the library by installing the Intel® oneAPI AI Analytics Toolkit. 
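Whichever of the options below is chosen, a quick import check (a trivial sketch, not part of the patch) confirms that the renamed package, rather than the old lpot one, ends up on the path:

```python
# A successful import confirms the neural-compressor installation;
# the legacy 'lpot' name is expected to be absent after the rename.
import neural_compressor  # noqa: F401
print("neural-compressor is importable")
```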
#### Install from binary ```Shell # install from pip - pip install lpot + pip install neural-compressor # install from conda - conda install lpot -c conda-forge -c intel + conda install neural-compressor -c conda-forge -c intel ``` #### Install from source ```Shell - git clone https://github.com/intel/lpot.git - cd lpot + git clone https://github.com/intel/neural-compressor.git + cd neural-compressor pip install -r requirements.txt python setup.py install ``` #### Install from AI Kit -The AI Kit, which includes the LPOT +The AI Kit, which includes the library, is distributed through many common channels, including from Intel's website, YUM, APT, Anaconda, and more. Select and [download](https://software.intel.com/content/www/us/en/develop/tools/oneapi/ai-analytics-toolkit/download.html) @@ -60,58 +60,60 @@ The following prerequisites and requirements must be satisfied for a successful - Download and install [anaconda](https://anaconda.org/). -- Create a virtual environment named lpot in anaconda: +- Create a virtual environment named nc in anaconda: ```shell # Here we install python 3.7 for instance. You can also choose python 3.6, 3.8, or 3.9. - conda create -n lpot python=3.7 - conda activate lpot + conda create -n nc python=3.7 + conda activate nc ``` #### Install from binary ```Shell # install from pip - pip install lpot + pip install neural-compressor # install from conda - conda install lpot -c conda-forge -c intel + conda install neural-compressor -c conda-forge -c intel ``` #### Install from source ```shell -git clone https://github.com/intel/lpot.git -cd lpot +git clone https://github.com/intel/neural-compressor.git +cd neural-compressor pip install -r requirements.txt python setup.py install ``` ## Tutorials and Examples -Read the following resources to learn how to use LPOT. +Read the following resources to learn how to use Neural Compressor. ### Tutorial -The [Tutorial](../docs/tutorial.md) provides comprehensive instructions on how to utilize Intel® Low Precision Optimization Tool's features with examples. +The [Tutorial](../docs/tutorial.md) provides comprehensive instructions on how to utilize Intel® Neural Compressor's features with examples. ### Examples -[Examples](examples_readme.md) are provided to demonstrate the usage of Intel® Low Precision Optimization Tool in different frameworks: TensorFlow, PyTorch, MXNet, and ONNX Runtime. Hello World examples are also available. +[Examples](examples_readme.md) are provided to demonstrate the usage of Intel® Neural Compressor in different frameworks: TensorFlow, PyTorch, MXNet, and ONNX Runtime. Hello World examples are also available. ## Developer Documentation -View LPOT [Documentation](docs/doclist.rst) for getting started, deep dive, and advanced resources to help you use and develop LPOT. +View Neural Compressor [Documentation](docs/doclist.rst) for getting started, deep dive, and advanced resources to help you use and develop Neural Compressor. 
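To bridge the install steps and the tutorials, here is a minimal end-to-end sketch in the spirit of the Hello World examples; the yaml file, model path, and toy dataset are illustrative assumptions, not fixed artifacts of the library:

```python
import numpy as np
from neural_compressor.experimental import Quantization, common

class ToyDataset:
    """Toy calibration data: 32 random images paired with dummy labels."""
    def __init__(self):
        self.data = np.random.rand(32, 224, 224, 3).astype(np.float32)

    def __getitem__(self, idx):
        return self.data[idx], 0

    def __len__(self):
        return len(self.data)

quantizer = Quantization('conf.yaml')              # illustrative yaml config
quantizer.model = common.Model('./fp32_model.pb')  # illustrative fp32 model path
quantizer.calib_dataloader = common.DataLoader(ToyDataset())
q_model = quantizer()                              # returns the best quantized model found
q_model.save('./int8_model')
```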
## System Requirements -Intel® Low Precision Optimization Tool supports systems based on [Intel 64 architecture or compatible processors](https://en.wikipedia.org/wiki/X86-64), specially optimized for the following CPUs: +Intel® Neural Compressor supports systems based on [Intel 64 architecture or compatible processors](https://en.wikipedia.org/wiki/X86-64), specially optimized for the following CPUs: * Intel Xeon Scalable processor (formerly Skylake, Cascade Lake, Cooper Lake, and Icelake) * future Intel Xeon Scalable processor (code name Sapphire Rapids) -Intel® Low Precision Optimization Tool requires installing the pertinent Intel-optimized framework version for TensorFlow, PyTorch, MXNet and ONNX runtime. +Intel® Neural Compressor requires installing the Intel-optimized framework version for the supported DL framework you use: TensorFlow, PyTorch, MXNet, or ONNX runtime. + +Note: Intel Neural Compressor supports Intel-optimized and official frameworks for some TensorFlow versions. Refer to [Supported Frameworks](README.md#Supported-Frameworks) for specifics. ### Validated Hardware/Software Environment @@ -192,7 +194,7 @@ Intel® Low Precision Optimization Tool requires installing the pertinent Intel- ## Validated Models -Intel® Low Precision Optimization Tool provides numerous examples to show promising accuracy loss with the best performance gain. A full quantized model list on various frameworks is available in the [Model List](docs/full_model_list.md). +Intel® Neural Compressor provides numerous examples to show promising accuracy loss with the best performance gain. A full quantized model list on various frameworks is available in the [Model List](docs/full_model_list.md). diff --git a/index.rst b/index.rst index d484d91af2d..2d5277012c7 100644 --- a/index.rst +++ b/index.rst @@ -1,5 +1,5 @@ -Intel® Low Precision Optimization Tool Documentation +Intel® Neural Compressor Documentation #################################################### Welcome to the project. @@ -19,7 +19,7 @@ Sections contributions.md legal_information.md security_policy.md - Intel® LPOT repository + Intel® Neural Compressor repository diff --git a/legal_information.md b/legal_information.md index c13d450930b..5c595853b8a 100644 --- a/legal_information.md +++ b/legal_information.md @@ -3,7 +3,7 @@ Legal Information ## License -Intel® Low Precision Optimization Tool is licensed under [Apache License Version 2.0](http://www.apache.org/licenses/LICENSE-2.0). This software includes components that have separate copyright notices and licensing terms. Your use of the source code for these components is subject to the terms and conditions of the following licenses. +Intel® Neural Compressor is licensed under [Apache License Version 2.0](http://www.apache.org/licenses/LICENSE-2.0). This software includes components that have separate copyright notices and licensing terms. Your use of the source code for these components is subject to the terms and conditions of the following licenses. Apache License Version 2.0: * [Intel TensorFlow Quantization Tool](https://github.com/IntelAI/tools) @@ -11,18 +11,18 @@ Apache License Version 2.0: MIT License: * [bayesian-optimization](https://github.com/fmfn/BayesianOptimization) -See the accompanying [license](https://github.com/intel/lpot/tree/master/LICENSE) file for full license text and copyright notices. +See the accompanying [license](https://github.com/intel/neural-compressor/tree/master/LICENSE) file for full license text and copyright notices. 
## Citation -If you use Intel® Low Precision Optimization Tool in your research or you wish to refer to the tuning results published in the [Validated Models](getting_started.md#validated-models), use the following BibTeX entry. +If you use Intel® Neural Compressor in your research or you wish to refer to the tuning results published in the [Validated Models](getting_started.md#validated-models), use the following BibTeX entry. ``` -@misc{Intel® Low Precision Optimization Tool, +@misc{Intel® Neural Compressor, author = {Feng Tian, Chuanqi Wang, Guoming Zhang, Penghui Cheng, Pengxin Yuan, Haihao Shen, and Jiong Gong}, - title = {Intel® Low Precision Optimization Tool}, - howpublished = {\url{https://github.com/intel/lpot}}, + title = {Intel® Neural Compressor}, + howpublished = {\url{https://github.com/intel/neural-compressor}}, year = {2020} } ``` diff --git a/meta.yaml b/meta.yaml index 0c7dfbfa0f5..8c5afecf5ea 100644 --- a/meta.yaml +++ b/meta.yaml @@ -1,16 +1,15 @@ {% set version = "1.6" %} {% set buildnumber = 0 %} package: - name: lpot + name: neural-compressor version: {{version}} build: script_env: - - LPOT_WHL + - NC_WHL number: {{buildnumber}} - noarch: python - script: pip install --no-deps {{LPOT_WHL}} + script: pip install --no-deps {{NC_WHL}} entry_points: - - neural_compressor_bench = lpot.ux.neural_compressor_bench:main + - neural_compressor_bench = neural_compressor.ux.neural_compressor_bench:main requirements: build: - python @@ -39,17 +38,17 @@ requirements: - prettytable test: imports: - - lpot + - neural_compressor about: - home: https://github.com/intel/lpot + home: https://github.com/intel/neural-compressor license: Apache 2.0 license_family: Apache license_file: LICENSE description: ' LEGAL NOTICE: Use of this software package is subject to the software license agreement (as set forth above, in the license section of the installed Conda package and/or the README file) and all notices, disclaimers or license terms for third party or open source software included in or with the software.

- EULA: Apache 2.0
- Third Party Programs: https://github.com/intel/lpot/blob/master/third-party-programs.txt + EULA: Apache 2.0
+ Third Party Programs: https://github.com/intel/neural-compressor/blob/master/third-party-programs.txt

- Intel® Low Precision Optimization Tool. + Intel® Neural Compressor. ' diff --git a/lpot/__init__.py b/neural_compressor/__init__.py similarity index 100% rename from lpot/__init__.py rename to neural_compressor/__init__.py diff --git a/lpot/adaptor/__init__.py b/neural_compressor/adaptor/__init__.py similarity index 100% rename from lpot/adaptor/__init__.py rename to neural_compressor/adaptor/__init__.py diff --git a/lpot/adaptor/adaptor.py b/neural_compressor/adaptor/adaptor.py similarity index 97% rename from lpot/adaptor/adaptor.py rename to neural_compressor/adaptor/adaptor.py index 9e3bc5c6438..0eda5e19bcb 100644 --- a/lpot/adaptor/adaptor.py +++ b/neural_compressor/adaptor/adaptor.py @@ -17,7 +17,7 @@ from abc import abstractmethod -'''The framework backends supported by lpot, including tensorflow, mxnet and pytorch. +'''The framework backends supported by neural_compressor, including tensorflow, mxnet and pytorch. User could add new backend support by implementing new Adaptor subclass under this directory. The naming convention of new Adaptor subclass should be something like ABCAdaptor, user @@ -194,7 +194,7 @@ def convert(self, model, source, destinatin): '''The function is used to convert a source model format to another. Args: - model (lpot.model): base model to be converted. + model (neural_compressor.model): base model to be converted. source (string): The source model format. destination (string): The destination model format. ''' diff --git a/lpot/adaptor/engine.py b/neural_compressor/adaptor/engine.py similarity index 93% rename from lpot/adaptor/engine.py rename to neural_compressor/adaptor/engine.py index b448144276a..a4455bfc37f 100644 --- a/lpot/adaptor/engine.py +++ b/neural_compressor/adaptor/engine.py @@ -22,10 +22,10 @@ from collections import OrderedDict import yaml import numpy as np -from lpot.adaptor.adaptor import adaptor_registry, Adaptor -from lpot.adaptor.query import QueryBackendCapability -from lpot.utils.utility import LazyImport, dump_elapsed_time -from lpot.utils import logger +from neural_compressor.adaptor.adaptor import adaptor_registry, Adaptor +from neural_compressor.adaptor.query import QueryBackendCapability +from neural_compressor.utils.utility import LazyImport, dump_elapsed_time +from neural_compressor.utils import logger from ..utils.utility import OpPrecisionStatistics @@ -68,12 +68,12 @@ def quantize(self, tune_cfg, model, data_loader, q_func=None): """ assert q_func is None, "quantization aware training has not been supported on Deep engine" model = self.pre_optimized_model if self.pre_optimized_model else model - from lpot.model.engine_model import EngineModel + from neural_compressor.model.engine_model import EngineModel tmp_model = EngineModel(model.model) self.quantizable_ops = self._query_quantizable_ops(model) quantize_config = self._cfg_to_quantize_config(tune_cfg) iterations = tune_cfg.get('calib_iteration', 1) - from lpot.adaptor.engine_utils.engine_quantizer import EngineQuantizer + from neural_compressor.adaptor.engine_utils.engine_quantizer import EngineQuantizer quantizer = EngineQuantizer(tmp_model, data_loader, iterations, @@ -175,10 +175,10 @@ def evaluate(self, input_graph, dataloader, postprocess=None, Args: input_graph : model for evaluation - dataloader : dataloader for evaluation. lpot.data.dataloader.EngineDataLoader - postprocess : post-process for evalution. lpot.data.transform.EngineTransforms - metrics: : metrics for evaluation. 
lpot.metric.ONNXMetrics - measurer : lpot.objective.Measurer + dataloader : dataloader for evaluation. neural_compressor.data.dataloader.EngineDataLoader + postprocess : post-process for evaluation. neural_compressor.data.transform.EngineTransforms + metrics : metrics for evaluation. neural_compressor.metric.ONNXMetrics + measurer : neural_compressor.objective.Measurer iteration(int) : max iterations of evaluation. tensorboard(bool): whether to use tensorboard for visualization fp32_baseline (boolean, optional): only for compare_label=False pipeline @@ -217,7 +217,7 @@ def evaluate(self, input_graph, dataloader, postprocess=None, break if self.fp32_preds_as_label: - from lpot.adaptor.engine_utils.util import collate_preds + from neural_compressor.adaptor.engine_utils.util import collate_preds if fp32_baseline: results = collate_preds(self.fp32_results) metric.update(results, results) diff --git a/lpot/adaptor/engine.yaml b/neural_compressor/adaptor/engine.yaml similarity index 100% rename from lpot/adaptor/engine.yaml rename to neural_compressor/adaptor/engine.yaml diff --git a/lpot/adaptor/engine_utils/__init__.py b/neural_compressor/adaptor/engine_utils/__init__.py similarity index 100% rename from lpot/adaptor/engine_utils/__init__.py rename to neural_compressor/adaptor/engine_utils/__init__.py diff --git a/lpot/adaptor/engine_utils/engine_quantizer.py b/neural_compressor/adaptor/engine_utils/engine_quantizer.py similarity index 100% rename from lpot/adaptor/engine_utils/engine_quantizer.py rename to neural_compressor/adaptor/engine_utils/engine_quantizer.py diff --git a/lpot/adaptor/engine_utils/util.py b/neural_compressor/adaptor/engine_utils/util.py similarity index 100% rename from lpot/adaptor/engine_utils/util.py rename to neural_compressor/adaptor/engine_utils/util.py diff --git a/lpot/adaptor/mxnet.py b/neural_compressor/adaptor/mxnet.py similarity index 87% rename from lpot/adaptor/mxnet.py rename to neural_compressor/adaptor/mxnet.py index 490003c4a2b..34a813d3e8c 100644 --- a/lpot/adaptor/mxnet.py +++ b/neural_compressor/adaptor/mxnet.py @@ -19,11 +19,11 @@ import yaml import logging -from lpot.adaptor.adaptor import adaptor_registry, Adaptor -from lpot.adaptor.query import QueryBackendCapability -from lpot.utils.utility import dump_elapsed_time, LazyImport, singleton +from neural_compressor.adaptor.adaptor import adaptor_registry, Adaptor +from neural_compressor.adaptor.query import QueryBackendCapability +from neural_compressor.utils.utility import dump_elapsed_time, LazyImport, singleton from collections import OrderedDict -from lpot.adaptor.mxnet_utils.util import * +from neural_compressor.adaptor.mxnet_utils.util import * from copy import deepcopy mx = LazyImport("mxnet") @@ -55,13 +55,13 @@ def __init__(self, framework_specific_info): assert self.ctx is not None, 'Unsupported device' @dump_elapsed_time("Pass quantize model") - def quantize(self, tune_cfg, lpot_model, dataloader, q_func=None): + def quantize(self, tune_cfg, nc_model, dataloader, q_func=None): """The function is used to do MXNet calibration and quantization in post-training quantization. Args: tune_cfg (dict): quantization config. - lpot_model (object): lpot fp32 model to be quantized. + nc_model (object): neural_compressor fp32 model to be quantized. dataloader (object): calibration dataset. q_func (optional): training function for quantization aware training mode, not implemented yet for MXNet.
@@ -75,18 +75,18 @@ def quantize(self, tune_cfg, lpot_model, dataloader, q_func=None): logger.debug("Dump quantization configurations:") logger.debug(quant_cfg) - calib_cache = lpot_model.calib_cache + calib_cache = nc_model.calib_cache - sym_model, calib_data = prepare_model_data(lpot_model, self.ctx, dataloader) + sym_model, calib_data = prepare_model_data(nc_model, self.ctx, dataloader) qsym_model, calib_tensors = quantize_sym_model(sym_model, self.ctx, quant_cfg) collector = self._collect_thresholds(sym_model, calib_data, calib_tensors, calib_cfg, calib_cache) qsym_model = calib_model(qsym_model, collector, calib_cfg, logger) qsym_model = fuse(qsym_model, self.ctx) # post-quantization fusion - q_lpot_model = make_lpot_model(lpot_model, qsym_model, self.ctx, calib_data.input_desc) - q_lpot_model.calib_cache['last'] = collector.th_dict - q_lpot_model.q_config = { + q_nc_model = make_nc_model(nc_model, qsym_model, self.ctx, calib_data.input_desc) + q_nc_model.calib_cache['last'] = collector.th_dict + q_nc_model.q_config = { 'mxnet_version': mx.__version__, 'quant_cfg': quant_cfg, 'calib_cfg': calib_cfg, @@ -94,7 +94,7 @@ def quantize(self, tune_cfg, lpot_model, dataloader, q_func=None): 'input_desc': calib_data.input_desc, 'framework_specific_info': {'device': self.ctx.device_type}} - return q_lpot_model + return q_nc_model def _collect_thresholds(self, sym_model, calib_data, calib_tensors, calib_cfg, calib_cache): """Calculate thresholds for each tensor. The calibration method can be min/max @@ -146,13 +146,13 @@ def b_filter(): collector.th_dict = th_dict return collector - def evaluate(self, lpot_model, data_x, postprocess=None, + def evaluate(self, nc_model, data_x, postprocess=None, metric=None, measurer=None, iteration=-1, tensorboard=False, fp32_baseline=False): """The function is used to run evaluation on validation dataset. Args: - lpot_model (object): model to evaluate. + nc_model (object): model to evaluate. data_x (object): data iterator/loader. postprocess (object, optional): process the result from the model metric (metric object): evaluate metric. @@ -190,23 +190,23 @@ def post_batch(net, batch, outs): if metric is not None: metric.update(out, label) - sym_model, dataloader = prepare_model_data(lpot_model, self.ctx, data_x) + sym_model, dataloader = prepare_model_data(nc_model, self.ctx, data_x) run_forward(sym_model, self.ctx, dataloader, b_filter(), pre_batch=pre_batch, post_batch=post_batch) return metric.result() if metric is not None else 0 @dump_elapsed_time('Query quantizable operators') - def query_fw_capability(self, lpot_model): + def query_fw_capability(self, nc_model): """Query MXNet quantization capability on the model/op level with the specific model. Args: - lpot_model (object): model to query. + nc_model (object): model to query. Returns: dict: modelwise and opwise config. 
""" # op_type_wise and op_wise capability - sym_model, self.qdataloader = prepare_model_data(lpot_model, self.ctx, + sym_model, self.qdataloader = prepare_model_data(nc_model, self.ctx, self.qdataloader) self.quantizable_nodes, self._tensor_to_node = query_quantizable_nodes( sym_model, self.ctx, self.qdataloader) @@ -224,7 +224,7 @@ def query_fw_capability(self, lpot_model): return {'optypewise': op_type_wise, 'opwise': op_wise} - def _inspect_tensor(self, lpot_model, data_x, node_list=[], iteration_list=[]): + def _inspect_tensor(self, nc_model, data_x, node_list=[], iteration_list=[]): def b_filter(): iteration_set = set(iteration_list) if len(iteration_set) == 0: @@ -237,7 +237,7 @@ def b_filter(): i += 1 yield run - sym_model, dataloader = prepare_model_data(lpot_model, self.ctx, data_x) + sym_model, dataloader = prepare_model_data(nc_model, self.ctx, data_x) collector = TensorCollector(node_list, self._qtensor_to_tensor, self._tensor_to_node) num_batches = run_forward(sym_model, self.ctx, dataloader, b_filter(), collector, pre_batch=collector.pre_batch) @@ -245,12 +245,12 @@ def b_filter(): self._qtensor_to_tensor = collector.qtensor_to_tensor return collector.tensors_dicts - def inspect_tensor(self, lpot_model, data_x, op_list=[], iteration_list=[], + def inspect_tensor(self, nc_model, data_x, op_list=[], iteration_list=[], inspect_type='activation', save_to_disk=False): """The function is used by tune strategy class for dumping tensor info. Args: - lpot_model (object): The model to do calibration. + nc_model (object): The model to do calibration. data_x (object): Data iterator/loader. op_list (list): list of inspect tensors. iteration_list (list): list of inspect iterations. @@ -261,15 +261,15 @@ def inspect_tensor(self, lpot_model, data_x, op_list=[], iteration_list=[], if inspect_type not in ['all', 'activation']: raise NotImplementedError() - tensor_dict_list = self._inspect_tensor(lpot_model, data_x, op_list, iteration_list) + tensor_dict_list = self._inspect_tensor(nc_model, data_x, op_list, iteration_list) for tensor_dict in tensor_dict_list: for key, tensors in tensor_dict.items(): for tensor_name, (is_quantized, tensor) in tensors.items(): tensor_dict[key][tensor_name] = tensor # discard is_quantized if is_quantized: assert tensor.dtype in QUANTIZATION_DTYPES - assert 'last' in lpot_model.calib_cache - min_th, max_th = lpot_model.calib_cache['last'][tensor_name] + assert 'last' in nc_model.calib_cache + min_th, max_th = nc_model.calib_cache['last'][tensor_name] tensor_dict[key][tensor_name] = mx.nd.contrib.dequantize( tensor, min_range=mx.nd.array([min_th]).squeeze(), @@ -277,7 +277,7 @@ def inspect_tensor(self, lpot_model, data_x, op_list=[], iteration_list=[], out_type='float32') tensor_dict[key][tensor_name] = tensor_dict[key][tensor_name].asnumpy() - # transform to format expected by lpot (assume only 1 tensor for now) + # transform to format expected by neural_compressor (assume only 1 tensor for now) node, op = key assert len(tensors) == 1, 'Multiple tensors from a single node are not supported' tensor = list(tensor_dict[key].values())[0] @@ -285,12 +285,12 @@ def inspect_tensor(self, lpot_model, data_x, op_list=[], iteration_list=[], return {'activation': tensor_dict_list} - def recover_tuned_model(self, lpot_model, q_config): + def recover_tuned_model(self, nc_model, q_config): """Execute the recover process on the specified model. 
Args: tune_cfg (dict): quantization configuration - lpot_model (object): fp32 model + nc_model (object): fp32 model q_config (dict): recover configuration Returns: @@ -300,7 +300,7 @@ logger.warning('Attempting to recover a model generated with a different ' 'version of MXNet ({})'.format(q_config['mxnet_version'])) - sym_model = prepare_model(lpot_model, self.ctx, q_config['input_desc']) + sym_model = prepare_model(nc_model, self.ctx, q_config['input_desc']) qsym_model, calib_tensors = quantize_sym_model(sym_model, self.ctx, q_config['quant_cfg']) collector = CalibCollector([], []) @@ -310,10 +310,10 @@ qsym_model = calib_model(qsym_model, collector, q_config['calib_cfg'], logger) qsym_model = fuse(qsym_model, self.ctx) # post-quantization fusion - q_lpot_model = make_lpot_model(lpot_model, qsym_model, self.ctx, q_config['input_desc']) - q_lpot_model.calib_cache['last'] = collector.th_dict - q_lpot_model.q_config = q_config - return q_lpot_model + q_nc_model = make_nc_model(nc_model, qsym_model, self.ctx, q_config['input_desc']) + q_nc_model.calib_cache['last'] = collector.th_dict + q_nc_model.q_config = q_config + return q_nc_model def set_tensor(self, model, tensor_dict): '''The function is used by tune strategy class for setting tensor back to model. @@ -353,7 +353,7 @@ def _one_shot_query(self): except Exception as e: logger.info("Fail to parse {} due to {}.".format(self.cfg, str(e))) self.cur_config = None - raise ValueError("Please check if the format of {} follows LPOT yaml schema.". + raise ValueError("Please check if the format of {} follows Neural Compressor yaml schema.". format(self.cfg)) def _get_specified_version_cfg(self, data): diff --git a/lpot/adaptor/mxnet.yaml b/neural_compressor/adaptor/mxnet.yaml similarity index 100% rename from lpot/adaptor/mxnet.yaml rename to neural_compressor/adaptor/mxnet.yaml diff --git a/lpot/adaptor/mxnet_utils/__init__.py b/neural_compressor/adaptor/mxnet_utils/__init__.py similarity index 100% rename from lpot/adaptor/mxnet_utils/__init__.py rename to neural_compressor/adaptor/mxnet_utils/__init__.py diff --git a/lpot/adaptor/mxnet_utils/util.py b/neural_compressor/adaptor/mxnet_utils/util.py similarity index 96% rename from lpot/adaptor/mxnet_utils/util.py rename to neural_compressor/adaptor/mxnet_utils/util.py index df6909cc2b3..b2a11b40014 100644 --- a/lpot/adaptor/mxnet_utils/util.py +++ b/neural_compressor/adaptor/mxnet_utils/util.py @@ -22,8 +22,8 @@ import numpy as np from tempfile import TemporaryDirectory -from lpot.utils.utility import LazyImport -from lpot.model.model import MXNetModel as LPOTModel +from neural_compressor.utils.utility import LazyImport +from neural_compressor.model.model import MXNetModel as NCModel mx = LazyImport("mxnet") @@ -71,8 +71,8 @@ def ensure_list(x): return x if isinstance(x, (tuple, list)) else [x] -def make_lpot_model(target, sym_model, ctx, input_desc): - """Converts a symbolic model to an LPOT model. +def make_nc_model(target, sym_model, ctx, input_desc): + """Converts a symbolic model to a Neural Compressor model. Args: target (object): target model type to return. @@ -80,13 +80,13 @@ input_desc (list): model input data description.
Returns: - LPOTModel: converted lpot model + NCModel: converted neural_compressor model """ assert isinstance(sym_model, tuple) and isinstance(sym_model[0], mx.symbol.Symbol) if isinstance(target.model, mx.gluon.HybridBlock): - return LPOTModel(make_symbol_block(sym_model, ctx, input_desc)) - return LPOTModel(sym_model) + return NCModel(make_symbol_block(sym_model, ctx, input_desc)) + return NCModel(sym_model) def fuse(sym_model, ctx): @@ -106,25 +106,25 @@ def fuse(sym_model, ctx): return (symnet, args, auxs) -def prepare_model_data(lpot_model, ctx, data_x): +def prepare_model_data(nc_model, ctx, data_x): """Prepares sym_model and dataloader needed for quantization, calibration or running. Args: - lpot_model (object): model to prepare. + nc_model (object): model to prepare. data_x (object): data iterator/loader to prepare. Returns: tuple: symbol model (symnet, args, auxs) and DataLoaderWrap. """ - dataloader = prepare_dataloader(lpot_model, ctx, data_x) - sym_model = prepare_model(lpot_model, ctx, dataloader.input_desc) + dataloader = prepare_dataloader(nc_model, ctx, data_x) + sym_model = prepare_model(nc_model, ctx, dataloader.input_desc) return sym_model, dataloader -def prepare_model(lpot_model, ctx, input_desc): - assert isinstance(lpot_model, LPOTModel) +def prepare_model(nc_model, ctx, input_desc): + assert isinstance(nc_model, NCModel) - model_x = lpot_model.model + model_x = nc_model.model if isinstance(model_x, mx.gluon.HybridBlock): if len(model_x._cached_graph) == 0: model_x.hybridize() @@ -143,8 +143,8 @@ def prepare_model(lpot_model, ctx, input_desc): return sym_model -def prepare_dataloader(lpot_model, ctx, data_x): - assert isinstance(lpot_model, LPOTModel) +def prepare_dataloader(nc_model, ctx, data_x): + assert isinstance(nc_model, NCModel) if isinstance(data_x, DataLoaderWrap): return data_x @@ -154,7 +154,7 @@ def prepare_dataloader(lpot_model, ctx, data_x): dataloader = DataIterLoader(dataloader) assert isiterable(dataloader), 'Dataloader must be iterable (mx.gluon.data.DataLoader-like)' - model_x = lpot_model.model + model_x = nc_model.model if isinstance(model_x, mx.gluon.HybridBlock): data = ensure_list(next(iter(dataloader))) # data example data = [d.as_in_context(ctx) for d in data] @@ -443,7 +443,7 @@ def parse_tune_config(tune_cfg, quantizable_nodes): """Convert the strategy config to MXNet quantization config. Args: - tune_cfg (dict): tune config from lpot strategy. + tune_cfg (dict): tune config from neural_compressor strategy. quantizable_nodes (list): quantizable nodes in the model. 
Returns: diff --git a/lpot/adaptor/onnxrt.py b/neural_compressor/adaptor/onnxrt.py similarity index 95% rename from lpot/adaptor/onnxrt.py rename to neural_compressor/adaptor/onnxrt.py index 3e25cd92a98..27133dccee8 100644 --- a/lpot/adaptor/onnxrt.py +++ b/neural_compressor/adaptor/onnxrt.py @@ -24,9 +24,9 @@ import yaml import numpy as np from distutils.version import StrictVersion -from lpot.adaptor.adaptor import adaptor_registry, Adaptor -from lpot.adaptor.query import QueryBackendCapability -from lpot.utils.utility import LazyImport, dump_elapsed_time +from neural_compressor.adaptor.adaptor import adaptor_registry, Adaptor +from neural_compressor.adaptor.query import QueryBackendCapability +from neural_compressor.utils.utility import LazyImport, dump_elapsed_time from ..utils.utility import OpPrecisionStatistics onnx = LazyImport("onnx") @@ -82,7 +82,7 @@ def quantize(self, tune_cfg, model, data_loader, q_func=None): return model if model.model.opset_import[0].version < 11: # pragma: no cover logger.warning("Quantize input needs model opset 11 or newer.") - from lpot.adaptor.ox_utils.onnx_quantizer import ONNXQuantizer + from neural_compressor.adaptor.ox_utils.onnx_quantizer import ONNXQuantizer from onnxruntime.quantization.quant_utils import QuantizationMode backend = QuantizationMode.QLinearOps if self.backend == \ "qlinearops" else QuantizationMode.IntegerOps @@ -153,7 +153,7 @@ def recover(self, model, q_config): if model.model.opset_import[0].version < 11: # pragma: no cover logger.warning("Quantize input needs model opset 11 or newer.") - from lpot.adaptor.ox_utils.onnx_quantizer import ONNXQuantizer + from neural_compressor.adaptor.ox_utils.onnx_quantizer import ONNXQuantizer from onnxruntime.quantization.quant_utils import QuantizationMode backend = QuantizationMode.QLinearOps if self.backend == \ "qlinearops" else QuantizationMode.IntegerOps @@ -251,8 +251,8 @@ def _dump_model_op_stastics(self, model): OpPrecisionStatistics(output_data).print_stat() def _get_quantize_params(self, model, data_loader, quantize_config, iterations): - from lpot.adaptor.ox_utils.onnxrt_mid import ONNXRTAugment - from lpot.model.onnx_model import ONNXModel + from neural_compressor.adaptor.ox_utils.onnxrt_mid import ONNXRTAugment + from neural_compressor.model.onnx_model import ONNXModel if not isinstance(model, ONNXModel): model = ONNXModel(model) black_nodes = [node for node in quantize_config if quantize_config[node]=='fp32'] @@ -271,8 +271,8 @@ def inspect_tensor(self, model, data_loader, op_list=[], save_to_disk=False): '''The function is used by tune strategy class for dumping tensor info. 
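The `quantize` and `recover` hunks above pick the quantization mode from the configured backend string before handing the model to `ONNXQuantizer`. A minimal sketch of that dispatch, with `backend_name` as an illustrative stand-in for `self.backend`:

```python
from onnxruntime.quantization.quant_utils import QuantizationMode

backend_name = "qlinearops"  # the adaptor reads this from its config

# "qlinearops" selects the QLinear* ops path; anything else falls back to
# the IntegerOps (dynamic) path, mirroring the conditional in the hunk above.
mode = (QuantizationMode.QLinearOps if backend_name == "qlinearops"
        else QuantizationMode.IntegerOps)
print(mode)
```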
''' - from lpot.adaptor.ox_utils.onnxrt_mid import ONNXRTAugment - from lpot.model.onnx_model import ONNXModel + from neural_compressor.adaptor.ox_utils.onnxrt_mid import ONNXRTAugment + from neural_compressor.model.onnx_model import ONNXModel if not isinstance(model, ONNXModel): model = ONNXModel(model) if len(op_list) > 0 and isinstance(op_list, KeysView): @@ -289,9 +289,9 @@ def inspect_tensor(self, model, data_loader, op_list=[], def set_tensor(self, model, tensor_dict): from onnx import numpy_helper - from lpot.model.onnx_model import ONNXModel - from lpot.adaptor.ox_utils.util import quantize_data_with_scale_zero - from lpot.adaptor.ox_utils.util import quantize_data_per_channel + from neural_compressor.model.onnx_model import ONNXModel + from neural_compressor.adaptor.ox_utils.util import quantize_data_with_scale_zero + from neural_compressor.adaptor.ox_utils.util import quantize_data_per_channel if not isinstance(model, ONNXModel): model = ONNXModel(model) assert "QuantizeLinear" in [node.op_type for node in model.model.graph.node], \ @@ -347,7 +347,7 @@ def _requantize_bias(self, model, bias_name, bias_data): return new_bias_data def _pre_optimize(self, model, level=1): - from lpot.adaptor.ox_utils.util import split_shared_input + from neural_compressor.adaptor.ox_utils.util import split_shared_input model = split_shared_input(model) sess_options = ort.SessionOptions() level = self.query_handler.get_graph_optimization() # pylint: disable=no-member @@ -364,18 +364,18 @@ def _rename_node(self, model): node_names = [i.name for i in model.graph.node] if len(set(node_names)) < len(node_names): logger.warning("This model has nodes with the same name, please check \ - renamed_model.onnx in workspace_path (default is lpot_workspace) \ + renamed_model.onnx in workspace_path (default is nc_workspace) \ for newly generated node name") for idx, node in enumerate(model.graph.node): if node_names.count(node.name) > 1: - node.name = node.op_type + '_lpot_rename_' + str(idx) + node.name = node.op_type + '_nc_rename_' + str(idx) onnx.save(model, os.path.join(self.work_space, "renamed_model.onnx")) return model def _replace_gemm_with_matmul(self, model): new_nodes = [] from onnx import numpy_helper - from lpot.model.onnx_model import ONNXModel + from neural_compressor.model.onnx_model import ONNXModel if not isinstance(model, ONNXModel): model = ONNXModel(model) @@ -533,10 +533,10 @@ def evaluate(self, input_graph, dataloader, postprocess=None, Args: input_graph : onnx model for evaluation - dataloader : dataloader for evaluation. lpot.data.dataloader.ONNXDataLoader - postprocess : post-process for evalution. lpot.data.transform.ONNXTransforms - metrics: : metrics for evaluation. lpot.metric.ONNXMetrics - measurer : lpot.objective.Measurer + dataloader : dataloader for evaluation. neural_compressor.data.dataloader.ONNXDataLoader + postprocess : post-process for evaluation. neural_compressor.data.transform.ONNXTransforms + metrics : metrics for evaluation. neural_compressor.metric.ONNXMetrics + measurer : neural_compressor.objective.Measurer iteration(int) : max iterations of evaluaton. 
tensorboard(bool): whether to use tensorboard for visualizaton fp32_baseline (boolen, optional): only for compare_label=False pipeline @@ -595,7 +595,7 @@ def evaluate(self, input_graph, dataloader, postprocess=None, break if self.fp32_preds_as_label: - from lpot.adaptor.ox_utils.util import collate_preds + from neural_compressor.adaptor.ox_utils.util import collate_preds if fp32_baseline: results = collate_preds(self.fp32_results) metric.update(results, results) @@ -663,7 +663,7 @@ def _one_shot_query(self): except Exception as e: # pragma: no cover logger.info("Fail to parse {} due to {}.".format(self.cfg, str(e))) self.cur_config = None - raise ValueError("Please check if the format of {} follows LPOT yaml schema.". + raise ValueError("Please check if the format of {} follows Neural Compressor yaml schema.". format(self.cfg)) def _get_specified_version_cfg(self, data): diff --git a/lpot/adaptor/onnxrt_integer.yaml b/neural_compressor/adaptor/onnxrt_integer.yaml similarity index 100% rename from lpot/adaptor/onnxrt_integer.yaml rename to neural_compressor/adaptor/onnxrt_integer.yaml diff --git a/lpot/adaptor/onnxrt_qlinear.yaml b/neural_compressor/adaptor/onnxrt_qlinear.yaml similarity index 100% rename from lpot/adaptor/onnxrt_qlinear.yaml rename to neural_compressor/adaptor/onnxrt_qlinear.yaml diff --git a/lpot/adaptor/ox_utils/__init__.py b/neural_compressor/adaptor/ox_utils/__init__.py similarity index 100% rename from lpot/adaptor/ox_utils/__init__.py rename to neural_compressor/adaptor/ox_utils/__init__.py diff --git a/lpot/adaptor/ox_utils/onnx_quantizer.py b/neural_compressor/adaptor/ox_utils/onnx_quantizer.py similarity index 99% rename from lpot/adaptor/ox_utils/onnx_quantizer.py rename to neural_compressor/adaptor/ox_utils/onnx_quantizer.py index 2df2188acb2..5b8fb65dd24 100644 --- a/lpot/adaptor/ox_utils/onnx_quantizer.py +++ b/neural_compressor/adaptor/ox_utils/onnx_quantizer.py @@ -36,11 +36,11 @@ generate_identified_filename, attribute_to_kwarg, type_to_name from onnxruntime.quantization.quant_utils import onnx_domain, __producer__, __version__ -from lpot.adaptor.ox_utils.registry import CreateOpQuantizer, CreateDefaultOpQuantizer -from lpot.adaptor.ox_utils.util import quantize_data_with_scale_zero, quantize_data, \ +from neural_compressor.adaptor.ox_utils.registry import CreateOpQuantizer, CreateDefaultOpQuantizer +from neural_compressor.adaptor.ox_utils.util import quantize_data_with_scale_zero, quantize_data, \ QuantizedValue, QuantizedInitializer -from lpot.model.onnx_model import ONNXModel -from lpot.utils.utility import CpuInfo +from neural_compressor.model.onnx_model import ONNXModel +from neural_compressor.utils.utility import CpuInfo def _get_qrange_for_qType(qType, reduce_range=False): diff --git a/lpot/adaptor/ox_utils/onnxrt_mid.py b/neural_compressor/adaptor/ox_utils/onnxrt_mid.py similarity index 99% rename from lpot/adaptor/ox_utils/onnxrt_mid.py rename to neural_compressor/adaptor/ox_utils/onnxrt_mid.py index 520e76d8905..7dbc3967313 100644 --- a/lpot/adaptor/ox_utils/onnxrt_mid.py +++ b/neural_compressor/adaptor/ox_utils/onnxrt_mid.py @@ -31,7 +31,7 @@ import onnx.numpy_helper as numpy_helper from onnx import helper, TensorProto, shape_inference from distutils.version import StrictVersion -from lpot.model.onnx_model import ONNXModel +from neural_compressor.model.onnx_model import ONNXModel logger = logging.getLogger() ONNX18_VERSION = StrictVersion("1.8.0") diff --git a/lpot/adaptor/ox_utils/operators/__init__.py 
b/neural_compressor/adaptor/ox_utils/operators/__init__.py similarity index 100% rename from lpot/adaptor/ox_utils/operators/__init__.py rename to neural_compressor/adaptor/ox_utils/operators/__init__.py diff --git a/lpot/adaptor/ox_utils/operators/activation.py b/neural_compressor/adaptor/ox_utils/operators/activation.py similarity index 98% rename from lpot/adaptor/ox_utils/operators/activation.py rename to neural_compressor/adaptor/ox_utils/operators/activation.py index cf6b24a6144..d13882850f2 100644 --- a/lpot/adaptor/ox_utils/operators/activation.py +++ b/neural_compressor/adaptor/ox_utils/operators/activation.py @@ -21,7 +21,7 @@ from onnxruntime.quantization.quant_utils import QuantizedValueType, \ attribute_to_kwarg, ms_domain from onnx import onnx_pb as onnx_proto -from lpot.adaptor.ox_utils.util import QuantizedValue +from neural_compressor.adaptor.ox_utils.util import QuantizedValue class QLinearActivation(QuantOperatorBase): diff --git a/lpot/adaptor/ox_utils/operators/attention.py b/neural_compressor/adaptor/ox_utils/operators/attention.py similarity index 100% rename from lpot/adaptor/ox_utils/operators/attention.py rename to neural_compressor/adaptor/ox_utils/operators/attention.py diff --git a/lpot/adaptor/ox_utils/operators/base_operator.py b/neural_compressor/adaptor/ox_utils/operators/base_operator.py similarity index 100% rename from lpot/adaptor/ox_utils/operators/base_operator.py rename to neural_compressor/adaptor/ox_utils/operators/base_operator.py diff --git a/lpot/adaptor/ox_utils/operators/binary_op.py b/neural_compressor/adaptor/ox_utils/operators/binary_op.py similarity index 97% rename from lpot/adaptor/ox_utils/operators/binary_op.py rename to neural_compressor/adaptor/ox_utils/operators/binary_op.py index 719d9081fca..ae7ce6cf997 100644 --- a/lpot/adaptor/ox_utils/operators/binary_op.py +++ b/neural_compressor/adaptor/ox_utils/operators/binary_op.py @@ -21,7 +21,7 @@ from onnxruntime.quantization.quant_utils import attribute_to_kwarg, ms_domain, \ QuantizedValueType from onnx import onnx_pb as onnx_proto -from lpot.adaptor.ox_utils.util import QuantizedValue +from neural_compressor.adaptor.ox_utils.util import QuantizedValue class QLinearBinaryOp(QuantOperatorBase): def __init__(self, onnx_quantizer, onnx_node): diff --git a/lpot/adaptor/ox_utils/operators/conv.py b/neural_compressor/adaptor/ox_utils/operators/conv.py similarity index 99% rename from lpot/adaptor/ox_utils/operators/conv.py rename to neural_compressor/adaptor/ox_utils/operators/conv.py index d778db951fb..c2d3e460331 100644 --- a/lpot/adaptor/ox_utils/operators/conv.py +++ b/neural_compressor/adaptor/ox_utils/operators/conv.py @@ -21,7 +21,7 @@ from onnxruntime.quantization.quant_utils import find_by_name, get_mul_node, \ QuantizedValueType, attribute_to_kwarg from onnx import onnx_pb as onnx_proto -from lpot.adaptor.ox_utils.util import QuantizedValue +from neural_compressor.adaptor.ox_utils.util import QuantizedValue class ConvInteger(QuantOperatorBase): def __init__(self, onnx_quantizer, onnx_node): diff --git a/lpot/adaptor/ox_utils/operators/embed_layernorm.py b/neural_compressor/adaptor/ox_utils/operators/embed_layernorm.py similarity index 100% rename from lpot/adaptor/ox_utils/operators/embed_layernorm.py rename to neural_compressor/adaptor/ox_utils/operators/embed_layernorm.py diff --git a/lpot/adaptor/ox_utils/operators/gather.py b/neural_compressor/adaptor/ox_utils/operators/gather.py similarity index 96% rename from lpot/adaptor/ox_utils/operators/gather.py rename to 
neural_compressor/adaptor/ox_utils/operators/gather.py index f17c6b58677..38e569f608a 100644 --- a/lpot/adaptor/ox_utils/operators/gather.py +++ b/neural_compressor/adaptor/ox_utils/operators/gather.py @@ -20,7 +20,7 @@ from .base_operator import QuantOperatorBase from onnxruntime.quantization.quant_utils import QuantizedValueType from onnx import onnx_pb as onnx_proto -from lpot.adaptor.ox_utils.util import QuantizedValue +from neural_compressor.adaptor.ox_utils.util import QuantizedValue ''' Quantize Gather ''' diff --git a/lpot/adaptor/ox_utils/operators/gavgpool.py b/neural_compressor/adaptor/ox_utils/operators/gavgpool.py similarity index 96% rename from lpot/adaptor/ox_utils/operators/gavgpool.py rename to neural_compressor/adaptor/ox_utils/operators/gavgpool.py index b67aa0fbe39..0a24debe924 100644 --- a/lpot/adaptor/ox_utils/operators/gavgpool.py +++ b/neural_compressor/adaptor/ox_utils/operators/gavgpool.py @@ -20,7 +20,7 @@ from .base_operator import QuantOperatorBase from onnxruntime.quantization.quant_utils import attribute_to_kwarg, ms_domain, \ QuantizedValueType -from lpot.adaptor.ox_utils.util import QuantizedValue +from neural_compressor.adaptor.ox_utils.util import QuantizedValue class QGlobalAveragePool(QuantOperatorBase): def __init__(self, onnx_quantizer, onnx_node): super().__init__(onnx_quantizer, onnx_node) diff --git a/lpot/adaptor/ox_utils/operators/lstm.py b/neural_compressor/adaptor/ox_utils/operators/lstm.py similarity index 100% rename from lpot/adaptor/ox_utils/operators/lstm.py rename to neural_compressor/adaptor/ox_utils/operators/lstm.py diff --git a/lpot/adaptor/ox_utils/operators/matmul.py b/neural_compressor/adaptor/ox_utils/operators/matmul.py similarity index 99% rename from lpot/adaptor/ox_utils/operators/matmul.py rename to neural_compressor/adaptor/ox_utils/operators/matmul.py index a09701c35ac..79a38bafbc1 100644 --- a/lpot/adaptor/ox_utils/operators/matmul.py +++ b/neural_compressor/adaptor/ox_utils/operators/matmul.py @@ -21,7 +21,7 @@ from onnxruntime.quantization.quant_utils import find_by_name, get_mul_node, \ QuantizedValueType from onnx import onnx_pb as onnx_proto -from lpot.adaptor.ox_utils.util import QuantizedValue +from neural_compressor.adaptor.ox_utils.util import QuantizedValue ''' Used when quantize mode is QuantizationMode.IntegerOps. 
''' diff --git a/lpot/adaptor/ox_utils/operators/maxpool.py b/neural_compressor/adaptor/ox_utils/operators/maxpool.py similarity index 97% rename from lpot/adaptor/ox_utils/operators/maxpool.py rename to neural_compressor/adaptor/ox_utils/operators/maxpool.py index b365ef20f13..b929198c0cc 100644 --- a/lpot/adaptor/ox_utils/operators/maxpool.py +++ b/neural_compressor/adaptor/ox_utils/operators/maxpool.py @@ -20,7 +20,7 @@ from .base_operator import QuantOperatorBase from onnxruntime.quantization.quant_utils import QuantizedValueType from onnx import onnx_pb as onnx_proto -from lpot.adaptor.ox_utils.util import QuantizedValue +from neural_compressor.adaptor.ox_utils.util import QuantizedValue class QMaxPool(QuantOperatorBase): diff --git a/lpot/adaptor/ox_utils/operators/pad.py b/neural_compressor/adaptor/ox_utils/operators/pad.py similarity index 98% rename from lpot/adaptor/ox_utils/operators/pad.py rename to neural_compressor/adaptor/ox_utils/operators/pad.py index 2ffb355737e..77ab1ed4156 100644 --- a/lpot/adaptor/ox_utils/operators/pad.py +++ b/neural_compressor/adaptor/ox_utils/operators/pad.py @@ -21,7 +21,7 @@ from onnxruntime.quantization.quant_utils import QuantizedValueType, \ attribute_to_kwarg from .base_operator import QuantOperatorBase -from lpot.adaptor.ox_utils.util import QuantizedValue +from neural_compressor.adaptor.ox_utils.util import QuantizedValue class QPad(QuantOperatorBase): def __init__(self, onnx_quantizer, onnx_node): diff --git a/lpot/adaptor/ox_utils/operators/split.py b/neural_compressor/adaptor/ox_utils/operators/split.py similarity index 97% rename from lpot/adaptor/ox_utils/operators/split.py rename to neural_compressor/adaptor/ox_utils/operators/split.py index 483be104a47..117af6a6041 100644 --- a/lpot/adaptor/ox_utils/operators/split.py +++ b/neural_compressor/adaptor/ox_utils/operators/split.py @@ -20,7 +20,7 @@ from onnxruntime.quantization.quant_utils import QuantizedValueType, \ attribute_to_kwarg from .base_operator import QuantOperatorBase -from lpot.adaptor.ox_utils.util import QuantizedValue +from neural_compressor.adaptor.ox_utils.util import QuantizedValue class QSplit(QuantOperatorBase): def __init__(self, onnx_quantizer, onnx_node): diff --git a/lpot/adaptor/ox_utils/registry.py b/neural_compressor/adaptor/ox_utils/registry.py similarity index 100% rename from lpot/adaptor/ox_utils/registry.py rename to neural_compressor/adaptor/ox_utils/registry.py diff --git a/lpot/adaptor/ox_utils/util.py b/neural_compressor/adaptor/ox_utils/util.py similarity index 99% rename from lpot/adaptor/ox_utils/util.py rename to neural_compressor/adaptor/ox_utils/util.py index bad00c9119c..38acd0462e3 100644 --- a/lpot/adaptor/ox_utils/util.py +++ b/neural_compressor/adaptor/ox_utils/util.py @@ -28,7 +28,7 @@ def split_shared_input(model): for node in node_list[1:]: for i, node_input_name in enumerate(node.input): if node_input_name == input_name: - new_input_name = node_input_name + '_lpot_split_' + node.name + new_input_name = node_input_name + '_nc_split_' + node.name new_input = helper.make_tensor( new_input_name, model.get_initializer(input_name).data_type, diff --git a/lpot/adaptor/pytorch.py b/neural_compressor/adaptor/pytorch.py similarity index 98% rename from lpot/adaptor/pytorch.py rename to neural_compressor/adaptor/pytorch.py index 8fac750125a..53df60bd188 100644 --- a/lpot/adaptor/pytorch.py +++ b/neural_compressor/adaptor/pytorch.py @@ -22,7 +22,7 @@ from distutils.version import LooseVersion import yaml from functools import partial -from 
lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from .adaptor import adaptor_registry, Adaptor from ..utils.utility import LazyImport, CpuInfo from ..utils.utility import OpPrecisionStatistics @@ -693,7 +693,7 @@ def query_fw_capability(self, model): """This is a helper function to get all quantizable ops from model. Args: - model (object): input model which is LPOT model + model (object): input model, which is a Neural Compressor model Returns: q_capability (dictionary): tuning capability for each op from model. @@ -913,8 +913,8 @@ def quantize(self, tune_cfg, model, dataloader, q_func=None): else: # pragma: no cover torch.quantization.add_observer_(q_model.model) torch.quantization.convert(q_model.model, self.q_mapping, inplace=True) - # q_func can be created by lpot internal or passed by user. It's critical to - # distinguish how q_func is passed since lpot built-in functions accept lpot + # q_func can be created by neural_compressor internally or passed by the user. It's critical to + # distinguish how q_func is passed since neural_compressor built-in functions accept a neural_compressor # model and user defined func should accept framework model. q_func(q_model if getattr(q_func, 'builtin', None) else q_model.model) q_model.model.eval() @@ -1052,7 +1052,7 @@ def train(self, model, dataloader, optimizer_tuple, criterion_tuple, hooks, **kw None """ model_ = model.model - # self.model is set to lpot model here to hold the inplace change in FWK model. + # self.model is set to the neural_compressor model here to hold the inplace change in the FWK model. self.model = model optimizer = optimizer_tuple[0](model_.parameters(), **optimizer_tuple[1]) self.optimizer = optimizer @@ -1113,7 +1113,7 @@ def _dump_model_op_stastics(self, model, tune_cfg): """ res = {} modules = dict(model.named_modules()) - # fetch quantizable ops supported in LPOT from tune_cfg + # fetch quantizable ops supported in Neural Compressor from tune_cfg for key in tune_cfg['op']: op_name = key[0] op_type = str(type(modules[op_name])).rstrip('\'>').split('.')[-1] @@ -1835,7 +1835,7 @@ def quantize(self, tune_cfg, model, dataloader, q_func=None): Args: tune_cfg (dict): quantization config. - model (object): model need to do quantization, it is LPOT model. + model (object): model that needs quantization; it is a Neural Compressor model. dataloader (object): calibration dataset. q_func (objext, optional): training function for quantization aware training mode. @@ -1936,7 +1936,7 @@ def evaluate(self, model, dataloader, postprocess=None, """Execute the evaluate process on the specified model. Args: - model (object): LPOT model to run evaluation. + model (object): Neural Compressor model to run evaluation. dataloader (object): evaluation dataset. postprocess (object, optional): process function after evaluation. metric (object, optional): metric function. @@ -2028,10 +2028,10 @@ def _get_quantizable_ops_recursively(self, model, prefix, quantizable_ops): @dump_elapsed_time("Pass save quantized model") def save(self, model, path=None): - """The function is used by tune strategy class for set best configure in LPOT model. + """The function is used by the tune strategy class for setting the best configuration in the Neural Compressor model. Args: - model (object): The LPOT model which is best results. + model (object): The Neural Compressor model with the best results. path (string): No used. 
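The `q_func` dispatch in the quantize hunks above hinges on a `builtin` attribute: built-in training functions receive the Neural Compressor model wrapper, while user-defined functions receive the raw framework model. A self-contained sketch of that branch (`NCModelStub` and `user_train_func` are invented stand-ins for illustration):

```python
class NCModelStub:
    """Invented stand-in for the Neural Compressor model wrapper."""
    def __init__(self, framework_model):
        self.model = framework_model  # the underlying torch.nn.Module in real use

def user_train_func(model):
    print("user func got the framework model:", type(model).__name__)

q_model = NCModelStub(framework_model=object())
q_func = user_train_func  # no 'builtin' attribute -> treated as user-defined

# Same expression as the adaptor: unwrap the model for user-defined functions.
q_func(q_model if getattr(q_func, 'builtin', None) else q_model.model)
```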
Returns: @@ -2157,8 +2157,8 @@ def quantize(self, tune_cfg, model, dataloader, q_func=None): prepare_custom_config_dict=q_model.kwargs['prepare_custom_config_dict'] if q_model.kwargs is not None and q_model.kwargs.__contains__('prepare_custom_config_dict') else None) - # q_func can be created by lpot internal or passed by user. It's critical to - # distinguish how q_func is passed since lpot built-in functions accept lpot + # q_func can be created by neural_compressor internally or passed by the user. It's critical to + # distinguish how q_func is passed since neural_compressor built-in functions accept a neural_compressor # model and user defined func should accept framework model. q_func(q_model if getattr(q_func, 'builtin', None) else q_model.model) q_model.model.eval() @@ -2258,7 +2258,7 @@ def _pre_hook_for_qat(self): # pragma: no cover qscheme=torch.per_tensor_affine, reduce_range=REDUCE_RANGE), weight=torch.quantization.default_weight_fake_quant) - # prepare_qat_fx can not change inplaced. Use lpot model to hold FWK model and manually + # prepare_qat_fx cannot change the model in place. Use the neural_compressor model to hold the FWK model and manually # change the model. self.model.model = prepare_qat_fx(self.model.model, {"": qconfig}) @@ -2334,7 +2334,7 @@ def _dump_model_op_stastics(self, model, tune_cfg, approach): modules = dict(model.named_modules()) res = {} if approach == 'post_training_dynamic_quant': - # fetch int8 and fp32 ops set by LPOT from tune_cfg + # fetch int8 and fp32 ops set by Neural Compressor from tune_cfg for key in tune_cfg['op']: op_type = str(type(modules[key[0]])).rstrip('\'>').split('.')[-1] #build initial dict @@ -2482,7 +2482,7 @@ def _one_shot_query(self): except Exception as e: # pragma: no cover logger.info("Fail to parse {} due to {}".format(self.cfg, str(e))) self.cur_config = None - raise ValueError("Please check if the format of {} follows LPOT yaml schema.". + raise ValueError("Please check if the format of {} follows Neural Compressor yaml schema.". 
format(self.cfg)) def get_quantization_capability(self): diff --git a/lpot/adaptor/pytorch_cpu.yaml b/neural_compressor/adaptor/pytorch_cpu.yaml similarity index 100% rename from lpot/adaptor/pytorch_cpu.yaml rename to neural_compressor/adaptor/pytorch_cpu.yaml diff --git a/lpot/adaptor/pytorch_gpu.yaml b/neural_compressor/adaptor/pytorch_gpu.yaml similarity index 100% rename from lpot/adaptor/pytorch_gpu.yaml rename to neural_compressor/adaptor/pytorch_gpu.yaml diff --git a/lpot/adaptor/pytorch_ipex.yaml b/neural_compressor/adaptor/pytorch_ipex.yaml similarity index 100% rename from lpot/adaptor/pytorch_ipex.yaml rename to neural_compressor/adaptor/pytorch_ipex.yaml diff --git a/lpot/adaptor/query.py b/neural_compressor/adaptor/query.py similarity index 100% rename from lpot/adaptor/query.py rename to neural_compressor/adaptor/query.py diff --git a/lpot/adaptor/tensorflow.py b/neural_compressor/adaptor/tensorflow.py similarity index 98% rename from lpot/adaptor/tensorflow.py rename to neural_compressor/adaptor/tensorflow.py index 918dd54e9f5..9527c2cc5de 100644 --- a/lpot/adaptor/tensorflow.py +++ b/neural_compressor/adaptor/tensorflow.py @@ -100,7 +100,7 @@ def train(self, model, dataloader, optimizer_tuple, # check model is savedmodel or not import tensorflow as tf from tensorflow import keras - from lpot.model.model import get_model_type + from neural_compressor.model.model import get_model_type assert get_model_type(model._model) == 'keras', "Support SavedModel only" input_model = tf.keras.models.load_model(model._model) @@ -108,16 +108,16 @@ def train(self, model, dataloader, optimizer_tuple, optimizer = optimizer_tuple[0](**optimizer_tuple[1]) criterion = criterion_tuple[0](**criterion_tuple[1]) class TfPruningCallback(keras.callbacks.Callback): - def __init__(self, lpot_model, hooks): + def __init__(self, nc_model, hooks): self.hooks = hooks - self.lpot_model = lpot_model + self.nc_model = nc_model def _set_weights(self): res = {} for index, layer in enumerate(self.model.layers): if len(layer.weights): res[index] = layer.get_weights()[0] - self.lpot_model.weights = res + self.nc_model.weights = res def on_train_begin(self, logs=None): self.hooks['pre_epoch_begin']() @@ -344,7 +344,7 @@ def evaluate(self, model, dataloader, postprocess=None, return acc def tuning_cfg_to_fw(self, tuning_cfg): - """Parse the lpot wrapped configuration to Tensorflow. + """Parse the neural_compressor wrapped configuration to Tensorflow. Args: tuning_cfg (dict): configuration for quantization. @@ -413,7 +413,7 @@ def quantize(self, tune_cfg, model, data_loader, q_func=None): assert q_func is not None, "quantization aware training mode \ is not configured correctly" - from lpot.experimental import common + from neural_compressor.experimental import common qat_model = q_func(model) return self.convert(common.Model(qat_model), 'QAT', 'default') @@ -632,7 +632,7 @@ def _get_fp32_op_name(model, tensor_name): return is_weight, is_biasadd, current_node_name, last_node_name - from lpot.adaptor.tf_utils.graph_rewriter.graph_util import GraphRewriterHelper as Helper + from neural_compressor.adaptor.tf_utils.graph_rewriter.graph_util import GraphRewriterHelper as Helper from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_util from tensorflow.core.framework import attr_value_pb2 @@ -859,7 +859,7 @@ def convert(self, model, source, destination): '''The function is used to convert a source model format to another. Args: - model (lpot.model): base model to be converted. 
+ model (neural_compressor.model): base model to be converted. source (string): The source model format. destination (string): The destination model format. ''' @@ -1001,7 +1001,7 @@ def _one_shot_query(self): except Exception as e: logger.info("Fail to parse {} due to {}.".format(self.cfg, str(e))) self.cur_config = None - raise ValueError("Please check if the format of {} follows LPOT yaml schema.". + raise ValueError("Please check if the format of {} follows Neural Compressor yaml schema.". format(self.cfg)) def get_version(self): diff --git a/lpot/adaptor/tensorflow.yaml b/neural_compressor/adaptor/tensorflow.yaml similarity index 100% rename from lpot/adaptor/tensorflow.yaml rename to neural_compressor/adaptor/tensorflow.yaml diff --git a/lpot/adaptor/tensorflow_itex.yaml b/neural_compressor/adaptor/tensorflow_itex.yaml similarity index 100% rename from lpot/adaptor/tensorflow_itex.yaml rename to neural_compressor/adaptor/tensorflow_itex.yaml diff --git a/lpot/adaptor/tf_utils/__init__.py b/neural_compressor/adaptor/tf_utils/__init__.py similarity index 100% rename from lpot/adaptor/tf_utils/__init__.py rename to neural_compressor/adaptor/tf_utils/__init__.py diff --git a/lpot/adaptor/tf_utils/graph_converter.py b/neural_compressor/adaptor/tf_utils/graph_converter.py similarity index 97% rename from lpot/adaptor/tf_utils/graph_converter.py rename to neural_compressor/adaptor/tf_utils/graph_converter.py index 0aebf86f638..33ede3246cf 100644 --- a/lpot/adaptor/tf_utils/graph_converter.py +++ b/neural_compressor/adaptor/tf_utils/graph_converter.py @@ -24,14 +24,14 @@ from tensorflow.core.framework import graph_pb2 from tensorflow.python.framework import tensor_util from tensorflow.python.platform import gfile -from lpot.utils.utility import get_all_fp32_data -from lpot.utils.utility import get_tensor_histogram -from lpot.utils.utility import combine_histogram -from lpot.utils.utility import CaptureOutputToFile -from lpot.utils.utility import str2array -from lpot.utils.utility import Dequantize, DequantizeWeight -from lpot.conf.dotdict import deep_get -from lpot.experimental.common import Model +from neural_compressor.utils.utility import get_all_fp32_data +from neural_compressor.utils.utility import get_tensor_histogram +from neural_compressor.utils.utility import combine_histogram +from neural_compressor.utils.utility import CaptureOutputToFile +from neural_compressor.utils.utility import str2array +from neural_compressor.utils.utility import Dequantize, DequantizeWeight +from neural_compressor.conf.dotdict import deep_get +from neural_compressor.experimental.common import Model from .transform_graph.insert_logging import InsertLogging from .transform_graph.rerange_quantized_concat import RerangeQuantizedConcat from .transform_graph.bias_correction import BiasCorrection @@ -58,8 +58,8 @@ from .graph_rewriter.int8.meta_op_optimizer import MetaInfoChangingMemOpOptimizer from .graph_rewriter.int8.rnn_convert import QuantizedRNNConverter from .graph_rewriter.itex.itex_convert import GenerateITEXModel -from lpot.adaptor.tf_utils.graph_rewriter.generic.insert_print_node import InsertPrintMinMaxNode -from lpot.adaptor.tf_utils.graph_rewriter.graph_util import GraphRewriterHelper as Helper +from neural_compressor.adaptor.tf_utils.graph_rewriter.generic.insert_print_node import InsertPrintMinMaxNode +from neural_compressor.adaptor.tf_utils.graph_rewriter.graph_util import GraphRewriterHelper as Helper TF_SUPPORTED_MAX_VERSION = '2.6.0' diff --git 
a/lpot/adaptor/tf_utils/graph_converter_without_calib.py b/neural_compressor/adaptor/tf_utils/graph_converter_without_calib.py similarity index 97% rename from lpot/adaptor/tf_utils/graph_converter_without_calib.py rename to neural_compressor/adaptor/tf_utils/graph_converter_without_calib.py index 8b1e96d08eb..7f04d44e428 100644 --- a/lpot/adaptor/tf_utils/graph_converter_without_calib.py +++ b/neural_compressor/adaptor/tf_utils/graph_converter_without_calib.py @@ -21,8 +21,8 @@ import logging import tensorflow as tf from tensorflow.python.platform import gfile -from lpot.conf.dotdict import deep_get -from lpot.experimental.common import Model +from neural_compressor.conf.dotdict import deep_get +from neural_compressor.experimental.common import Model from .transform_graph.rerange_quantized_concat import RerangeQuantizedConcat from .transform_graph.bias_correction import BiasCorrection from .quantize_graph.quantize_graph_for_intel_cpu import QuantizeGraphForIntel diff --git a/lpot/adaptor/tf_utils/graph_rewriter/__init__.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/__init__.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/__init__.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/__init__.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/bf16/__init__.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/__init__.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/bf16/__init__.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/__init__.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/bf16/bf16_convert.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/__init__.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/__init__.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/__init__.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/__init__.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd.py similarity index 97% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd.py index 61babe17ebc..f2862f61633 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_add_to_biasadd.py @@ -17,7 +17,7 @@ import numpy as np from tensorflow.python.framework import dtypes -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/convert_layout.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout.py similarity index 98% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/convert_layout.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout.py index 00729fde6ac..dce6a817822 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/convert_layout.py +++ 
b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_layout.py @@ -21,7 +21,7 @@ from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.grappler import tf_optimizer from tensorflow.core.protobuf import meta_graph_pb2 -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase class ConvertLayoutOptimizer(GraphRewriterBase): diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu.py similarity index 97% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu.py index 64f7cd36fa9..bb18fc8eced 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/convert_leakyrelu.py @@ -17,7 +17,7 @@ from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_util -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd.py similarity index 97% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd.py index 0d7086d5b4a..972e0d36ac7 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/dummy_biasadd.py @@ -16,7 +16,7 @@ # limitations under the License. 
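The recurring import rename in these graph-rewriter hunks feeds a single pattern: each pass decorates its `do_transformation` with `dump_elapsed_time`, now imported from the `neural_compressor` namespace. A sketch of that usage, assuming the `neural-compressor` package is installed; `DummyPass` is invented, and only the import path and decorator call mirror the patch:

```python
from neural_compressor.utils.utility import dump_elapsed_time

class DummyPass:
    @dump_elapsed_time("Pass DummyPass")  # the message string names the pass
    def do_transformation(self):
        return "graph_def"  # a real pass returns the rewritten GraphDef

DummyPass().do_transformation()  # reports how long the pass took
```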
from tensorflow.python.framework import dtypes -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm.py similarity index 99% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm.py index 40b068e3472..0768bc15423 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_batch_norm.py @@ -22,7 +22,7 @@ from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.framework import tensor_util -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer from ..graph_util import GraphRewriterHelper as Helper diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/fold_constant.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant.py similarity index 99% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/fold_constant.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant.py index 4b165a2f227..b92eaac994e 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/fold_constant.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fold_constant.py @@ -19,7 +19,7 @@ import numpy as np import tensorflow as tf from tensorflow.python.platform import tf_logging -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer, GraphRewriterHelper diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_biasadd_add.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul.py similarity index 98% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul.py index 96d1b75b097..ac170533c55 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_column_wise_mul.py @@ -19,7 +19,7 @@ from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.framework import tensor_util from tensorflow.python.framework import dtypes -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math.py similarity index 98% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math.py rename to 
neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math.py index 59318c36b8d..35591d1353b 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_conv_with_math.py @@ -18,7 +18,7 @@ from tensorflow.python.framework import dtypes from tensorflow.python.framework import tensor_util -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_gelu.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_pad_with_conv.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose.py similarity index 97% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose.py index b6b8e8ad9f3..b0a32c63686 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_reshape_transpose.py @@ -18,7 +18,7 @@ from tensorflow.python.framework import tensor_util from tensorflow.python.framework import dtypes -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer.py similarity index 98% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer.py index 448a5c4ddb7..cf61e19bdba 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/graph_cse_optimizer.py @@ -17,7 +17,7 @@ from tensorflow.core.framework import graph_pb2 -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/grappler_pass.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass.py similarity index 97% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/grappler_pass.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass.py index 836d7a6635a..47f03f9876a 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/grappler_pass.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/grappler_pass.py @@ -15,7 +15,7 @@ # See the License for the specific language governing 
permissions and # limitations under the License. -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/insert_print_node.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/insert_print_node.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/insert_print_node.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/insert_print_node.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/pre_optimize.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize.py similarity index 97% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/pre_optimize.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize.py index d2e594ee6b6..0c5e7ba81ef 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/pre_optimize.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/pre_optimize.py @@ -17,8 +17,8 @@ import logging -from lpot.adaptor.tf_utils.graph_rewriter.graph_util import GraphAnalyzer -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.adaptor.tf_utils.graph_rewriter.graph_util import GraphAnalyzer +from neural_compressor.utils.utility import dump_elapsed_time from .fuse_column_wise_mul import FuseColumnWiseMulOptimizer from .remove_training_nodes import RemoveTrainingNodesOptimizer @@ -76,7 +76,7 @@ def get_optimized_model(self): [graphdef]: the optimized graphdef object. """ - from lpot.experimental.common import Model + from neural_compressor.experimental.common import Model origin_model = Model(self.model._model, **self.model.kwargs) origin_model.name = self.model.name diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes.py similarity index 97% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes.py index b7bfefb31df..0c6ed660c37 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/remove_training_nodes.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/split_shared_input.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input.py similarity index 96% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/split_shared_input.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input.py index 157ba0cad32..f0c2af3c676 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/split_shared_input.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/split_shared_input.py @@ -17,7 +17,7 @@ from tensorflow.core.framework import node_def_pb2 -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer @@ -47,7 +47,7 @@ def do_transformation(self): input_map[input_node_name].append(node.name) new_input_node = node_def_pb2.NodeDef() new_input_node.CopyFrom(graph_info[input_node_name].node) - new_input_node.name = input_node_name + '_lpot_share_' + str( + new_input_node.name = input_node_name + '_nc_share_' + str( len(input_map[input_node_name])) cur_graph.replace_const_node( new_input_node, [node.name], input_node_name, False) diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes.py similarity index 84% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes.py index 903488de61a..e10c16de018 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/strip_unused_nodes.py @@ -16,7 +16,7 @@ # limitations under the License. 
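The `split_shared_input` hunk above only changes the node-name suffix from `_lpot_share_` to `_nc_share_`; the splitting logic itself is untouched. A trivial sketch of the renamed suffix computation, with illustrative values:

```python
input_node_name = "weights_const"
input_map = {"weights_const": ["conv1", "conv2"]}  # nodes sharing this const

# Same expression as the hunk: duplicated inputs get a numbered _nc_share_ tag.
new_name = input_node_name + '_nc_share_' + str(len(input_map[input_node_name]))
print(new_name)  # weights_const_nc_share_2
```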
-from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase @@ -28,8 +28,8 @@ def __init__(self, model, input_node_names, output_node_names): @dump_elapsed_time("Pass StripUnusedNodesOptimizer") def do_transformation(self): - from lpot.adaptor.tf_utils.util import fix_ref_type_of_graph_def - from lpot.adaptor.tf_utils.util import strip_unused_nodes + from neural_compressor.adaptor.tf_utils.util import fix_ref_type_of_graph_def + from neural_compressor.adaptor.tf_utils.util import strip_unused_nodes self.model = fix_ref_type_of_graph_def(self.model) return strip_unused_nodes(self.model, self.input_node_names, diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer.py similarity index 98% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer.py index 1010b64f392..fd0f16da725 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/switch_optimizer.py @@ -19,7 +19,7 @@ from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from tensorflow.python.framework import tensor_util diff --git a/lpot/adaptor/tf_utils/graph_rewriter/generic/update_enter.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/update_enter.py similarity index 96% rename from lpot/adaptor/tf_utils/graph_rewriter/generic/update_enter.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/generic/update_enter.py index b6acde414c1..82eed400e77 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/generic/update_enter.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/update_enter.py @@ -19,7 +19,7 @@ from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time class UpdateEnterOptimizer(GraphRewriterBase): diff --git a/lpot/adaptor/tf_utils/graph_rewriter/graph_base.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_base.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/graph_base.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/graph_base.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/graph_util.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_util.py similarity index 99% rename from lpot/adaptor/tf_utils/graph_rewriter/graph_util.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/graph_util.py index 1309aad0dc5..ba707fe8870 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/graph_util.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/graph_util.py @@ -26,7 +26,7 @@ from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import tensor_util -from lpot.utils.utility import singleton +from neural_compressor.utils.utility import singleton logger = logging.getLogger() diff --git a/lpot/adaptor/tf_utils/graph_rewriter/int8/__init__.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/__init__.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/int8/__init__.py 
rename to neural_compressor/adaptor/tf_utils/graph_rewriter/int8/__init__.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant.py similarity index 99% rename from lpot/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant.py index d4eed73df88..69329decc74 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_fake_quant.py @@ -16,7 +16,7 @@ # limitations under the License. -from lpot.utils.utility import dump_elapsed_time +from neural_compressor.utils.utility import dump_elapsed_time from ..graph_base import GraphRewriterBase from ..graph_util import GraphAnalyzer diff --git a/lpot/adaptor/tf_utils/graph_rewriter/int8/freeze_value.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/int8/freeze_value.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value_without_calib.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_conv_requantize.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize.py similarity index 100% rename from lpot/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/int8/fuse_matmul_requantize.py diff --git a/lpot/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer.py similarity index 98% rename from lpot/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer.py rename to neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer.py index d693bd532f6..73c2ee930de 100644 --- a/lpot/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer.py +++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/meta_op_optimizer.py @@ -16,7 +16,7 @@ # limitations under the License. 
-from lpot.utils.utility import dump_elapsed_time
+from neural_compressor.utils.utility import dump_elapsed_time
 from ..graph_base import GraphRewriterBase
 from ..graph_util import GraphAnalyzer
diff --git a/lpot/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse.py
similarity index 98%
rename from lpot/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse.py
rename to neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse.py
index 18ee9026fe4..1df4ccf682a 100644
--- a/lpot/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse.py
+++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/post_quantized_op_cse.py
@@ -18,7 +18,7 @@
 import hashlib
 from tensorflow.core.framework import graph_pb2
 from tensorflow.python.framework import tensor_util
-from lpot.utils.utility import dump_elapsed_time
+from neural_compressor.utils.utility import dump_elapsed_time
 from ..graph_base import GraphRewriterBase
 from ..graph_util import GraphAnalyzer
diff --git a/lpot/adaptor/tf_utils/graph_rewriter/int8/rnn_convert.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert.py
similarity index 99%
rename from lpot/adaptor/tf_utils/graph_rewriter/int8/rnn_convert.py
rename to neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert.py
index 4eff5cf2256..2c2d473598b 100644
--- a/lpot/adaptor/tf_utils/graph_rewriter/int8/rnn_convert.py
+++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/rnn_convert.py
@@ -16,7 +16,7 @@
 # limitations under the License.
-from lpot.utils.utility import dump_elapsed_time
+from neural_compressor.utils.utility import dump_elapsed_time
 from ..graph_base import GraphRewriterBase
 from ..graph_util import GraphAnalyzer
diff --git a/lpot/adaptor/tf_utils/graph_rewriter/int8/scale_propagation.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/scale_propagation.py
similarity index 100%
rename from lpot/adaptor/tf_utils/graph_rewriter/int8/scale_propagation.py
rename to neural_compressor/adaptor/tf_utils/graph_rewriter/int8/scale_propagation.py
diff --git a/lpot/adaptor/tf_utils/graph_rewriter/itex/__init__.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/itex/__init__.py
similarity index 100%
rename from lpot/adaptor/tf_utils/graph_rewriter/itex/__init__.py
rename to neural_compressor/adaptor/tf_utils/graph_rewriter/itex/__init__.py
diff --git a/lpot/adaptor/tf_utils/graph_rewriter/itex/itex_convert.py b/neural_compressor/adaptor/tf_utils/graph_rewriter/itex/itex_convert.py
similarity index 97%
rename from lpot/adaptor/tf_utils/graph_rewriter/itex/itex_convert.py
rename to neural_compressor/adaptor/tf_utils/graph_rewriter/itex/itex_convert.py
index 0bfbe3c9169..f98a5933fc5 100644
--- a/lpot/adaptor/tf_utils/graph_rewriter/itex/itex_convert.py
+++ b/neural_compressor/adaptor/tf_utils/graph_rewriter/itex/itex_convert.py
@@ -19,8 +19,8 @@
 import numpy as np
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import tensor_util
-from lpot.utils.utility import dump_elapsed_time
-from lpot.adaptor.tf_utils.graph_rewriter.graph_util import GraphAnalyzer
+from neural_compressor.utils.utility import dump_elapsed_time
+from neural_compressor.adaptor.tf_utils.graph_rewriter.graph_util import GraphAnalyzer
 from ..graph_base import GraphRewriterBase
 from ..graph_util import GraphRewriterHelper as Helper
diff --git a/lpot/adaptor/tf_utils/quantize_graph/__init__.py b/neural_compressor/adaptor/tf_utils/quantize_graph/__init__.py
similarity index 100%
rename from lpot/adaptor/tf_utils/quantize_graph/__init__.py
rename to neural_compressor/adaptor/tf_utils/quantize_graph/__init__.py
diff --git a/lpot/adaptor/tf_utils/quantize_graph/quantize_graph_base.py b/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_base.py
similarity index 100%
rename from lpot/adaptor/tf_utils/quantize_graph/quantize_graph_base.py
rename to neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_base.py
diff --git a/lpot/adaptor/tf_utils/quantize_graph/quantize_graph_common.py b/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_common.py
similarity index 100%
rename from lpot/adaptor/tf_utils/quantize_graph/quantize_graph_common.py
rename to neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_common.py
diff --git a/lpot/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2.py b/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2.py
similarity index 100%
rename from lpot/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2.py
rename to neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2.py
diff --git a/lpot/adaptor/tf_utils/quantize_graph/quantize_graph_conv.py b/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_conv.py
similarity index 100%
rename from lpot/adaptor/tf_utils/quantize_graph/quantize_graph_conv.py
rename to neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_conv.py
diff --git a/lpot/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu.py b/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu.py
similarity index 98%
rename from lpot/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu.py
rename to neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu.py
index 71a4085560c..27f199b8072 100644
--- a/lpot/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu.py
+++ b/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_for_intel_cpu.py
@@ -17,7 +17,7 @@
 from tensorflow.core.framework import graph_pb2
 from tensorflow.python.platform import gfile
-from lpot.utils.utility import dump_elapsed_time
+from neural_compressor.utils.utility import dump_elapsed_time
 from .quantize_graph_base import QuantizeGraphBase
 from .quantize_graph_common import QuantizeGraphHelper
diff --git a/lpot/adaptor/tf_utils/quantize_graph/quantize_graph_matmul.py b/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_matmul.py
similarity index 100%
rename from lpot/adaptor/tf_utils/quantize_graph/quantize_graph_matmul.py
rename to neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_matmul.py
diff --git a/lpot/adaptor/tf_utils/quantize_graph/quantize_graph_pooling.py b/neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_pooling.py
similarity index 100%
rename from lpot/adaptor/tf_utils/quantize_graph/quantize_graph_pooling.py
rename to neural_compressor/adaptor/tf_utils/quantize_graph/quantize_graph_pooling.py
diff --git a/lpot/adaptor/tf_utils/transform_graph/__init__.py b/neural_compressor/adaptor/tf_utils/transform_graph/__init__.py
similarity index 100%
rename from lpot/adaptor/tf_utils/transform_graph/__init__.py
rename to neural_compressor/adaptor/tf_utils/transform_graph/__init__.py
diff --git a/lpot/adaptor/tf_utils/transform_graph/bias_correction.py b/neural_compressor/adaptor/tf_utils/transform_graph/bias_correction.py
similarity index 100%
rename from lpot/adaptor/tf_utils/transform_graph/bias_correction.py
rename to neural_compressor/adaptor/tf_utils/transform_graph/bias_correction.py
diff --git a/lpot/adaptor/tf_utils/transform_graph/graph_transform_base.py b/neural_compressor/adaptor/tf_utils/transform_graph/graph_transform_base.py
similarity index 100%
rename from lpot/adaptor/tf_utils/transform_graph/graph_transform_base.py
rename to neural_compressor/adaptor/tf_utils/transform_graph/graph_transform_base.py
diff --git a/lpot/adaptor/tf_utils/transform_graph/insert_logging.py b/neural_compressor/adaptor/tf_utils/transform_graph/insert_logging.py
similarity index 100%
rename from lpot/adaptor/tf_utils/transform_graph/insert_logging.py
rename to neural_compressor/adaptor/tf_utils/transform_graph/insert_logging.py
diff --git a/lpot/adaptor/tf_utils/transform_graph/rerange_quantized_concat.py b/neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat.py
similarity index 98%
rename from lpot/adaptor/tf_utils/transform_graph/rerange_quantized_concat.py
rename to neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat.py
index c9b0da1aa49..092d85456ec 100644
--- a/lpot/adaptor/tf_utils/transform_graph/rerange_quantized_concat.py
+++ b/neural_compressor/adaptor/tf_utils/transform_graph/rerange_quantized_concat.py
@@ -25,7 +25,7 @@
 from tensorflow.core.framework import attr_value_pb2
 from tensorflow.python.framework import dtypes
 from .graph_transform_base import GraphTransformBase
-from lpot.adaptor.tf_utils.graph_rewriter.graph_util import GraphRewriterHelper as Helper
+from neural_compressor.adaptor.tf_utils.graph_rewriter.graph_util import GraphRewriterHelper as Helper
 class RerangeQuantizedConcat(GraphTransformBase):
diff --git a/lpot/adaptor/tf_utils/util.py b/neural_compressor/adaptor/tf_utils/util.py
similarity index 98%
rename from lpot/adaptor/tf_utils/util.py
rename to neural_compressor/adaptor/tf_utils/util.py
index 890579bb2c7..37f6ba34057 100644
--- a/lpot/adaptor/tf_utils/util.py
+++ b/neural_compressor/adaptor/tf_utils/util.py
@@ -25,7 +25,7 @@
 from tensorflow.python.platform import gfile
 from tensorflow.core.framework import node_def_pb2
 from tensorflow.core.framework import attr_value_pb2
-from lpot.utils import logger
+from neural_compressor.utils import logger
 from .graph_rewriter.graph_util import GraphAnalyzer
 def disable_random(seed=1):
@@ -319,9 +319,9 @@ def strip_unused_nodes(graph_def, input_node_names, output_node_names):
 # THIS API IS TO BE DEPRECATED!
 def get_graph_def(model, outputs=[], auto_input_output=False):
-    from lpot.experimental.common import Model as LpotModel
-    if not isinstance(model, LpotModel):
-        model = LpotModel(model)
+    from neural_compressor.experimental.common import Model as NCModel
+    if not isinstance(model, NCModel):
+        model = NCModel(model)
     model.output_tensor_names = outputs
     return model.graph_def
diff --git a/lpot/adaptor/torch_utils/__init__.py b/neural_compressor/adaptor/torch_utils/__init__.py
similarity index 100%
rename from lpot/adaptor/torch_utils/__init__.py
rename to neural_compressor/adaptor/torch_utils/__init__.py
diff --git a/lpot/adaptor/torch_utils/util.py b/neural_compressor/adaptor/torch_utils/util.py
similarity index 100%
rename from lpot/adaptor/torch_utils/util.py
rename to neural_compressor/adaptor/torch_utils/util.py
diff --git a/lpot/algorithm/__init__.py b/neural_compressor/algorithm/__init__.py
similarity index 100%
rename from lpot/algorithm/__init__.py
rename to neural_compressor/algorithm/__init__.py
diff --git a/lpot/algorithm/algorithm.py b/neural_compressor/algorithm/algorithm.py
similarity index 98%
rename from lpot/algorithm/algorithm.py
rename to neural_compressor/algorithm/algorithm.py
index b6ad2c4a71f..56746514a44 100644
--- a/lpot/algorithm/algorithm.py
+++ b/neural_compressor/algorithm/algorithm.py
@@ -16,7 +16,7 @@
 # limitations under the License.
 from abc import abstractmethod
-from lpot.utils.create_obj_from_config import get_algorithm
+from neural_compressor.utils.create_obj_from_config import get_algorithm
 registry_algorithms = {}
diff --git a/lpot/algorithm/fast_bias_correction.py b/neural_compressor/algorithm/fast_bias_correction.py
similarity index 100%
rename from lpot/algorithm/fast_bias_correction.py
rename to neural_compressor/algorithm/fast_bias_correction.py
diff --git a/lpot/algorithm/weight_correction.py b/neural_compressor/algorithm/weight_correction.py
similarity index 100%
rename from lpot/algorithm/weight_correction.py
rename to neural_compressor/algorithm/weight_correction.py
diff --git a/lpot/benchmark.py b/neural_compressor/benchmark.py
similarity index 84%
rename from lpot/benchmark.py
rename to neural_compressor/benchmark.py
index b27d8723cc3..e389ab247d4 100644
--- a/lpot/benchmark.py
+++ b/neural_compressor/benchmark.py
@@ -36,7 +36,7 @@ def __init__(self, conf_fname_or_obj):
     def __call__(self, model, b_dataloader=None, b_func=None):
         logger.warning("This API is going to be deprecated. Please import "
-                       "lpot.experimental.Bencharmk, initialize an instance of `Benchmark`,"
+                       "neural_compressor.experimental.Benchmark, initialize an instance of `Benchmark`,"
                        "set its dataloader and metric attributes, then invoke its __call__ method.")
         self.exp_benchmarker.model = model
@@ -56,11 +56,11 @@ def dataloader(self, dataset, batch_size=1, collate_fn=None, last_batch='rollove
             pin_memory=pin_memory, shuffle=shuffle, distributed=distributed)
     def metric(self, name, metric_cls, **kwargs):
-        from .experimental.common import Metric as LpotMetric
-        lpot_metric = LpotMetric(metric_cls, name, **kwargs)
-        self.exp_benchmarker.metric = lpot_metric
+        from .experimental.common import Metric as NCMetric
+        nc_metric = NCMetric(metric_cls, name, **kwargs)
+        self.exp_benchmarker.metric = nc_metric
     def postprocess(self, name, postprocess_cls, **kwargs):
-        from .experimental.common import Postprocess as LpotPostprocess
-        lpot_postprocess = LpotPostprocess(postprocess_cls, name, **kwargs)
-        self.exp_benchmarker.postprocess = lpot_postprocess
+        from .experimental.common import Postprocess as NCPostprocess
+        nc_postprocess = NCPostprocess(postprocess_cls, name, **kwargs)
+        self.exp_benchmarker.postprocess = nc_postprocess
diff --git a/lpot/conf/README.md b/neural_compressor/conf/README.md
similarity index 100%
rename from lpot/conf/README.md
rename to neural_compressor/conf/README.md
diff --git a/lpot/conf/__init__.py b/neural_compressor/conf/__init__.py
similarity index 100%
rename from lpot/conf/__init__.py
rename to neural_compressor/conf/__init__.py
diff --git a/lpot/conf/config.py b/neural_compressor/conf/config.py
similarity index 99%
rename from lpot/conf/config.py
rename to neural_compressor/conf/config.py
index fb14f04eab7..5cbea633176 100644
--- a/lpot/conf/config.py
+++ b/neural_compressor/conf/config.py
@@ -536,7 +536,7 @@ def percent_to_float(data):
     Optional('weight_compression'): weight_compression_schema,
 })
-default_workspace = './lpot_workspace/{}/'.format(
+default_workspace = './nc_workspace/{}/'.format(
     datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
 schema = Schema({
@@ -650,7 +650,7 @@ def percent_to_float(data):
     Optional('strategy', default={'name': 'basic'}): {
         'name': And(str, lambda s: s in STRATEGIES),
         Optional('sigopt_api_token'): str,
         Optional('sigopt_project_id'): str,
-        Optional('sigopt_experiment_name', default='lpot-tune'): str,
+        Optional('sigopt_experiment_name', default='nc-tune'): str,
         Optional('accuracy_weight', default=1.0): float,
         Optional('latency_weight', default=1.0): float
     } ,
@@ -896,7 +896,7 @@ def _read_cfg(self, cfg_fname):
             cfg = yaml.safe_load(content)
             validated_cfg = schema.validate(cfg)
-            # if user yaml doesn't include version field, lpot will write a supported version
+            # if user yaml doesn't include version field, neural_compressor will write a supported version
             # into it.
             if 'version' not in cfg:
                 leading_whitespace = re.search(r"[ \t]*model\s*:",
diff --git a/lpot/conf/dotdict.py b/neural_compressor/conf/dotdict.py
similarity index 100%
rename from lpot/conf/dotdict.py
rename to neural_compressor/conf/dotdict.py
diff --git a/lpot/contrib/__init__.py b/neural_compressor/contrib/__init__.py
similarity index 100%
rename from lpot/contrib/__init__.py
rename to neural_compressor/contrib/__init__.py
diff --git a/lpot/contrib/strategy/__init__.py b/neural_compressor/contrib/strategy/__init__.py
similarity index 100%
rename from lpot/contrib/strategy/__init__.py
rename to neural_compressor/contrib/strategy/__init__.py
diff --git a/lpot/contrib/strategy/sigopt.py b/neural_compressor/contrib/strategy/sigopt.py
similarity index 97%
rename from lpot/contrib/strategy/sigopt.py
rename to neural_compressor/contrib/strategy/sigopt.py
index 6323b523f67..e2877dbcdba 100644
--- a/lpot/contrib/strategy/sigopt.py
+++ b/neural_compressor/contrib/strategy/sigopt.py
@@ -16,8 +16,8 @@
 # limitations under the License.
 import copy
-from lpot.utils import logger
-from lpot.strategy.strategy import strategy_registry, TuneStrategy
+from neural_compressor.utils import logger
+from neural_compressor.strategy.strategy import strategy_registry, TuneStrategy
 from sigopt import Connection
@@ -98,8 +98,8 @@ def __init__(self, model, conf, q_dataloader, q_func=None,
             logger.error("`sigopt_project_id` field in yaml file is required. " \
                          "Please refer to details in /docs/sigopt_strategy.md.")
             exit(0)
-        if self.experiment_name == 'lpot-tune':
-            logger.info("Default experiment name `lpot-tune` is used, " \
+        if self.experiment_name == 'nc-tune':
+            logger.info("Default experiment name `nc-tune` is used, " \
                         "Please refer to details in /docs/sigopt_strategy.md " \
                         "if user wants to modify it.")
         else:
diff --git a/lpot/contrib/strategy/tpe.py b/neural_compressor/contrib/strategy/tpe.py
similarity index 99%
rename from lpot/contrib/strategy/tpe.py
rename to neural_compressor/contrib/strategy/tpe.py
index 9a285aa095e..60f0cd676ff 100644
--- a/lpot/contrib/strategy/tpe.py
+++ b/neural_compressor/contrib/strategy/tpe.py
@@ -22,8 +22,8 @@
 import numpy as np
 import hyperopt as hpo
 from hyperopt import fmin, hp, STATUS_OK, Trials
-from lpot.utils import logger
-from lpot.strategy.strategy import strategy_registry, TuneStrategy
+from neural_compressor.utils import logger
+from neural_compressor.strategy.strategy import strategy_registry, TuneStrategy
 try:
     import pandas as pd
diff --git a/lpot/data/__init__.py b/neural_compressor/data/__init__.py
similarity index 93%
rename from lpot/data/__init__.py
rename to neural_compressor/data/__init__.py
index cc62da854f1..2883a446e7b 100644
--- a/lpot/data/__init__.py
+++ b/neural_compressor/data/__init__.py
@@ -17,8 +17,8 @@
 from .dataloaders import DataLoader
-import lpot.data.datasets
-import lpot.data.transforms
+import neural_compressor.data.datasets
+import neural_compressor.data.transforms
 from ..experimental.data.datasets import DATASETS, Dataset, IterableDataset, dataset_registry
 from ..experimental.data.transforms import TRANSFORMS, BaseTransform, transform_registry
 from ..experimental.data.dataloaders import DATALOADERS
diff --git a/lpot/data/dataloaders/__init__.py b/neural_compressor/data/dataloaders/__init__.py
similarity index 100%
rename from lpot/data/dataloaders/__init__.py
rename to neural_compressor/data/dataloaders/__init__.py
diff --git a/lpot/data/dataloaders/dataloader.py b/neural_compressor/data/dataloaders/dataloader.py
similarity index 96%
rename from lpot/data/dataloaders/dataloader.py
rename to neural_compressor/data/dataloaders/dataloader.py
index 7f1f6222736..b9f257a3dd2 100644
--- a/lpot/data/dataloaders/dataloader.py
+++ b/neural_compressor/data/dataloaders/dataloader.py
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from lpot.experimental.data.dataloaders import DATALOADERS
+from neural_compressor.experimental.data.dataloaders import DATALOADERS
 # THIS API IS TO BE DEPRECATED!
 class DataLoader(object):
diff --git a/lpot/data/datasets/__init__.py b/neural_compressor/data/datasets/__init__.py
similarity index 100%
rename from lpot/data/datasets/__init__.py
rename to neural_compressor/data/datasets/__init__.py
diff --git a/lpot/data/datasets/imagenet_dataset.py b/neural_compressor/data/datasets/imagenet_dataset.py
similarity index 93%
rename from lpot/data/datasets/imagenet_dataset.py
rename to neural_compressor/data/datasets/imagenet_dataset.py
index aff039c01fb..3cf944ad2ef 100644
--- a/lpot/data/datasets/imagenet_dataset.py
+++ b/neural_compressor/data/datasets/imagenet_dataset.py
@@ -31,9 +31,9 @@
 # ==============================================================================
 import os
 from PIL import Image
-from lpot.utils.utility import LazyImport
-from lpot.utils import logger
-from lpot.experimental.data.datasets import dataset_registry, IterableDataset, Dataset
+from neural_compressor.utils.utility import LazyImport
+from neural_compressor.utils import logger
+from neural_compressor.experimental.data.datasets import dataset_registry, IterableDataset, Dataset
 tf = LazyImport('tensorflow')
 # BELOW API TO BE DEPRECATED!
@@ -55,7 +55,7 @@ def __new__(cls, root, subset='validation', num_cores=28, transform=None, filter
             raise ValueError('Found no files in --root matching: {}'.format(glob_pattern))
         from tensorflow.python.data.experimental import parallel_interleave
-        from lpot.experimental.data.transforms.imagenet_transform import ParseDecodeImagenet
+        from neural_compressor.experimental.data.transforms.imagenet_transform import ParseDecodeImagenet
         ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=False)
         ds = ds.apply(
             parallel_interleave(
diff --git a/lpot/data/transforms/__init__.py b/neural_compressor/data/transforms/__init__.py
similarity index 100%
rename from lpot/data/transforms/__init__.py
rename to neural_compressor/data/transforms/__init__.py
diff --git a/lpot/data/transforms/coco_transform.py b/neural_compressor/data/transforms/coco_transform.py
similarity index 91%
rename from lpot/data/transforms/coco_transform.py
rename to neural_compressor/data/transforms/coco_transform.py
index a2fc9b590e5..7bef847eb78 100644
--- a/lpot/data/transforms/coco_transform.py
+++ b/neural_compressor/data/transforms/coco_transform.py
@@ -30,8 +30,8 @@
 # limitations under the License.
 # ==============================================================================
-from lpot.utils import logger
-from lpot.experimental.data.transforms import transform_registry, BaseTransform
+from neural_compressor.utils import logger
+from neural_compressor.experimental.data.transforms import transform_registry, BaseTransform
 # BELOW IS TO BE DEPRECATED!
 @transform_registry(transform_type="ParseDecodeCoco", \
@@ -40,5 +40,5 @@ class ParseDecodeCocoTransform(BaseTransform):
     def __call__(self, sample):
         logger.warning("This transform is going to be deprecated, " \
-                       "coco decoding will be performed automatically from LPOT v1.4.")
+                       "coco decoding will be performed automatically from Neural Compressor v1.4.")
         return sample
diff --git a/lpot/data/transforms/imagenet_transform.py b/neural_compressor/data/transforms/imagenet_transform.py
similarity index 98%
rename from lpot/data/transforms/imagenet_transform.py
rename to neural_compressor/data/transforms/imagenet_transform.py
index eda0b9afd8a..cb3bdbb67c6 100644
--- a/lpot/data/transforms/imagenet_transform.py
+++ b/neural_compressor/data/transforms/imagenet_transform.py
@@ -31,9 +31,9 @@
 # ==============================================================================
 import numpy as np
-from lpot.utils import logger
-from lpot.utils.utility import LazyImport
-from lpot.experimental.data.transforms import transform_registry, BaseTransform
+from neural_compressor.utils import logger
+from neural_compressor.utils.utility import LazyImport
+from neural_compressor.experimental.data.transforms import transform_registry, BaseTransform
 tf = LazyImport('tensorflow')
 cv2 = LazyImport('cv2')
@@ -45,7 +45,7 @@ class ParseDecodeImagenetTransform(BaseTransform):
     def __call__(self, sample):
         logger.warning("This transform is going to be deprecated, " \
-                       "imagenet decoding will be performed automatically from LPOT v1.4.")
+                       "imagenet decoding will be performed automatically from Neural Compressor v1.4.")
         return sample
 @transform_registry(transform_type="ResizeCropImagenet", \
diff --git a/lpot/experimental/__init__.py b/neural_compressor/experimental/__init__.py
similarity index 100%
rename from lpot/experimental/__init__.py
rename to neural_compressor/experimental/__init__.py
diff --git a/lpot/experimental/benchmark.py b/neural_compressor/experimental/benchmark.py
similarity index 88%
rename from lpot/experimental/benchmark.py
rename to neural_compressor/experimental/benchmark.py
index 763cc99d60b..901766fe2f9 100644
--- a/lpot/experimental/benchmark.py
+++ b/neural_compressor/experimental/benchmark.py
@@ -32,9 +32,9 @@
 from ..model import BaseModel
 from .data import TRANSFORMS
 from .metric import METRICS
-from .common import Model as LpotModel
-from .common import Metric as LpotMetric
-from .common import Postprocess as LpotPostprocess
+from .common import Model as NCModel
+from .common import Metric as NCMetric
+from .common import Postprocess as NCPostprocess
 from .common import _generate_common_dataloader
 from ..model.model import get_model_fwk_name
@@ -47,7 +47,7 @@ def set_env_var(env_var, value, overwrite_existing=False):
         os.environ[env_var] = str(value)
 def set_all_env_var(conf, overwrite_existing=False):
-    # lpot only use physical cores
+    # neural_compressor only use physical cores
     cpu_counts = psutil.cpu_count(logical=False)
     if not conf:
         conf = {}
@@ -64,12 +64,12 @@ def set_all_env_var(conf, overwrite_existing=False):
         set_env_var(var.upper(), value, overwrite_existing)
     # a special but usually used case, directly use current process
     if conf['num_of_instance'] == 1 and conf['cores_per_instance'] == cpu_counts:
-        set_env_var('LPOT_ENV_CONF', True, overwrite_existing=True)
+        set_env_var('NC_ENV_CONF', True, overwrite_existing=True)
 class Benchmark(object):
     """Benchmark class can be used to evaluate the model performance, with the objective
        setting, user can get the data of what they configured in yaml
-       NOTICE: lpot Benchmark will use the original command to run sub process, this will
+       NOTICE: neural_compressor Benchmark will use the original command to run sub process, this will
        depend on user's code and has possibility to run unneccessary code
     Args:
@@ -102,7 +102,7 @@ def __call__(self, mode='performance'):
         set_all_env_var(deep_get(cfg, 'evaluation.{}.configs'.format(mode)))
         logger.info("Start to run Benchmark.")
-        if os.environ.get('LPOT_ENV_CONF') == 'True':
+        if os.environ.get('NC_ENV_CONF') == 'True':
             return self.run_instance(mode)
         else:
             return self.config_instance()
@@ -128,7 +128,7 @@ def config_instance(self):
         multi_instance_cmd += 'wait' if sys.platform in ['linux'] else ''
         logger.info("Running command is\n{}".format(multi_instance_cmd))
         # each instance will execute single instance
-        set_env_var('LPOT_ENV_CONF', True, overwrite_existing=True)
+        set_env_var('NC_ENV_CONF', True, overwrite_existing=True)
         if sys.platform in ['linux']:
             p = subprocess.Popen(multi_instance_cmd, preexec_fn=os.setsid, shell=True) # nosec
         elif sys.platform in ['win32']:  # pragma: no cover
@@ -144,7 +144,7 @@ def generate_prefix(self, core_list):
                    len(core_list), ','.join(core_list.astype(str)))
         elif sys.platform in ['win32']:  # pragma: no cover
             # (TODO) should we move the hw_info from ux?
-            from lpot.ux.utils.hw_info import get_number_of_sockets
+            from neural_compressor.ux.utils.hw_info import get_number_of_sockets
             num_of_socket = int(get_number_of_sockets())
             cores_per_instance = int(os.environ.get('CORES_PER_INSTANCE'))
             cores_per_socket = int(psutil.cpu_count(logical=False)) / num_of_socket
@@ -182,7 +182,7 @@ def run_instance(self, mode):
                 {"workspace_path": cfg.tuning.workspace.path, \
                  "b_dataloader": self._b_dataloader})
-            assert isinstance(self._model, BaseModel), 'need set lpot Model for quantization....'
+            assert isinstance(self._model, BaseModel), 'need set neural_compressor Model for quantization....'
             adaptor = FRAMEWORKS[framework](framework_specific_info)
@@ -271,9 +271,9 @@ def b_dataloader(self, dataloader):
                        which meet the requirements that can yield tuple of
                        (input, label)/(input, _) batched data.
                        Another good practice is to use
-                       lpot.experimental.common.DataLoader
-                       to initialize a lpot dataloader object.
-                       Notice lpot.experimental.common.DataLoader
+                       neural_compressor.experimental.common.DataLoader
+                       to initialize a neural_compressor dataloader object.
+                       Notice neural_compressor.experimental.common.DataLoader
                        is just a wrapper of the information needed to build a dataloader,
                        it can't yield batched data and only in this setter method
@@ -282,7 +282,7 @@
                        and only after the Quantization object created then
                        framework infomation can be known.
                        Future we will support creating iterable dataloader
-                       from lpot.experimental.common.DataLoader
+                       from neural_compressor.experimental.common.DataLoader
        """
        self._b_dataloader = _generate_common_dataloader(dataloader, self.framework)
@@ -299,7 +299,7 @@ def model(self, user_model):
            user_model: user are supported to set model from original framework model format
                        (eg, tensorflow frozen_pb or path to a saved model), but not recommended.
                        Best practice is to set from a initialized
-                       lpot.experimental.common.Model.
+                       neural_compressor.experimental.common.Model.
                        If tensorflow model is used, model's inputs/outputs will be auto inferenced,
                        but sometimes auto inferenced inputs/outputs will not meet your requests,
@@ -310,8 +310,8 @@
        """
        if not isinstance(user_model, BaseModel):
-           logger.warning("Force convert framework model to lpot model.")
-           self._model = LpotModel(user_model)
+           logger.warning("Force convert framework model to neural_compressor model.")
+           self._model = NCModel(user_model)
        else:
            self._model = user_model
@@ -335,23 +335,23 @@
    @metric.setter
    def metric(self, user_metric):
-       """Set metric class and lpot will initialize this class when evaluation
-          lpot have many built-in metrics, but user can set specific metric through
+       """Set metric class and neural_compressor will initialize this class when evaluation
+          neural_compressor have many built-in metrics, but user can set specific metric through
           this api. The metric class should take the outputs of the model or
-          postprocess(if have) as inputs, lpot built-in metric always take
+          postprocess(if have) as inputs, neural_compressor built-in metric always take
           (predictions, labels) as inputs for update,
-          and user_metric.metric_cls should be sub_class of lpot.metric.BaseMetric.
+          and user_metric.metric_cls should be sub_class of neural_compressor.metric.BaseMetric.
        Args:
-           user_metric(lpot.experimental.common.Metric):
+           user_metric(neural_compressor.experimental.common.Metric):
               user_metric should be object initialized from
-              lpot.experimental.common.Metric, in this method the
+              neural_compressor.experimental.common.Metric, in this method the
               user_metric.metric_cls will be registered to
               specific frameworks and initialized.
        """
-       assert isinstance(user_metric, LpotMetric), \
-           'please initialize a lpot.experimental.common.Metric and set....'
+       assert isinstance(user_metric, NCMetric), \
+           'please initialize a neural_compressor.experimental.common.Metric and set....'
        metric_cfg = {user_metric.name : {**user_metric.kwargs}}
        if deep_get(self.conf.usr_cfg, "evaluation.accuracy.metric"):
@@ -369,21 +369,21 @@ def postprocess(self, user_postprocess):
    @postprocess.setter
    def postprocess(self, user_postprocess):
-       """Set postprocess class and lpot will initialize this class when evaluation.
+       """Set postprocess class and neural_compressor will initialize this class when evaluation.
           The postprocess class should take the outputs of the model as inputs, and
           output (predictions, labels) as inputs for metric update.
-          user_postprocess.postprocess_cls should be sub_class of lpot.data.BaseTransform.
+          user_postprocess.postprocess_cls should be sub_class of neural_compressor.data.BaseTransform.
        Args:
-           user_postprocess(lpot.experimental.common.Postprocess):
+           user_postprocess(neural_compressor.experimental.common.Postprocess):
               user_postprocess should be object initialized from
-              lpot.experimental.common.Postprocess,
+              neural_compressor.experimental.common.Postprocess,
              in this method the user_postprocess.postprocess_cls will be
              registered to specific frameworks and initialized.
        """
-       assert isinstance(user_postprocess, LpotPostprocess), \
-           'please initialize a lpot.experimental.common.Postprocess and set....'
+       assert isinstance(user_postprocess, NCPostprocess), \
+           'please initialize a neural_compressor.experimental.common.Postprocess and set....'
        postprocess_cfg = {user_postprocess.name : {**user_postprocess.kwargs}}
        if deep_get(self.conf.usr_cfg, "evaluation.accuracy.postprocess"):
            logger.warning("Override the value of `postprocess` field defined in yaml file" \
diff --git a/lpot/experimental/common/__init__.py b/neural_compressor/experimental/common/__init__.py
similarity index 100%
rename from lpot/experimental/common/__init__.py
rename to neural_compressor/experimental/common/__init__.py
diff --git a/lpot/experimental/common/criterion.py b/neural_compressor/experimental/common/criterion.py
similarity index 99%
rename from lpot/experimental/common/criterion.py
rename to neural_compressor/experimental/common/criterion.py
index 569862ead0d..850b6a9793b 100644
--- a/lpot/experimental/common/criterion.py
+++ b/neural_compressor/experimental/common/criterion.py
@@ -17,7 +17,7 @@
 from abc import abstractmethod
 from collections import UserDict
-from lpot.utils.utility import LazyImport, singleton
+from neural_compressor.utils.utility import LazyImport, singleton
 torch = LazyImport('torch')
 tf = LazyImport('tensorflow')
diff --git a/lpot/experimental/common/dataloader.py b/neural_compressor/experimental/common/dataloader.py
similarity index 96%
rename from lpot/experimental/common/dataloader.py
rename to neural_compressor/experimental/common/dataloader.py
index 18954f4e503..5f7417e5b6f 100644
--- a/lpot/experimental/common/dataloader.py
+++ b/neural_compressor/experimental/common/dataloader.py
@@ -24,7 +24,7 @@ class DataLoader(object):
       the reason is we have to know the framework info
       and only after the Quantization/Benchmark object created then
       framework infomation can be known. Future we will support
-      creating iterable dataloader from lpot.experimental.common.DataLoader
+      creating iterable dataloader from neural_compressor.experimental.common.DataLoader
    """
    def __init__(self, dataset, batch_size=1, collate_fn=None,
diff --git a/lpot/experimental/common/metric.py b/neural_compressor/experimental/common/metric.py
similarity index 85%
rename from lpot/experimental/common/metric.py
rename to neural_compressor/experimental/common/metric.py
index c242b90fd3e..0dce87ab67d 100644
--- a/lpot/experimental/common/metric.py
+++ b/neural_compressor/experimental/common/metric.py
@@ -20,9 +20,9 @@ class Metric(object):
    """
    def __init__(self, metric_cls, name='user_metric', **kwargs):
        """The metric class should take the outputs of the model as the metric's inputs,
-          lpot built-in metric always take (predictions, labels) as inputs, it's
+          neural_compressor built-in metric always take (predictions, labels) as inputs, it's
           recommended to design metric_cls to take (predictions, labels) as inputs.
-          metric_cls should be sub_class of lpot.metric.BaseMetric.
+          metric_cls should be sub_class of neural_compressor.metric.BaseMetric.
        """
        self.metric_cls = metric_cls
        self.name = name
diff --git a/lpot/experimental/common/model.py b/neural_compressor/experimental/common/model.py
similarity index 92%
rename from lpot/experimental/common/model.py
rename to neural_compressor/experimental/common/model.py
index 296aca74fd2..da6a20aed16 100644
--- a/lpot/experimental/common/model.py
+++ b/neural_compressor/experimental/common/model.py
@@ -16,9 +16,9 @@
 # limitations under the License.
 import sys
-from lpot.model.model import get_model_fwk_name, MODELS, get_model_type
-from lpot.utils import logger
-from lpot.utils.utility import get_backend
+from neural_compressor.model.model import get_model_fwk_name, MODELS, get_model_type
+from neural_compressor.utils import logger
+from neural_compressor.utils.utility import get_backend
 class Model(object):
     """common Model just collect the infos to construct a Model
diff --git a/lpot/experimental/common/optimizer.py b/neural_compressor/experimental/common/optimizer.py
similarity index 98%
rename from lpot/experimental/common/optimizer.py
rename to neural_compressor/experimental/common/optimizer.py
index af9db2a3638..0dc28d03cd9 100644
--- a/lpot/experimental/common/optimizer.py
+++ b/neural_compressor/experimental/common/optimizer.py
@@ -16,7 +16,7 @@
 # limitations under the License.
 from abc import abstractmethod
-from lpot.utils.utility import LazyImport, singleton
+from neural_compressor.utils.utility import LazyImport, singleton
 torch = LazyImport('torch')
 tf = LazyImport('tensorflow')
diff --git a/lpot/experimental/common/postprocess.py b/neural_compressor/experimental/common/postprocess.py
similarity index 100%
rename from lpot/experimental/common/postprocess.py
rename to neural_compressor/experimental/common/postprocess.py
diff --git a/lpot/experimental/component.py b/neural_compressor/experimental/component.py
similarity index 93%
rename from lpot/experimental/component.py
rename to neural_compressor/experimental/component.py
index 4fbe01639bc..38115f3a24e 100644
--- a/lpot/experimental/component.py
+++ b/neural_compressor/experimental/component.py
@@ -24,7 +24,7 @@ from ..model.model import get_model_fwk_name
 class Component(object):
-    """This is base class of LPOT Component
+    """This is base class of Neural Compressor Component
     """
     def __init__(self, conf_fname_or_obj=None, combination=None):
         self.conf = None
@@ -274,9 +274,9 @@ def train_dataloader(self, dataloader):
             dataloader(generator): user are supported to set a user defined dataloader
                                    which meet the requirements that can yield tuple of
                                    (input, label)/(input, _) batched data. Another good
-                                   practice is to use lpot.experimental.common.DataLoader
-                                   to initialize a lpot dataloader object. Notice
-                                   lpot.experimental.common.DataLoader is just a wrapper of the
+                                   practice is to use neural_compressor.experimental.common.DataLoader
+                                   to initialize a neural_compressor dataloader object. Notice
+                                   neural_compressor.experimental.common.DataLoader is just a wrapper of the
                                    information needed to build a dataloader, it can't yield
                                    batched data and only in this setter method
                                    a 'real' train_dataloader will be created,
@@ -284,7 +284,7 @@ def train_dataloader(self, dataloader):
                                    and only after the Component object created then
                                    framework information can be known.
                                    Future we will support creating iterable dataloader
-                                   from lpot.experimental.common.DataLoader
+                                   from neural_compressor.experimental.common.DataLoader
        """
        from .common import _generate_common_dataloader
        self._train_dataloader = _generate_common_dataloader(
@@ -308,9 +308,9 @@ def eval_dataloader(self, dataloader):
             dataloader(generator): user are supported to set a user defined dataloader
                                    which meet the requirements that can yield tuple of
                                    (input, label)/(input, _) batched data. Another good
-                                   practice is to use lpot.experimental.common.DataLoader
-                                   to initialize a lpot dataloader object. Notice
-                                   lpot.experimental.common.DataLoader is just a wrapper of the
+                                   practice is to use neural_compressor.experimental.common.DataLoader
+                                   to initialize a neural_compressor dataloader object. Notice
+                                   neural_compressor.experimental.common.DataLoader is just a wrapper of the
                                    information needed to build a dataloader, it can't yield
                                    batched data and only in this setter method
                                    a 'real' train_dataloader will be created,
@@ -318,7 +318,7 @@
                                    and only after the Component object created then
                                    framework information can be known.
                                    Future we will support creating iterable dataloader
-                                   from lpot.experimental.common.DataLoader
+                                   from neural_compressor.experimental.common.DataLoader
        """
        from .common import _generate_common_dataloader
        self._eval_dataloader = _generate_common_dataloader(
@@ -326,7 +326,7 @@ def eval_dataloader(self, dataloader):
    @property
    def model(self):
-       """ Getter of model in lpot.model """
+       """ Getter of model in neural_compressor.model """
        return self._model
    @model.setter
@@ -337,7 +337,7 @@ def model(self, user_model):
            user_model: user are supported to set model from original framework model format
                        (eg, tensorflow frozen_pb or path to a saved model), but not recommended.
                        Best practice is to set from a initialized
-                       lpot.experimental.common.Model.
+                       neural_compressor.experimental.common.Model.
                        If tensorflow model is used, model's inputs/outputs will be auto inferenced,
                        but sometimes auto inferenced inputs/outputs will not meet your requests,
@@ -348,7 +348,7 @@
        """
        if not isinstance(user_model, BaseModel):
-           logger.warning("Force convert framework model to lpot model.")
+           logger.warning("Force convert framework model to neural_compressor model.")
            self._model = Model(user_model)
        else:
            self._model = user_model
diff --git a/lpot/experimental/data/__init__.py b/neural_compressor/experimental/data/__init__.py
similarity index 100%
rename from lpot/experimental/data/__init__.py
rename to neural_compressor/experimental/data/__init__.py
diff --git a/lpot/experimental/data/dataloaders/__init__.py b/neural_compressor/experimental/data/dataloaders/__init__.py
similarity index 100%
rename from lpot/experimental/data/dataloaders/__init__.py
rename to neural_compressor/experimental/data/dataloaders/__init__.py
diff --git a/lpot/experimental/data/dataloaders/base_dataloader.py b/neural_compressor/experimental/data/dataloaders/base_dataloader.py
similarity index 100%
rename from lpot/experimental/data/dataloaders/base_dataloader.py
rename to neural_compressor/experimental/data/dataloaders/base_dataloader.py
diff --git a/lpot/experimental/data/dataloaders/dataloader.py b/neural_compressor/experimental/data/dataloaders/dataloader.py
similarity index 100%
rename from lpot/experimental/data/dataloaders/dataloader.py
rename to neural_compressor/experimental/data/dataloaders/dataloader.py
diff --git a/lpot/experimental/data/dataloaders/default_dataloader.py b/neural_compressor/experimental/data/dataloaders/default_dataloader.py
similarity index 100%
rename from lpot/experimental/data/dataloaders/default_dataloader.py
rename to neural_compressor/experimental/data/dataloaders/default_dataloader.py
diff --git a/lpot/experimental/data/dataloaders/engine_dataloader.py b/neural_compressor/experimental/data/dataloaders/engine_dataloader.py
similarity index 97%
rename from lpot/experimental/data/dataloaders/engine_dataloader.py
rename to neural_compressor/experimental/data/dataloaders/engine_dataloader.py
index d7d52be3320..5680c8ab164 100644
--- a/lpot/experimental/data/dataloaders/engine_dataloader.py
+++ b/neural_compressor/experimental/data/dataloaders/engine_dataloader.py
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from lpot.utils.utility import LazyImport
+from neural_compressor.utils.utility import LazyImport
 from .base_dataloader import BaseDataLoader
 from .default_dataloader import DefaultDataLoader
 import numpy as np
diff --git a/lpot/experimental/data/dataloaders/fetcher.py b/neural_compressor/experimental/data/dataloaders/fetcher.py
similarity index 100%
rename from lpot/experimental/data/dataloaders/fetcher.py
rename to neural_compressor/experimental/data/dataloaders/fetcher.py
diff --git a/lpot/experimental/data/dataloaders/mxnet_dataloader.py b/neural_compressor/experimental/data/dataloaders/mxnet_dataloader.py
similarity index 96%
rename from lpot/experimental/data/dataloaders/mxnet_dataloader.py
rename to neural_compressor/experimental/data/dataloaders/mxnet_dataloader.py
index f9eb249e1d8..2f9aa2e9e77 100644
--- a/lpot/experimental/data/dataloaders/mxnet_dataloader.py
+++ b/neural_compressor/experimental/data/dataloaders/mxnet_dataloader.py
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from lpot.utils.utility import LazyImport
+from neural_compressor.utils.utility import LazyImport
 from .base_dataloader import BaseDataLoader
 import logging
 mx = LazyImport('mxnet')
diff --git a/lpot/experimental/data/dataloaders/onnxrt_dataloader.py b/neural_compressor/experimental/data/dataloaders/onnxrt_dataloader.py
similarity index 98%
rename from lpot/experimental/data/dataloaders/onnxrt_dataloader.py
rename to neural_compressor/experimental/data/dataloaders/onnxrt_dataloader.py
index 1212aee4fa4..d47e8596e7c 100644
--- a/lpot/experimental/data/dataloaders/onnxrt_dataloader.py
+++ b/neural_compressor/experimental/data/dataloaders/onnxrt_dataloader.py
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from lpot.utils.utility import LazyImport
+from neural_compressor.utils.utility import LazyImport
 from .base_dataloader import BaseDataLoader
 from .default_dataloader import DefaultDataLoader
 from ..datasets.bert_dataset import ONNXRTBertDataset
diff --git a/lpot/experimental/data/dataloaders/pytorch_dataloader.py b/neural_compressor/experimental/data/dataloaders/pytorch_dataloader.py
similarity index 97%
rename from lpot/experimental/data/dataloaders/pytorch_dataloader.py
rename to neural_compressor/experimental/data/dataloaders/pytorch_dataloader.py
index e9f827c3884..8608758c196 100644
--- a/lpot/experimental/data/dataloaders/pytorch_dataloader.py
+++ b/neural_compressor/experimental/data/dataloaders/pytorch_dataloader.py
@@ -16,7 +16,7 @@
 # limitations under the License.
 import numpy as np
-from lpot.utils.utility import LazyImport
+from neural_compressor.utils.utility import LazyImport
 from .base_dataloader import BaseDataLoader
 torch = LazyImport('torch')
 hvd = LazyImport('horovod.torch')
diff --git a/lpot/experimental/data/dataloaders/sampler.py b/neural_compressor/experimental/data/dataloaders/sampler.py
similarity index 100%
rename from lpot/experimental/data/dataloaders/sampler.py
rename to neural_compressor/experimental/data/dataloaders/sampler.py
diff --git a/lpot/experimental/data/dataloaders/tensorflow_dataloader.py b/neural_compressor/experimental/data/dataloaders/tensorflow_dataloader.py
similarity index 98%
rename from lpot/experimental/data/dataloaders/tensorflow_dataloader.py
rename to neural_compressor/experimental/data/dataloaders/tensorflow_dataloader.py
index 9ac61602f94..b81dbde90a7 100644
--- a/lpot/experimental/data/dataloaders/tensorflow_dataloader.py
+++ b/neural_compressor/experimental/data/dataloaders/tensorflow_dataloader.py
@@ -15,8 +15,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from lpot.experimental.data.datasets import dataset
-from lpot.utils.utility import LazyImport
+from neural_compressor.experimental.data.datasets import dataset
+from neural_compressor.utils.utility import LazyImport
 from abc import abstractmethod
 import collections
 import numpy as np
@@ -30,7 +30,7 @@
 import logging
 tf = LazyImport('tensorflow')
-lpot = LazyImport('lpot')
+neural_compressor = LazyImport('neural_compressor')
 class TFDataDataLoader(BaseDataLoader):
     """In tensorflow1.x dataloader is coupled with the graph, but it also support feed_dict
diff --git a/lpot/experimental/data/datasets/__init__.py b/neural_compressor/experimental/data/datasets/__init__.py
similarity index 100%
rename from lpot/experimental/data/datasets/__init__.py
rename to neural_compressor/experimental/data/datasets/__init__.py
diff --git a/lpot/experimental/data/datasets/bert_dataset.py b/neural_compressor/experimental/data/datasets/bert_dataset.py
similarity index 99%
rename from lpot/experimental/data/datasets/bert_dataset.py
rename to neural_compressor/experimental/data/datasets/bert_dataset.py
index fa60c5b8139..0e29c145e05 100644
--- a/lpot/experimental/data/datasets/bert_dataset.py
+++ b/neural_compressor/experimental/data/datasets/bert_dataset.py
@@ -21,7 +21,7 @@
 import dataclasses
 from dataclasses import dataclass
 from typing import List, Optional, Union
-from lpot.utils.utility import LazyImport
+from neural_compressor.utils.utility import LazyImport
 from .dataset import dataset_registry, Dataset
 torch = LazyImport('torch')
 transformers = LazyImport('transformers')
diff --git a/lpot/experimental/data/datasets/coco_dataset.py b/neural_compressor/experimental/data/datasets/coco_dataset.py
similarity index 98%
rename from lpot/experimental/data/datasets/coco_dataset.py
rename to neural_compressor/experimental/data/datasets/coco_dataset.py
index 86b3734b20b..0a886a68ab5 100644
--- a/lpot/experimental/data/datasets/coco_dataset.py
+++ b/neural_compressor/experimental/data/datasets/coco_dataset.py
@@ -31,7 +31,7 @@
 # ==============================================================================
 import numpy as np
 from PIL import Image
-from lpot.utils.utility import LazyImport
+from neural_compressor.utils.utility import LazyImport
 from .dataset import dataset_registry, IterableDataset, Dataset
 tf = LazyImport('tensorflow')
@@ -156,7 +156,7 @@ def __init__(self, root, img_dir='val2017', \
         import os
         import numpy as np
         from pycocotools.coco import COCO
-        from lpot.experimental.metric.coco_label_map import category_map
+        from neural_compressor.experimental.metric.coco_label_map import category_map
         self.image_list = []
         self.transform = transform
         img_path = os.path.join(root, img_dir)
@@ -228,7 +228,7 @@ def __init__(self, root, npy_dir='val2017', \
         import os
         import numpy as np
         from pycocotools.coco import COCO
-        from lpot.experimental.metric.coco_label_map import category_map
+        from neural_compressor.experimental.metric.coco_label_map import category_map
         self.image_list = []
         npy_path = os.path.join(root, npy_dir)
         anno_path = os.path.join(root, anno_dir)
diff --git a/lpot/experimental/data/datasets/dataset.py b/neural_compressor/experimental/data/datasets/dataset.py
similarity index 99%
rename from lpot/experimental/data/datasets/dataset.py
rename to neural_compressor/experimental/data/datasets/dataset.py
index 1fdc987e137..0cc4022b692 100644
--- a/lpot/experimental/data/datasets/dataset.py
+++ b/neural_compressor/experimental/data/datasets/dataset.py
@@ -17,7 +17,7 @@
 from abc import abstractmethod
 import os
-from lpot.utils.utility import LazyImport, singleton
+from neural_compressor.utils.utility import LazyImport, singleton
 from PIL import Image
 torch = LazyImport('torch')
 torchvision = LazyImport('torchvision')
@@ -104,7 +104,7 @@ def __init__(self):
                    "onnxrt_integerops": ONNXRTITDatasets,
                    "engine": EngineDatasets}
-"""The datasets supported by lpot, it's model specific and can be configured by yaml file.
+"""The datasets supported by neural_compressor, it's model specific and can be configured by yaml file.
    User could add new datasets by implementing new Dataset subclass under this directory.
    The naming convention of new dataset subclass should be something like ImageClassifier, user
@@ -763,7 +763,7 @@ def __new__(cls, root, transform=None, filter=None):
         # pylint: disable=no-name-in-module
         from tensorflow.python.data.experimental import parallel_interleave
-        from lpot.experimental.data.transforms.imagenet_transform import ParseDecodeImagenet
+        from neural_compressor.experimental.data.transforms.imagenet_transform import ParseDecodeImagenet
         ds = tf.data.TFRecordDataset.list_files(file_names, shuffle=False)
         ds = ds.apply(parallel_interleave(
             tf.data.TFRecordDataset, cycle_length=len(file_names)))
diff --git a/lpot/experimental/data/datasets/dummy_dataset.py b/neural_compressor/experimental/data/datasets/dummy_dataset.py
similarity index 99%
rename from lpot/experimental/data/datasets/dummy_dataset.py
rename to neural_compressor/experimental/data/datasets/dummy_dataset.py
index 963dbaf0be8..58e8d32f251 100644
--- a/lpot/experimental/data/datasets/dummy_dataset.py
+++ b/neural_compressor/experimental/data/datasets/dummy_dataset.py
@@ -17,7 +17,7 @@
 from .dataset import dataset_registry, Dataset
 import numpy as np
-from lpot.utils.utility import LazyImport
+from neural_compressor.utils.utility import LazyImport
 import logging
 mx = LazyImport('mxnet')
diff --git a/lpot/experimental/data/datasets/dummy_dataset_v2.py b/neural_compressor/experimental/data/datasets/dummy_dataset_v2.py
similarity index 99%
rename from lpot/experimental/data/datasets/dummy_dataset_v2.py
rename to neural_compressor/experimental/data/datasets/dummy_dataset_v2.py
index 1324991a391..6f376a1ca53 100644
--- a/lpot/experimental/data/datasets/dummy_dataset_v2.py
+++ b/neural_compressor/experimental/data/datasets/dummy_dataset_v2.py
@@ -18,7 +18,7 @@
 import sys
 from .dataset import dataset_registry, IterableDataset
 import numpy as np
-from lpot.utils.utility import LazyImport
+from neural_compressor.utils.utility import LazyImport
 mx = LazyImport('mxnet')
 torch = LazyImport('torch')
diff --git a/lpot/experimental/data/datasets/imagenet_dataset.py b/neural_compressor/experimental/data/datasets/imagenet_dataset.py
similarity index 99%
rename from lpot/experimental/data/datasets/imagenet_dataset.py
rename to neural_compressor/experimental/data/datasets/imagenet_dataset.py
index 3e1c04612c2..24f0c440d1d 100644
--- a/lpot/experimental/data/datasets/imagenet_dataset.py
+++ b/neural_compressor/experimental/data/datasets/imagenet_dataset.py
@@ -33,7 +33,7 @@
 import re
 import numpy as np
 from PIL import Image
-from lpot.utils.utility import LazyImport
+from neural_compressor.utils.utility import LazyImport
 from .dataset import dataset_registry, IterableDataset, Dataset
 tf = LazyImport('tensorflow')
 mx = LazyImport('mxnet')
diff --git a/lpot/experimental/data/datasets/style_transfer_dataset.py b/neural_compressor/experimental/data/datasets/style_transfer_dataset.py
similarity index 100%
rename from lpot/experimental/data/datasets/style_transfer_dataset.py
rename to neural_compressor/experimental/data/datasets/style_transfer_dataset.py
diff --git a/lpot/experimental/data/filters/__init__.py b/neural_compressor/experimental/data/filters/__init__.py
similarity index 100%
rename from lpot/experimental/data/filters/__init__.py
rename to neural_compressor/experimental/data/filters/__init__.py
diff --git a/lpot/experimental/data/filters/coco_filter.py b/neural_compressor/experimental/data/filters/coco_filter.py
similarity index 95%
rename from lpot/experimental/data/filters/coco_filter.py
rename to neural_compressor/experimental/data/filters/coco_filter.py
index 0590fdada45..61ea3e50a5e 100644
--- a/lpot/experimental/data/filters/coco_filter.py
+++ b/neural_compressor/experimental/data/filters/coco_filter.py
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from lpot.utils.utility import LazyImport
+from neural_compressor.utils.utility import LazyImport
 from .filter import Filter, filter_registry
 tf = LazyImport('tensorflow')
diff --git a/lpot/experimental/data/filters/filter.py b/neural_compressor/experimental/data/filters/filter.py
similarity index 98%
rename from lpot/experimental/data/filters/filter.py
rename to neural_compressor/experimental/data/filters/filter.py
index e2e270f9b27..965bbd9b961 100644
--- a/lpot/experimental/data/filters/filter.py
+++ b/neural_compressor/experimental/data/filters/filter.py
@@ -16,7 +16,7 @@
 # limitations under the License.
from abc import abstractmethod -from lpot.utils.utility import singleton +from neural_compressor.utils.utility import singleton @singleton class TensorflowFilters(object): diff --git a/lpot/experimental/data/transforms/__init__.py b/neural_compressor/experimental/data/transforms/__init__.py similarity index 100% rename from lpot/experimental/data/transforms/__init__.py rename to neural_compressor/experimental/data/transforms/__init__.py diff --git a/lpot/experimental/data/transforms/imagenet_transform.py b/neural_compressor/experimental/data/transforms/imagenet_transform.py similarity index 98% rename from lpot/experimental/data/transforms/imagenet_transform.py rename to neural_compressor/experimental/data/transforms/imagenet_transform.py index 6b7b048a12d..dbf4e140491 100644 --- a/lpot/experimental/data/transforms/imagenet_transform.py +++ b/neural_compressor/experimental/data/transforms/imagenet_transform.py @@ -31,7 +31,7 @@ # ============================================================================== import numpy as np -from lpot.utils.utility import LazyImport +from neural_compressor.utils.utility import LazyImport from .transform import transform_registry, BaseTransform tf = LazyImport('tensorflow') cv2 = LazyImport('cv2') diff --git a/lpot/experimental/data/transforms/tokenization.py b/neural_compressor/experimental/data/transforms/tokenization.py similarity index 99% rename from lpot/experimental/data/transforms/tokenization.py rename to neural_compressor/experimental/data/transforms/tokenization.py index 03123a59ddb..ce4e403651c 100644 --- a/lpot/experimental/data/transforms/tokenization.py +++ b/neural_compressor/experimental/data/transforms/tokenization.py @@ -35,7 +35,7 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -from lpot.utils.utility import LazyImport +from neural_compressor.utils.utility import LazyImport import collections import re import unicodedata diff --git a/lpot/experimental/data/transforms/transform.py b/neural_compressor/experimental/data/transforms/transform.py similarity index 99% rename from lpot/experimental/data/transforms/transform.py rename to neural_compressor/experimental/data/transforms/transform.py index e0e547c3484..d707480215e 100644 --- a/lpot/experimental/data/transforms/transform.py +++ b/neural_compressor/experimental/data/transforms/transform.py @@ -18,8 +18,8 @@ import numpy as np import collections from abc import abstractmethod -from lpot.utils.utility import LazyImport, singleton -from lpot.utils import logger +from neural_compressor.utils.utility import LazyImport, singleton +from neural_compressor.utils import logger torchvision = LazyImport('torchvision') torch = LazyImport('torch') @@ -1798,7 +1798,7 @@ def __call__(self, sample): return (image, label) def _compute_softmax(scores): - """Compute softmax probablpoty over raw logits.""" + """Compute softmax probability over raw logits.""" import math if not scores: return [] @@ -2319,7 +2319,7 @@ def __call__(self, sample): for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text - output["probablpoty"] = probs[i] + output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) diff --git a/lpot/experimental/distillation.py b/neural_compressor/experimental/distillation.py similarity index 95% rename from lpot/experimental/distillation.py rename to neural_compressor/experimental/distillation.py index 
71977ab0697..17b3817479a 100644 --- a/lpot/experimental/distillation.py +++ b/neural_compressor/experimental/distillation.py @@ -23,7 +23,7 @@ from ..model import BaseModel from .common import Model from ..adaptor import FRAMEWORKS -from lpot.experimental.common import Criterions, Optimizers +from neural_compressor.experimental.common import Criterions, Optimizers from ..conf.config import Distillation_Conf class Distillation(Component): @@ -102,7 +102,7 @@ def pre_process(self): self.adaptor = FRAMEWORKS[self.framework](framework_specific_info) self.generate_hooks() - assert isinstance(self._model, BaseModel), 'need set lpot Model for distillation....' + assert isinstance(self._model, BaseModel), 'need set neural_compressor Model for distillation....' if self._train_dataloader is None and self._train_func is None: train_dataloader_cfg = self.cfg.distillation.train.dataloader @@ -212,7 +212,7 @@ def __call__(self): and evaluation phase by code. The tool provides built-in dataloaders and evaluators, user just need provide a dataset implemented __iter__ or __getitem__ methods and invoke dataloader() - with dataset as input parameter to create lpot dataloader before calling this + with dataset as input parameter to create neural_compressor dataloader before calling this function. After that, User specifies fp32 "model", training dataset "train_dataloader" @@ -256,7 +256,7 @@ def optimizer(self, user_optimizer): @property def teacher_model(self): - """ Getter of model in lpot.model """ + """ Getter of model in neural_compressor.model """ return self._teacher_model @teacher_model.setter @@ -267,7 +267,7 @@ def teacher_model(self, user_model): user_model: user are supported to set model from original framework model format (eg, tensorflow frozen_pb or path to a saved model), but not recommended. Best practice is to set from a initialized - lpot.experimental.common.Model. + neural_compressor.experimental.common.Model. If tensorflow model is used, model's inputs/outputs will be auto inferenced, but sometimes auto inferenced inputs/outputs will not meet your requests, @@ -278,14 +278,14 @@ def teacher_model(self, user_model): """ if not isinstance(user_model, BaseModel): - logger.warning("Force convert framework model to lpot model.") + logger.warning("Force convert framework model to neural_compressor model.") self._teacher_model = Model(user_model) else: self._teacher_model = user_model @property def student_model(self): - """ Getter of model in lpot.model """ + """ Getter of model in neural_compressor.model """ return self._model @student_model.setter @@ -296,7 +296,7 @@ def student_model(self, user_model): user_model: user are supported to set model from original framework model format (eg, tensorflow frozen_pb or path to a saved model), but not recommended. Best practice is to set from a initialized - lpot.experimental.common.Model. + neural_compressor.experimental.common.Model. 
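The distillation hunks above rename the `teacher_model` and `student_model` setters; a minimal driver for them looks roughly like the sketch below. All paths are placeholders, and passing the YAML path to the constructor is an assumption mirroring the other components in this diff.

```python
from neural_compressor.experimental import Distillation, common

distiller = Distillation('/path/to/distillation.yaml')      # placeholder config
distiller.student_model = common.Model('/path/to/student')  # placeholder paths
distiller.teacher_model = common.Model('/path/to/teacher')

# Runs the training loop configured in the YAML and returns the distilled
# student model; save() support is assumed, as in the Scheduler docstring.
model = distiller()
model.save('/path/to/output')
```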
If tensorflow model is used, model's inputs/outputs will be auto inferenced, but sometimes auto inferenced inputs/outputs will not meet your requests, @@ -307,7 +307,7 @@ def student_model(self, user_model): """ if not isinstance(user_model, BaseModel): - logger.warning("Force convert framework model to lpot model.") + logger.warning("Force convert framework model to neural_compressor model.") self._model = Model(user_model) else: self._model = user_model diff --git a/lpot/experimental/graph_optimization.py b/neural_compressor/experimental/graph_optimization.py similarity index 89% rename from lpot/experimental/graph_optimization.py rename to neural_compressor/experimental/graph_optimization.py index e4264527cf6..3ce9791e113 100644 --- a/lpot/experimental/graph_optimization.py +++ b/neural_compressor/experimental/graph_optimization.py @@ -28,7 +28,7 @@ from ..utils import logger from ..utils.create_obj_from_config import create_dataloader from ..utils.utility import CpuInfo, time_limit, set_backend -from .common import Model as LpotModel +from .common import Model as NCModel from ..model import BaseModel class Graph_Optimization(): @@ -79,7 +79,7 @@ def __init__(self, conf_fname_or_obj=None): def __call__(self): """The main entry point of graph optimization process. - This interface works on all the DL frameworks that lpot supports + This interface works on all the DL frameworks that neural_compressor supports and provides three usages: a) Fully yaml configuration: User specifies all the info through yaml, including dataloaders used in calibration and evaluation phases @@ -91,7 +91,7 @@ def __call__(self): and evaluation phase by code. The tool provides built-in dataloaders and evaluators, user just need provide a dataset implemented __iter__ or __getitem__ methods and invoke dataloader() - with dataset as input parameter to create lpot dataloader before calling this + with dataset as input parameter to create neural_compressor dataloader before calling this function. After that, User specifies fp32 "model", calibration dataset "calib_dataloader" @@ -256,16 +256,16 @@ def eval_dataloader(self, dataloader): dataloader(generator): user are supported to set a user defined dataloader which meet the requirements that can yield tuple of (input, label)/(input, _) batched data. - Another good practice is to use lpot.common.DataLoader - to initialize a lpot dataloader object. - Notice lpot.common.DataLoader is just a wrapper of the + Another good practice is to use neural_compressor.common.DataLoader + to initialize a neural_compressor dataloader object. + Notice neural_compressor.common.DataLoader is just a wrapper of the information needed to build a dataloader, it can't yield batched data and only in this setter method a 'real' eval_dataloader will be created, the reason is we have to know the framework info and only after the Quantization object created then framework infomation can be known. Future we will support - creating iterable dataloader from lpot.common.DataLoader + creating iterable dataloader from neural_compressor.common.DataLoader """ from .common import _generate_common_dataloader @@ -283,7 +283,7 @@ def model(self, user_model): Args: user_model: user are supported to set model from original framework model format (eg, tensorflow frozen_pb or path to a saved model), but not recommended. - Best practice is to set from a initialized lpot.common.Model. + Best practice is to set from a initialized neural_compressor.common.Model. 
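Graph_Optimization follows the same pattern: its `__init__(conf_fname_or_obj=None)`, model setter, and `__call__` are all visible in the hunk above. A hedged sketch with placeholder paths; only the import location is assumed.

```python
from neural_compressor.experimental import Graph_Optimization, common

graph_opt = Graph_Optimization('/path/to/graph_optimization.yaml')
graph_opt.model = common.Model('/path/to/fp32_frozen.pb')  # placeholder path

# Applies the graph/precision optimizations configured in the YAML and
# returns the optimized model.
optimized_model = graph_opt()
```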
If tensorflow model is used, model's inputs/outputs will be auto inferred, but sometimes auto inferred inputs/outputs will not meet your requests, set them manually in config yaml file. Another corner case is slim model @@ -293,8 +293,8 @@ """ if not isinstance(user_model, BaseModel): - logger.warning("Force convert framework model to lpot model.") - self._model = LpotModel(user_model) + logger.warning("Force convert framework model to neural_compressor model.") + self._model = NCModel(user_model) else: self._model = user_model @@ -310,23 +310,23 @@ def metric(self): @metric.setter def metric(self, user_metric): - """Set metric class and lpot will initialize this class when evaluation - lpot have many built-in metrics, but user can set specific metric through + """Set metric class and neural_compressor will initialize this class when evaluation + neural_compressor has many built-in metrics, but user can set specific metric through this api. The metric class should take the outputs of the model or - postprocess(if have) as inputs, lpot built-in metric always take + postprocess(if have) as inputs, neural_compressor built-in metrics always take (predictions, labels) as inputs for update, - and user_metric.metric_cls should be sub_class of lpot.metric.BaseMetric. + and user_metric.metric_cls should be sub_class of neural_compressor.metric.BaseMetric. Args: - user_metric(lpot.common.Metric): user_metric should be object initialized from - lpot.common.Metric, in this method the + user_metric(neural_compressor.common.Metric): user_metric should be object initialized from + neural_compressor.common.Metric, in this method the user_metric.metric_cls will be registered to specific frameworks and initialized. """ - from .common import Metric as LpotMetric - assert isinstance(user_metric, LpotMetric), \ - 'please initialize a lpot.common.Metric and set....' + from .common import Metric as NCMetric + assert isinstance(user_metric, NCMetric), \ + 'please initialize a neural_compressor.common.Metric and set....' metric_cfg = {user_metric.name : {**user_metric.kwargs}} if deep_get(self.conf.usr_cfg, "evaluation.accuracy.metric"): @@ -345,21 +345,21 @@ def postprocess(self, user_postprocess): @postprocess.setter def postprocess(self, user_postprocess): - """Set postprocess class and lpot will initialize this class when evaluation. + """Set postprocess class and neural_compressor will initialize this class when evaluation. The postprocess class should take the outputs of the model as inputs, and output (predictions, labels) as inputs for metric update. - user_postprocess.postprocess_cls should be sub_class of lpot.data.BaseTransform. + user_postprocess.postprocess_cls should be sub_class of neural_compressor.data.BaseTransform. Args: - user_postprocess(lpot.common.Postprocess): - user_postprocess should be object initialized from lpot.common.Postprocess, + user_postprocess(neural_compressor.common.Postprocess): + user_postprocess should be object initialized from neural_compressor.common.Postprocess, in this method the user_postprocess.postprocess_cls will be registered to specific frameworks and initialized. """ - from .common import Postprocess as LpotPostprocess - assert isinstance(user_postprocess, LpotPostprocess), \ - 'please initialize a lpot.common.Postprocess and set....' + from .common import Postprocess as NCPostprocess + assert isinstance(user_postprocess, NCPostprocess), \ + 'please initialize a neural_compressor.common.Postprocess and set....'
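To make the metric contract above concrete: the setter expects a neural_compressor.common.Metric wrapper around a user class that consumes (predictions, labels). The sketch below is illustrative; the update/reset/result protocol is an assumption (the docstring only requires a neural_compressor.metric.BaseMetric subclass, omitted here for brevity), and the positional `Metric(metric_cls, name)` call mirrors the quantization.py hunk further down.

```python
from neural_compressor.experimental import common

class PlainAccuracy:
    """Illustrative metric: fraction of predictions equal to their labels."""

    def __init__(self):
        self.correct = 0
        self.total = 0

    def update(self, predictions, labels):
        for pred, label in zip(predictions, labels):
            self.correct += int(pred == label)
            self.total += 1

    def reset(self):
        self.correct = 0
        self.total = 0

    def result(self):
        return self.correct / self.total if self.total else 0.0

# graph_opt is the Graph_Optimization instance from the previous sketch.
# The wrapper only records the class; instantiation happens once the
# framework adaptor is known.
graph_opt.metric = common.Metric(PlainAccuracy, 'plain_accuracy')
```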
postprocess_cfg = {user_postprocess.name : {**user_postprocess.kwargs}} if deep_get(self.conf.usr_cfg, "evaluation.accuracy.postprocess"): logger.warning("Override the value of `postprocess` field defined in yaml file" \ diff --git a/lpot/experimental/metric/__init__.py b/neural_compressor/experimental/metric/__init__.py similarity index 100% rename from lpot/experimental/metric/__init__.py rename to neural_compressor/experimental/metric/__init__.py diff --git a/lpot/experimental/metric/bleu.py b/neural_compressor/experimental/metric/bleu.py similarity index 100% rename from lpot/experimental/metric/bleu.py rename to neural_compressor/experimental/metric/bleu.py diff --git a/lpot/experimental/metric/bleu_util.py b/neural_compressor/experimental/metric/bleu_util.py similarity index 98% rename from lpot/experimental/metric/bleu_util.py rename to neural_compressor/experimental/metric/bleu_util.py index 9426f0aafb5..300b588f086 100644 --- a/lpot/experimental/metric/bleu_util.py +++ b/neural_compressor/experimental/metric/bleu_util.py @@ -45,7 +45,7 @@ import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin -from lpot.utils.utility import LazyImport +from neural_compressor.utils.utility import LazyImport tf = LazyImport('tensorflow') def _get_ngrams_with_counter(segment, max_order): diff --git a/lpot/experimental/metric/coco_label_map.py b/neural_compressor/experimental/metric/coco_label_map.py similarity index 100% rename from lpot/experimental/metric/coco_label_map.py rename to neural_compressor/experimental/metric/coco_label_map.py diff --git a/lpot/experimental/metric/coco_tools.py b/neural_compressor/experimental/metric/coco_tools.py similarity index 99% rename from lpot/experimental/metric/coco_tools.py rename to neural_compressor/experimental/metric/coco_tools.py index 8634cb9ca14..1ad510e4155 100644 --- a/lpot/experimental/metric/coco_tools.py +++ b/neural_compressor/experimental/metric/coco_tools.py @@ -60,7 +60,7 @@ import copy import time import numpy as np -from lpot.utils import logger +from neural_compressor.utils import logger from pycocotools import coco from pycocotools import cocoeval diff --git a/lpot/experimental/metric/evaluate_squad.py b/neural_compressor/experimental/metric/evaluate_squad.py similarity index 100% rename from lpot/experimental/metric/evaluate_squad.py rename to neural_compressor/experimental/metric/evaluate_squad.py diff --git a/lpot/experimental/metric/f1.py b/neural_compressor/experimental/metric/f1.py similarity index 98% rename from lpot/experimental/metric/f1.py rename to neural_compressor/experimental/metric/f1.py index 478456393d0..5fca16aa8cc 100644 --- a/lpot/experimental/metric/f1.py +++ b/neural_compressor/experimental/metric/f1.py @@ -20,7 +20,7 @@ from collections import Counter, abc import string import re -from lpot.utils import logger +from neural_compressor.utils import logger def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" diff --git a/lpot/experimental/metric/metric.py b/neural_compressor/experimental/metric/metric.py similarity index 99% rename from lpot/experimental/metric/metric.py rename to neural_compressor/experimental/metric/metric.py index 94678bdbaeb..25e195f89f8 100644 --- a/lpot/experimental/metric/metric.py +++ b/neural_compressor/experimental/metric/metric.py @@ -17,8 +17,8 @@ from abc import abstractmethod from collections import Counter -from lpot.utils.utility import LazyImport, singleton -from lpot.utils import logger +from 
neural_compressor.utils.utility import LazyImport, singleton +from neural_compressor.utils import logger from sklearn.metrics import accuracy_score import numpy as np import collections diff --git a/lpot/experimental/model_conversion.py b/neural_compressor/experimental/model_conversion.py similarity index 86% rename from lpot/experimental/model_conversion.py rename to neural_compressor/experimental/model_conversion.py index 6ff41aa093a..ec29ca0dc26 100644 --- a/lpot/experimental/model_conversion.py +++ b/neural_compressor/experimental/model_conversion.py @@ -22,24 +22,24 @@ import datetime import numpy as np import yaml -from lpot.adaptor import FRAMEWORKS +from neural_compressor.adaptor import FRAMEWORKS from ..conf.config import Conf from ..conf.dotdict import deep_get, deep_set, DotDict from ..strategy import STRATEGIES from ..utils import logger from ..utils.create_obj_from_config import create_dataloader, create_eval_func from ..utils.utility import CpuInfo, set_backend -from .common import Model as LpotModel +from .common import Model as NCModel from ..model import BaseModel class ModelConversion(): """ModelConversion class is used to convert one model format to another. - Currently LPOT only supports Quantization-aware training TensorFlow model to Default + Currently Neural Compressor only supports converting a Quantization-aware training TensorFlow model to a Default quantized model. The typical usage is: - from lpot.experimental import ModelConversion, common + from neural_compressor.experimental import ModelConversion, common conversion = ModelConversion() conversion.source = 'QAT' conversion.destination = 'default' @@ -106,7 +106,7 @@ def __call__(self): q_model = self.adaptor.convert(self._model, self._source, self._destination) # when eval_func is None but metric or _eval_dataloader is set by yaml or code, - # it means LPOT will create the eval_func from these info. + # it means Neural Compressor will create the eval_func from this info. metric_cfg = deep_get(cfg, 'evaluation.accuracy.metric') postprocess_cfg = deep_get(cfg, 'evaluation.accuracy.postprocess') if self._eval_func is None and metric_cfg: @@ -182,16 +182,16 @@ def eval_dataloader(self, dataloader): dataloader(generator): user are supported to set a user defined dataloader which meet the requirements that can yield tuple of (input, label)/(input, _) batched data. - Another good practice is to use lpot.common.DataLoader - to initialize a lpot dataloader object. - Notice lpot.common.DataLoader is just a wrapper of the + Another good practice is to use neural_compressor.common.DataLoader + to initialize a neural_compressor dataloader object. + Notice neural_compressor.common.DataLoader is just a wrapper of the information needed to build a dataloader, it can't yield batched data and only in this setter method a 'real' eval_dataloader will be created, the reason is we have to know the framework info and only after the Quantization object created then framework infomation can be known. Future we will support - creating iterable dataloader from lpot.common.DataLoader + creating iterable dataloader from neural_compressor.common.DataLoader """ from .common import _generate_common_dataloader @@ -209,7 +209,7 @@ def model(self, user_model): Args: user_model: user are supported to set model from original framework model format (eg, tensorflow frozen_pb or path to a saved model), but not recommended. - Best practice is to set from a initialized lpot.common.Model. + Best practice is to set from a initialized neural_compressor.common.Model.
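The class docstring above already outlines the intended flow; written out with the renamed imports it becomes the sketch below. Paths are placeholders, and the eval_dataloader/metric attributes from the surrounding setters are optional.

```python
from neural_compressor.experimental import ModelConversion, common

conversion = ModelConversion()
conversion.source = 'QAT'
conversion.destination = 'default'
conversion.model = common.Model('/path/to/qat_model')  # placeholder path

# Converts the QAT TensorFlow model to a default quantized model; accuracy
# is evaluated afterwards only if a metric/eval_dataloader or eval_func
# has been configured.
q_model = conversion()
```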
If tensorflow model is used, model's inputs/outputs will be auto inferred, but sometimes auto inferred inputs/outputs will not meet your requests, set them manually in config yaml file. Another corner case is slim model @@ -218,8 +218,8 @@ """ if not isinstance(user_model, BaseModel): - logger.warning("Force convert framework model to lpot model.") - self._model = LpotModel(user_model) + logger.warning("Force convert framework model to neural_compressor model.") + self._model = NCModel(user_model) else: self._model = user_model @@ -241,23 +241,23 @@ def metric(self): @metric.setter def metric(self, user_metric): - """Set metric class and lpot will initialize this class when evaluation - lpot have many built-in metrics, but user can set specific metric through + """Set metric class and neural_compressor will initialize this class when evaluation + neural_compressor has many built-in metrics, but user can set specific metric through this api. The metric class should take the outputs of the model or - postprocess(if have) as inputs, lpot built-in metric always take + postprocess(if have) as inputs, neural_compressor built-in metrics always take (predictions, labels) as inputs for update, - and user_metric.metric_cls should be sub_class of lpot.metric.BaseMetric. + and user_metric.metric_cls should be sub_class of neural_compressor.metric.BaseMetric. Args: - user_metric(lpot.common.Metric): user_metric should be object initialized from - lpot.common.Metric, in this method the + user_metric(neural_compressor.common.Metric): user_metric should be object initialized from + neural_compressor.common.Metric, in this method the user_metric.metric_cls will be registered to specific frameworks and initialized. """ - from .common import Metric as LpotMetric - assert isinstance(user_metric, LpotMetric), \ - 'please initialize a lpot.common.Metric and set....' + from .common import Metric as NCMetric + assert isinstance(user_metric, NCMetric), \ + 'please initialize a neural_compressor.common.Metric and set....' metric_cfg = {user_metric.name : {**user_metric.kwargs}} if deep_get(self.conf.usr_cfg, "evaluation.accuracy.metric"): @@ -276,21 +276,21 @@ def postprocess(self, user_postprocess): @postprocess.setter def postprocess(self, user_postprocess): - """Set postprocess class and lpot will initialize this class when evaluation. + """Set postprocess class and neural_compressor will initialize this class when evaluation. The postprocess class should take the outputs of the model as inputs, and output (predictions, labels) as inputs for metric update. - user_postprocess.postprocess_cls should be sub_class of lpot.data.BaseTransform. + user_postprocess.postprocess_cls should be sub_class of neural_compressor.data.BaseTransform. Args: - user_postprocess(lpot.common.Postprocess): - user_postprocess should be object initialized from lpot.common.Postprocess, + user_postprocess(neural_compressor.common.Postprocess): + user_postprocess should be object initialized from neural_compressor.common.Postprocess, in this method the user_postprocess.postprocess_cls will be registered to specific frameworks and initialized. """ - from .common import Postprocess as LpotPostprocess - assert isinstance(user_postprocess, LpotPostprocess), \ - 'please initialize a lpot.common.Postprocess and set....' + from .common import Postprocess as NCPostprocess + assert isinstance(user_postprocess, NCPostprocess), \ + 'please initialize a neural_compressor.common.Postprocess and set....'
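And the postprocess contract above, made concrete: model outputs in, (predictions, labels) out, wrapped in neural_compressor.common.Postprocess. Only the positional `Postprocess(postprocess_cls, name)` call is visible in this diff (quantization.py hunk below); the `__call__` protocol shown here is an assumption, and the docstring's BaseTransform subclassing is skipped for brevity.

```python
import numpy as np
from neural_compressor.experimental import common

class ArgmaxPostprocess:
    """Illustrative postprocess: reduce raw logits to class ids so that
    the metric receives (predictions, labels)."""

    def __call__(self, sample):
        logits, labels = sample
        return np.argmax(logits, axis=-1), labels

# conversion is the ModelConversion instance from the previous sketch.
conversion.postprocess = common.Postprocess(ArgmaxPostprocess, 'argmax')
```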
postprocess_cfg = {user_postprocess.name : {**user_postprocess.kwargs}} if deep_get(self.conf.usr_cfg, "evaluation.accuracy.postprocess"): logger.warning("Override the value of `postprocess` field defined in yaml file" \ diff --git a/lpot/experimental/pruning.py b/neural_compressor/experimental/pruning.py similarity index 98% rename from lpot/experimental/pruning.py rename to neural_compressor/experimental/pruning.py index 4f9871f3166..d2a3a84fc7e 100644 --- a/lpot/experimental/pruning.py +++ b/neural_compressor/experimental/pruning.py @@ -98,7 +98,7 @@ def _post_epoch_end(self): pruner.post_epoch_end() def pre_process(self): - assert isinstance(self._model, BaseModel), 'need set lpot Model for pruning....' + assert isinstance(self._model, BaseModel), 'need set neural_compressor Model for pruning....' framework_specific_info = {'device': self.cfg.device, 'random_seed': self.cfg.tuning.random_seed, @@ -207,7 +207,7 @@ def __call__(self): and evaluation phase by code. The tool provides built-in dataloaders and evaluators, user just need provide a dataset implemented __iter__ or __getitem__ methods and invoke dataloader() - with dataset as input parameter to create lpot dataloader before calling this + with dataset as input parameter to create neural_compressor dataloader before calling this function. After that, User specifies fp32 "model", training dataset "p_dataloader" diff --git a/lpot/experimental/pruning_recipes/__init__.py b/neural_compressor/experimental/pruning_recipes/__init__.py similarity index 100% rename from lpot/experimental/pruning_recipes/__init__.py rename to neural_compressor/experimental/pruning_recipes/__init__.py diff --git a/lpot/experimental/pruning_recipes/patterns/__init__.py b/neural_compressor/experimental/pruning_recipes/patterns/__init__.py similarity index 100% rename from lpot/experimental/pruning_recipes/patterns/__init__.py rename to neural_compressor/experimental/pruning_recipes/patterns/__init__.py diff --git a/lpot/experimental/pruning_recipes/patterns/pattern.py b/neural_compressor/experimental/pruning_recipes/patterns/pattern.py similarity index 100% rename from lpot/experimental/pruning_recipes/patterns/pattern.py rename to neural_compressor/experimental/pruning_recipes/patterns/pattern.py diff --git a/lpot/experimental/pruning_recipes/patterns/tile_pattern.py b/neural_compressor/experimental/pruning_recipes/patterns/tile_pattern.py similarity index 100% rename from lpot/experimental/pruning_recipes/patterns/tile_pattern.py rename to neural_compressor/experimental/pruning_recipes/patterns/tile_pattern.py diff --git a/lpot/experimental/quantization.py b/neural_compressor/experimental/quantization.py similarity index 89% rename from lpot/experimental/quantization.py rename to neural_compressor/experimental/quantization.py index 548f68f7754..bc241c4d411 100644 --- a/lpot/experimental/quantization.py +++ b/neural_compressor/experimental/quantization.py @@ -26,7 +26,7 @@ from ..utils.utility import time_limit from ..utils.create_obj_from_config import create_dataloader from ..adaptor import FRAMEWORKS -from .common import Model as LpotModel +from .common import Model as NCModel from ..model import BaseModel from ..conf.config import Quantization_Conf from ..utils.utility import set_backend @@ -171,7 +171,7 @@ def execute(self): def __call__(self): """The main entry point of automatic quantization tuning. 
- This interface works on all the DL frameworks that lpot supports + This interface works on all the DL frameworks that neural_compressor supports and provides three usages: a) Fully yaml configuration: User specifies all the info through yaml, including dataloaders used in calibration and evaluation phases @@ -183,7 +183,7 @@ def __call__(self): and evaluation phase by code. The tool provides built-in dataloaders and evaluators, user just need provide a dataset implemented __iter__ or __getitem__ methods and invoke dataloader() - with dataset as input parameter to create lpot dataloader before calling this + with dataset as input parameter to create neural_compressor dataloader before calling this function. After that, User specifies fp32 "model", calibration dataset "calib_dataloader" @@ -234,9 +234,9 @@ def calib_dataloader(self, dataloader): dataloader(generator): user are supported to set a user defined dataloader which meet the requirements that can yield tuple of (input, label)/(input, _) batched data. Another good - practice is to use lpot.experimental.common.DataLoader - to initialize a lpot dataloader object. Notice - lpot.experimental.common.DataLoader is just a wrapper of the + practice is to use neural_compressor.experimental.common.DataLoader + to initialize a neural_compressor dataloader object. Notice + neural_compressor.experimental.common.DataLoader is just a wrapper of the information needed to build a dataloader, it can't yield batched data and only in this setter method a 'real' calib_dataloader will be created, @@ -244,7 +244,7 @@ def calib_dataloader(self, dataloader): and only after the Quantization object created then framework infomation can be known. Future we will support creating iterable dataloader - from lpot.experimental.common.DataLoader + from neural_compressor.experimental.common.DataLoader """ from .common import _generate_common_dataloader self._calib_dataloader = _generate_common_dataloader( @@ -257,24 +257,24 @@ def metric(self): @metric.setter def metric(self, user_metric): - """Set metric class and lpot will initialize this class when evaluation - lpot have many built-in metrics, but user can set specific metric through + """Set metric class and neural_compressor will initialize this class when evaluation + neural_compressor has many built-in metrics, but user can set specific metric through this api. The metric class should take the outputs of the model or - postprocess(if have) as inputs, lpot built-in metric always take + postprocess(if have) as inputs, neural_compressor built-in metrics always take (predictions, labels) as inputs for update, - and user_metric.metric_cls should be sub_class of lpot.metric.BaseMetric. + and user_metric.metric_cls should be sub_class of neural_compressor.metric.BaseMetric. Args: - user_metric(lpot.experimental.common.Metric): + user_metric(neural_compressor.experimental.common.Metric): user_metric should be object initialized from - lpot.experimental.common.Metric, in this method the + neural_compressor.experimental.common.Metric, in this method the user_metric.metric_cls will be registered to specific frameworks and initialized. """ - from .common import Metric as LpotMetric - assert isinstance(user_metric, LpotMetric), \ - 'please initialize a lpot.experimental.common.Metric and set....' + from .common import Metric as NCMetric + assert isinstance(user_metric, NCMetric), \ + 'please initialize a neural_compressor.experimental.common.Metric and set....'
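Usage (b) from the `__call__` docstring above, reduced to a sketch. The list-backed dataset satisfies the `__iter__`/`__getitem__` requirement the docstring states; the `batch_size` keyword on common.DataLoader is an assumption, and all paths are placeholders.

```python
from neural_compressor.experimental import Quantization, common

# The docstring only requires __iter__ or __getitem__, so a plain list of
# (input, label) pairs is enough for illustration.
calib_dataset = [([0.0, 1.0], 0), ([1.0, 0.0], 1)]

quantizer = Quantization('/path/to/ptq.yaml')        # placeholder config
quantizer.model = common.Model('/path/to/fp32.pb')   # placeholder model
quantizer.calib_dataloader = common.DataLoader(calib_dataset, batch_size=1)

q_model = quantizer()             # accuracy-driven tuning loop
q_model.save('/path/to/q_model')  # save() is shown for PyTorch models below
```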
metric_cfg = {user_metric.name : {**user_metric.kwargs}} if deep_get(self.conf.usr_cfg, "evaluation.accuracy.metric"): @@ -293,22 +293,22 @@ def postprocess(self, user_postprocess): @postprocess.setter def postprocess(self, user_postprocess): - """Set postprocess class and lpot will initialize this class when evaluation. + """Set postprocess class and neural_compressor will initialize this class when evaluation. The postprocess class should take the outputs of the model as inputs, and output (predictions, labels) as inputs for metric update. - user_postprocess.postprocess_cls should be sub_class of lpot.data.BaseTransform. + user_postprocess.postprocess_cls should be sub_class of neural_compressor.data.BaseTransform. Args: - user_postprocess(lpot.experimental.common.Postprocess): + user_postprocess(neural_compressor.experimental.common.Postprocess): user_postprocess should be object initialized from - lpot.experimental.common.Postprocess, + neural_compressor.experimental.common.Postprocess, in this method the user_postprocess.postprocess_cls will be registered to specific frameworks and initialized. """ - from .common import Postprocess as LpotPostprocess - assert isinstance(user_postprocess, LpotPostprocess), \ - 'please initialize a lpot.experimental.common.Postprocess and set....' + from .common import Postprocess as NCPostprocess + assert isinstance(user_postprocess, NCPostprocess), \ + 'please initialize a neural_compressor.experimental.common.Postprocess and set....' postprocess_cfg = {user_postprocess.name : {**user_postprocess.kwargs}} if deep_get(self.conf.usr_cfg, "evaluation.accuracy.postprocess"): logger.warning("Override the value of `postprocess` field defined in yaml file" \ diff --git a/lpot/experimental/scheduler.py b/neural_compressor/experimental/scheduler.py similarity index 93% rename from lpot/experimental/scheduler.py rename to neural_compressor/experimental/scheduler.py index faad40f48fa..d15cc13b5e0 100644 --- a/lpot/experimental/scheduler.py +++ b/neural_compressor/experimental/scheduler.py @@ -20,7 +20,7 @@ from ..conf.config import Conf from ..utils import logger -from .common import Model as LpotModel +from .common import Model as NCModel from ..model import BaseModel from .common import Metric, Postprocess from ..strategy import STRATEGIES @@ -44,15 +44,15 @@ ] class Scheduler(object): - """Scheduler for lpot component pipeline execution. + """Scheduler for neural_compressor component pipeline execution. - LPOT supports serveral seperate components: Quantization, Pruning, Benchmarking. + Neural Compressor supports several separate components: Quantization, Pruning, Benchmarking. This scheduler will sequentially execute specified components by the order of appending. This interface provids an unique entry to pipeline execute all supported components.
There are two typical usages: - 1) if all informations are set in user configuration yaml files by using lpot built-in + 1) if all information is set in user configuration yaml files by using neural_compressor built-in dataloaders/datasets/metrics, the code usage is like below: prune = Pruning('/path/to/pruning.yaml') @@ -65,7 +65,7 @@ class Scheduler(object): opt_model.save() - 2) if lpot built-in dataloaders/datasets/metrics could not fully meet user requirements, + 2) if neural_compressor built-in dataloaders/datasets/metrics could not fully meet user requirements, customized dataloaders/datasets/metrics are needed, the code usage is like below: prune = Pruning('/path/to/pruning.yaml') @@ -92,7 +92,7 @@ def __init__(self): self.components = [] def append(self, *args): - """Add lpot component into pipeline for sequential execution. + """Add neural_compressor component into pipeline for sequential execution. Args: conf_fname_or_obj (string or obj): The path to user configuration yaml file or @@ -105,7 +105,7 @@ def append(self, *args): or Data loader for training phase of Pruning, if its corresponding field is not configured in yaml. eval_dataloader (generator): Optional. Data loader for evaluation phase of all - lpot components, if its corresponding field is not + neural_compressor components, if its corresponding field is not configured in yaml and eval_func is not specified. postprocess (Postprocess): Optional. Object initialized from common.Postprocess. metric (Metric): Optional. Object initialized from common.Metric. @@ -150,10 +150,10 @@ def __call__(self): return model def combine(self, *args): - """Combine lpot components into a new component. + """Combine neural_compressor components into a new component. Args: args (Component): Components to be combined together. Input Component should be - supported in LPOT and pass the sanity check during combine. The illegal combination + supported in Neural Compressor and pass the sanity check during combine. The illegal combination (e.g. Components uses different frameworks) returns an error. Returns: @@ -297,7 +297,7 @@ def model(self, user_model): user_model: user are supported to set model from original framework model format (eg, tensorflow frozen_pb or path to a saved model), but not recommended. Best practice is to set from a initialized - lpot.experimental.common.Model. + neural_compressor.experimental.common.Model.
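Both usages enumerated in this docstring reduce to the driver below. The docstring writes `scheduler.model(...)` as a call, while the property setter renamed further down suggests plain assignment, which is what the sketch uses; paths are placeholders.

```python
from neural_compressor.experimental import Pruning, Quantization, Scheduler, common

prune = Pruning('/path/to/pruning.yaml')
quantizer = Quantization('/path/to/quantization.yaml')

scheduler = Scheduler()
scheduler.model = common.Model('/path/to/model')
scheduler.append(prune)      # components execute sequentially, in append order
scheduler.append(quantizer)

opt_model = scheduler()      # runs the whole pipeline
opt_model.save('/path/to/opt_model')
```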
If tensorflow model is used, model's inputs/outputs will be auto inferenced, but sometimes auto inferenced inputs/outputs will not meet your requests, @@ -308,8 +308,8 @@ def model(self, user_model): """ if not isinstance(user_model, BaseModel): - logger.warning("Force convert framework model to lpot model.") - self._model = LpotModel(user_model) + logger.warning("Force convert framework model to neural_compressor model.") + self._model = NCModel(user_model) else: self._model = user_model diff --git a/lpot/metric/__init__.py b/neural_compressor/metric/__init__.py similarity index 100% rename from lpot/metric/__init__.py rename to neural_compressor/metric/__init__.py diff --git a/lpot/model/__init__.py b/neural_compressor/model/__init__.py similarity index 100% rename from lpot/model/__init__.py rename to neural_compressor/model/__init__.py diff --git a/lpot/model/base_model.py b/neural_compressor/model/base_model.py similarity index 94% rename from lpot/model/base_model.py rename to neural_compressor/model/base_model.py index da784bf4fc5..8920eed2d44 100644 --- a/lpot/model/base_model.py +++ b/neural_compressor/model/base_model.py @@ -18,7 +18,7 @@ from abc import abstractmethod class BaseModel: - ''' base class of all lpot.model, will play graph role''' + ''' base class of all neural_compressor.model, will play graph role''' def __init__(self, model, **kwargs): pass diff --git a/lpot/model/engine_model.py b/neural_compressor/model/engine_model.py similarity index 98% rename from lpot/model/engine_model.py rename to neural_compressor/model/engine_model.py index de0cc420766..f25f7b58057 100644 --- a/lpot/model/engine_model.py +++ b/neural_compressor/model/engine_model.py @@ -19,7 +19,7 @@ import copy import logging from pathlib import Path -from lpot.model.base_model import BaseModel +from neural_compressor.model.base_model import BaseModel logger = logging.getLogger() diff --git a/lpot/model/model.py b/neural_compressor/model/model.py similarity index 97% rename from lpot/model/model.py rename to neural_compressor/model/model.py index e6fe0f27faf..3b50d0ea8ce 100644 --- a/lpot/model/model.py +++ b/neural_compressor/model/model.py @@ -21,13 +21,13 @@ import inspect from collections import OrderedDict from abc import abstractmethod -from lpot.utils.utility import LazyImport, compute_sparsity -from lpot.utils import logger -from lpot.conf.dotdict import deep_get, deep_set -from lpot.conf import config as cfg -from lpot.model.base_model import BaseModel -from lpot.model.onnx_model import ONNXModel -from lpot.model.engine_model import EngineModel +from neural_compressor.utils.utility import LazyImport, compute_sparsity +from neural_compressor.utils import logger +from neural_compressor.conf.dotdict import deep_get, deep_set +from neural_compressor.conf import config as cfg +from neural_compressor.model.base_model import BaseModel +from neural_compressor.model.onnx_model import ONNXModel +from neural_compressor.model.engine_model import EngineModel torch = LazyImport('torch') tf = LazyImport('tensorflow') @@ -49,7 +49,7 @@ def get_model_type(model): type (string): model type """ - from lpot.adaptor.tf_utils.util import is_saved_model_format, is_ckpt_format + from neural_compressor.adaptor.tf_utils.util import is_saved_model_format, is_ckpt_format if isinstance(model, tf.Graph): return 'graph' elif isinstance(model, tf.compat.v1.GraphDef): @@ -108,7 +108,7 @@ def get_model_fwk_name(model): """Detect the input model belongs to which framework Args: - model (string): framework name that supported 
by LPOT, if there's no available fwk info, + model (string): framework name that supported by Neural Compressor, if there's no available fwk info, then return 'NA'. """ def _is_onnxruntime(model): @@ -155,9 +155,9 @@ def _is_mxnet(model): assert os.path.exists(absmodel) or os.path.exists(absmodel+'.pb'), \ 'invalid input path, the file does not exist!' - #check if the input model is a lpot model - for name, lpotmodel in MODELS.items(): - if isinstance(model, lpotmodel): + #check if the input model is a neural_compressor model + for name, nc_model in MODELS.items(): + if isinstance(model, nc_model): return 'pytorch' if name == 'pytorch_ipex' or name == 'pytorch_fx' else name if isinstance(model, TensorflowBaseModel): return 'tensorflow' @@ -226,7 +226,7 @@ def validate_and_inference_input_output(graph_def, \ input_tensor_names (list of string): validated input_tensor_names output_tensor_names (list of string): validated output_tensor_names """ - from lpot.adaptor.tf_utils.util import get_input_output_node_names + from neural_compressor.adaptor.tf_utils.util import get_input_output_node_names temp_output_tensor_names = [] if validate_graph_node(graph_def, tensor_to_node(input_tensor_names)): input_tensor_names = input_tensor_names @@ -287,8 +287,8 @@ def graph_def_session(model, input_tensor_names, output_tensor_names, **kwargs): except: input_tensor_names, output_tensor_names = validate_and_inference_input_output(\ model, input_tensor_names, output_tensor_names) - from lpot.adaptor.tf_utils.util import fix_ref_type_of_graph_def - from lpot.adaptor.tf_utils.util import strip_unused_nodes + from neural_compressor.adaptor.tf_utils.util import fix_ref_type_of_graph_def + from neural_compressor.adaptor.tf_utils.util import strip_unused_nodes model = fix_ref_type_of_graph_def(model) input_node_names = tensor_to_node(input_tensor_names) output_node_names = tensor_to_node(output_tensor_names) @@ -464,7 +464,7 @@ def checkpoint_session(model, input_tensor_names, output_tensor_names, **kwargs) sess.run(tf.compat.v1.global_variables_initializer()) saver.restore(sess, os.path.join(model, ckpt_prefix)) - from lpot.adaptor.tf_utils.util import get_input_output_node_names + from neural_compressor.adaptor.tf_utils.util import get_input_output_node_names if validate_graph_node(sess.graph.as_graph_def(), tensor_to_node(input_tensor_names)): input_tensor_names = input_tensor_names else: @@ -780,13 +780,13 @@ def output_node_names(self): @property def input_tensor(self): - from lpot.adaptor.tf_utils.util import get_tensor_by_name + from neural_compressor.adaptor.tf_utils.util import get_tensor_by_name return [get_tensor_by_name(\ self.graph, x) for x in self.input_tensor_names] @property def output_tensor(self): - from lpot.adaptor.tf_utils.util import get_tensor_by_name + from neural_compressor.adaptor.tf_utils.util import get_tensor_by_name return [get_tensor_by_name(\ self.graph, x) for x in self.output_tensor_names] @@ -882,7 +882,7 @@ def save(self, root=None): from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import tag_constants - from lpot.adaptor.tf_utils.util import get_tensor_by_name + from neural_compressor.adaptor.tf_utils.util import get_tensor_by_name builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(root) sigs = {} with tf.compat.v1.Session(graph=tf.Graph()) as sess: @@ -907,7 +907,7 @@ class TensorflowCheckpointModel(TensorflowBaseModel): def graph_def(self): if self.model_type == 'graph_def': return self.sess.graph.as_graph_def() - 
from lpot.adaptor.tf_utils.util import _parse_ckpt_bn_input + from neural_compressor.adaptor.tf_utils.util import _parse_ckpt_bn_input from tensorflow.python.framework import graph_util graph_def = self.sess.graph.as_graph_def() graph_def = _parse_ckpt_bn_input(graph_def) diff --git a/lpot/model/nets_factory.py b/neural_compressor/model/nets_factory.py similarity index 100% rename from lpot/model/nets_factory.py rename to neural_compressor/model/nets_factory.py diff --git a/lpot/model/onnx_model.py b/neural_compressor/model/onnx_model.py similarity index 98% rename from lpot/model/onnx_model.py rename to neural_compressor/model/onnx_model.py index c5778af2186..54c148b16b3 100644 --- a/lpot/model/onnx_model.py +++ b/neural_compressor/model/onnx_model.py @@ -18,8 +18,8 @@ import os import logging from pathlib import Path -from lpot.utils.utility import LazyImport -from lpot.model.base_model import BaseModel +from neural_compressor.utils.utility import LazyImport +from neural_compressor.model.base_model import BaseModel onnx = LazyImport('onnx') ort = LazyImport("onnxruntime") diff --git a/lpot/objective.py b/neural_compressor/objective.py similarity index 97% rename from lpot/objective.py rename to neural_compressor/objective.py index 1a96919d64c..3a941644f1d 100644 --- a/lpot/objective.py +++ b/neural_compressor/objective.py @@ -21,7 +21,7 @@ import tracemalloc from .utils.utility import get_size -"""The objectives supported by lpot, which is driven by accuracy. +"""The objectives supported by neural_compressor, which is driven by accuracy. To support new objective, developer just need implement a new subclass in this file. """ OBJECTIVES = {} @@ -41,7 +41,7 @@ def objective_registry(cls): return cls class Measurer(object): - """The base class for precise benchmark supported by lpot. + """The base class for precise benchmark supported by neural_compressor. Args: representation (string): the string represenation of Measurer object @@ -140,7 +140,7 @@ def end(self): class Objective(object): - """The base class of objectives supported by lpot. + """The base class of objectives supported by neural_compressor. Args: accuracy_criterion (dict): The dict of supported accuracy criterion. 
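objective.py, renamed above, keeps its OBJECTIVES dict and objective_registry decorator. For readers new to the codebase, this is the usual registry-decorator pattern; the sketch below is a generic reconstruction, not the actual implementation, and the lower-cased class name as registry key is an assumption.

```python
OBJECTIVES = {}

def objective_registry(cls):
    """Record the decorated class so the tuner can look it up by name."""
    name = cls.__name__.lower()  # key choice is an assumption
    assert name not in OBJECTIVES, 'duplicate objective: {}'.format(name)
    OBJECTIVES[name] = cls
    return cls

@objective_registry
class Performance:
    """Placeholder objective; real ones measure latency, size, or footprint."""
    def evaluate(self, measurement):
        return measurement

assert 'performance' in OBJECTIVES  # registered at class-definition time
```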
diff --git a/lpot/pruners/__init__.py b/neural_compressor/pruners/__init__.py similarity index 100% rename from lpot/pruners/__init__.py rename to neural_compressor/pruners/__init__.py diff --git a/lpot/pruners/gradient_sensitivity.py b/neural_compressor/pruners/gradient_sensitivity.py similarity index 100% rename from lpot/pruners/gradient_sensitivity.py rename to neural_compressor/pruners/gradient_sensitivity.py diff --git a/lpot/pruners/magnitude.py b/neural_compressor/pruners/magnitude.py similarity index 100% rename from lpot/pruners/magnitude.py rename to neural_compressor/pruners/magnitude.py diff --git a/lpot/pruners/pattern_lock.py b/neural_compressor/pruners/pattern_lock.py similarity index 100% rename from lpot/pruners/pattern_lock.py rename to neural_compressor/pruners/pattern_lock.py diff --git a/lpot/pruners/pruner.py b/neural_compressor/pruners/pruner.py similarity index 100% rename from lpot/pruners/pruner.py rename to neural_compressor/pruners/pruner.py diff --git a/lpot/pruners/util/block_mask.py b/neural_compressor/pruners/util/block_mask.py similarity index 100% rename from lpot/pruners/util/block_mask.py rename to neural_compressor/pruners/util/block_mask.py diff --git a/lpot/pruning.py b/neural_compressor/pruning.py similarity index 97% rename from lpot/pruning.py rename to neural_compressor/pruning.py index bbc1c118cb9..d83a04de0aa 100644 --- a/lpot/pruning.py +++ b/neural_compressor/pruning.py @@ -71,7 +71,7 @@ def __call__(self, model, train_dataloader=None, pruning_func=None, eval_dataloa and evaluation phase by code. The tool provides built-in dataloaders and evaluators, user just need provide a dataset implemented __iter__ or __getitem__ methods and invoke dataloader() - with dataset as input parameter to create lpot dataloader before calling this + with dataset as input parameter to create neural_compressor dataloader before calling this function. After that, User specifies fp32 "model", train dataset "train_dataloader" @@ -143,7 +143,7 @@ def eval_func(model): """ logger.warning("This API is going to be deprecated. Please import " - "lpot.experimental.Pruning, initialize an instance of `Pruning`," + "neural_compressor.experimental.Pruning, initialize an instance of `Pruning`," "set its dataloader and metric attributes, then invoke its __call__ method.") self.exp_pruner.model = model self.exp_pruner.train_dataloader = train_dataloader diff --git a/lpot/quantization.py b/neural_compressor/quantization.py similarity index 92% rename from lpot/quantization.py rename to neural_compressor/quantization.py index f928e6736ae..c5ed7c55021 100644 --- a/lpot/quantization.py +++ b/neural_compressor/quantization.py @@ -48,7 +48,7 @@ def __call__(self, model, q_dataloader=None, q_func=None, eval_dataloader=None, eval_func=None): """The main entry point of automatic quantization tuning. - This interface works on all the DL frameworks that lpot supports + This interface works on all the DL frameworks that neural_compressor supports and provides three usages: a) Fully yaml configuration: User specifies all the info through yaml, including dataloaders used in calibration and evaluation phases @@ -60,7 +60,7 @@ def __call__(self, model, q_dataloader=None, q_func=None, eval_dataloader=None, and evaluation phase by code. 
The tool provides built-in dataloaders and evaluators, user just need provide a dataset implemented __iter__ or __getitem__ methods and invoke dataloader() - with dataset as input parameter to create lpot dataloader before calling this + with dataset as input parameter to create neural_compressor dataloader before calling this function. After that, User specifies fp32 "model", calibration dataset "q_dataloader" @@ -142,7 +142,7 @@ def eval_func(model): """ logger.warning("This API is going to be deprecated. Please import " - "lpot.experimental.Quantization, initialize an instance of `Quantization`," + "neural_compressor.experimental.Quantization, initialize an instance of `Quantization`," "set its dataloader and metric attributes, then invoke its __call__ method.") self.exp_quantizer.model = model @@ -156,14 +156,14 @@ def eval_func(model): elif eval_dataloader is not None: self.exp_quantizer.eval_dataloader = eval_dataloader - lpot_model = self.exp_quantizer() + nc_model = self.exp_quantizer() if self.exp_quantizer.framework == 'tensorflow': - return lpot_model.graph if lpot_model else None + return nc_model.graph if nc_model else None if self.exp_quantizer.framework == 'pytorch': saved_path = os.path.abspath(os.path.join(os.path.expanduser( self.exp_quantizer.conf.usr_cfg.tuning.workspace.path), 'checkpoint')) - lpot_model.save(saved_path) - return lpot_model.model + nc_model.save(saved_path) + return nc_model.model def dataset(self, dataset_type, *args, **kwargs): return DATASETS(self.exp_quantizer.framework)[dataset_type](*args, **kwargs) @@ -176,12 +176,12 @@ def dataloader(self, dataset, batch_size=1, collate_fn=None, last_batch='rollove pin_memory=pin_memory) def metric(self, name, metric_cls, **kwargs): - from .experimental.common import Metric as LpotMetric - lpot_metric = LpotMetric(metric_cls, name, **kwargs) - self.exp_quantizer.metric = lpot_metric + from .experimental.common import Metric as NCMetric + nc_metric = NCMetric(metric_cls, name, **kwargs) + self.exp_quantizer.metric = nc_metric def postprocess(self, name, postprocess_cls, **kwargs): - from .experimental.common import Postprocess as LpotPostprocess - lpot_postprocess = LpotPostprocess(postprocess_cls, name, **kwargs) - self.exp_quantizer.postprocess = lpot_postprocess + from .experimental.common import Postprocess as NCPostprocess + nc_postprocess = NCPostprocess(postprocess_cls, name, **kwargs) + self.exp_quantizer.postprocess = nc_postprocess diff --git a/lpot/strategy/__init__.py b/neural_compressor/strategy/__init__.py similarity index 100% rename from lpot/strategy/__init__.py rename to neural_compressor/strategy/__init__.py diff --git a/lpot/strategy/auto_mixed_precision.py b/neural_compressor/strategy/auto_mixed_precision.py similarity index 100% rename from lpot/strategy/auto_mixed_precision.py rename to neural_compressor/strategy/auto_mixed_precision.py diff --git a/lpot/strategy/basic.py b/neural_compressor/strategy/basic.py similarity index 100% rename from lpot/strategy/basic.py rename to neural_compressor/strategy/basic.py diff --git a/lpot/strategy/bayesian.py b/neural_compressor/strategy/bayesian.py similarity index 100% rename from lpot/strategy/bayesian.py rename to neural_compressor/strategy/bayesian.py diff --git a/lpot/strategy/exhaustive.py b/neural_compressor/strategy/exhaustive.py similarity index 100% rename from lpot/strategy/exhaustive.py rename to neural_compressor/strategy/exhaustive.py diff --git a/lpot/strategy/mse.py b/neural_compressor/strategy/mse.py similarity index 100% rename 
from lpot/strategy/mse.py rename to neural_compressor/strategy/mse.py diff --git a/lpot/strategy/random.py b/neural_compressor/strategy/random.py similarity index 100% rename from lpot/strategy/random.py rename to neural_compressor/strategy/random.py diff --git a/lpot/strategy/strategy.py b/neural_compressor/strategy/strategy.py similarity index 99% rename from lpot/strategy/strategy.py rename to neural_compressor/strategy/strategy.py index 1508337513e..e7bb3fefa28 100644 --- a/lpot/strategy/strategy.py +++ b/neural_compressor/strategy/strategy.py @@ -33,7 +33,7 @@ from ..conf.dotdict import DotDict, deep_get, deep_set from ..algorithm import AlgorithmScheduler -"""The tuning strategies supported by lpot, including basic, random, bayesian and mse. +"""The tuning strategies supported by neural_compressor, including basic, random, bayesian and mse. User could add new strategies by implementing new TuneStrategy subclass under this directory. The naming convention of new strategy subclass should be something like ABCTuneStrategy, user diff --git a/lpot/template/graph_optimization.yaml b/neural_compressor/template/graph_optimization.yaml similarity index 95% rename from lpot/template/graph_optimization.yaml rename to neural_compressor/template/graph_optimization.yaml index 100eb576cf6..3781f39e739 100644 --- a/lpot/template/graph_optimization.yaml +++ b/neural_compressor/template/graph_optimization.yaml @@ -40,7 +40,7 @@ graph_optimization: # optional. tuning constrai } evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: # optional. used to evaluate accuracy of passing model. topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. configs: # optional. if not specified, use all cores in 1 socket. @@ -49,7 +49,7 @@ evaluation: # optional. used to config inter_num_of_threads: 4 intra_num_of_threads: 28 kmp_blocktime: 1 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 256 dataset: TFRecordDataset: @@ -88,5 +88,5 @@ tuning: tensorboard: True # optional. dump tensor distribution in evaluation phase for debug purpose. default value is False. workspace: - path: /path/to/saving/directory # optional. default workspace is ./lpot_workspace/current_time_stamp, saving tuning history and deploy yaml. + path: /path/to/saving/directory # optional. default workspace is ./nc_workspace/current_time_stamp, saving tuning history and deploy yaml. resume: /path/to/a/specified/snapshot/file # optional. if specified, resume from tuning history. diff --git a/lpot/template/pruning.yaml b/neural_compressor/template/pruning.yaml similarity index 93% rename from lpot/template/pruning.yaml rename to neural_compressor/template/pruning.yaml index 462ddd436f0..ac3c2665b38 100644 --- a/lpot/template/pruning.yaml +++ b/neural_compressor/template/pruning.yaml @@ -15,7 +15,7 @@ version: 1.0 # optional. reserved for future use. if not specified, a supported version would be written back to user yaml. -model: # mandatory. lpot uses this module name and framework name to decide where to save tuning history and deploy yaml. +model: # mandatory. 
neural_compressor uses this module name and framework name to decide where to save tuning history and deploy yaml. name: resnet50v1.5 framework: pytorch # mandatory. supported values are tensorflow, pytorch, pytorch_fx, pytorch_ipex, onnxrt_integer, onnxrt_qlinear or mxnet; allow new framework backend extension. @@ -68,12 +68,12 @@ pruning: # mandatory only for prunin evaluation: # optional. used to config evaluation process. accuracy: # optional. used to evaluate accuracy of passing model. - metric: # optional. required if user doesn't provide eval_func in lpot.Pruning. + metric: # optional. required if user doesn't provide eval_func in neural_compressor.Pruning. topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. configs: # optional. if not specified, use all cores in 1 socket. cores_per_instance: 28 num_of_instance: 1 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Pruning. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Pruning. batch_size: 256 dataset: ImageFolder: @@ -110,6 +110,6 @@ tuning: tensorboard: True # optional. dump tensor distribution in evaluation phase for debug purpose. default value is False. workspace: - path: /path/to/saving/directory # optional. default workspace is ./lpot_workspace/current_time_stamp, saving tuning history and deploy yaml. + path: /path/to/saving/directory # optional. default workspace is ./nc_workspace/current_time_stamp, saving tuning history and deploy yaml. resume: /path/to/a/specified/snapshot/file # optional. if specified, resume from tuning history. diff --git a/lpot/template/ptq.yaml b/neural_compressor/template/ptq.yaml similarity index 93% rename from lpot/template/ptq.yaml rename to neural_compressor/template/ptq.yaml index 91bcf2a0cd1..008561cd7f6 100644 --- a/lpot/template/ptq.yaml +++ b/neural_compressor/template/ptq.yaml @@ -25,14 +25,14 @@ device: cpu # optional. default value i quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. approach: post_training_static_quant # optional. default value is post_training_static_quant. - recipes: # optional. used to switch lpot int8 receipts ON or OFF. + recipes: # optional. used to switch neural_compressor int8 recipes ON or OFF. scale_propagation_max_pooling: True # optional. default value is True. scale_propagation_concat: True # optional. default value is True. first_conv_or_matmul_quantization: True # optional. default value is True. calibration: # optional. used to specify calibration behavior of post-training-static-quant. other quantization approachs are not necessary. sampling_size: 1000, 2000 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a calib_dataloader in code for lpot.experimental.Quantization. - dataset: # optional. if not specified, user need construct a calibration dataset in code for calib_dataloader of lpot.experimental.Quantization. + dataloader: # optional. if not specified, user need construct a calib_dataloader in code for neural_compressor.experimental.Quantization. + dataset: # optional. if not specified, user need construct a calibration dataset in code for calib_dataloader of neural_compressor.experimental.Quantization. TFRecordDataset: root: /path/to/tf_record transform: @@ -70,7 +70,7 @@ quantization: # optional. tuning constrai } evaluation: # optional.
used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: # optional. used to evaluate accuracy of passing model. topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. configs: # optional. if not specified, use all cores in 1 socket. @@ -79,7 +79,7 @@ evaluation: # optional. used to config inter_num_of_threads: 4 intra_num_of_threads: 28 kmp_blocktime: 1 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 256 dataset: TFRecordDataset: @@ -108,7 +108,7 @@ tuning: name: basic # optional. default value is basic. other values are bayesian, mse, sigopt. sigopt_api_token: YOUR-ACCOUNT-API-TOKEN # optional. Necessary if strategy name is sigopt. sigopt_project_id: PROJECT-ID # optional. Necessary if strategy name is sigopt. - sigopt_experiment_name: lpot-tune # optional. default is lpot-tune if strategy name is sigopt. + sigopt_experiment_name: nc-tune # optional. default is nc-tune if strategy name is sigopt. accuracy_criterion: relative: 0.01 # optional. default value is relative, other value is absolute. this example allows relative accuracy loss: 1%. objective: performance # optional. objective with accuracy constraint guaranteed. default value is performance. other values are modelsize and footprint. @@ -121,5 +121,5 @@ tuning: tensorboard: True # optional. dump tensor distribution in evaluation phase for debug purpose. default value is False. workspace: - path: /path/to/saving/directory # optional. default workspace is ./lpot_workspace/current_time_stamp, saving tuning history and deploy yaml. + path: /path/to/saving/directory # optional. default workspace is ./nc_workspace/current_time_stamp, saving tuning history and deploy yaml. resume: /path/to/a/specified/snapshot/file # optional. if specified, resume from tuning history. diff --git a/lpot/template/qat.yaml b/neural_compressor/template/qat.yaml similarity index 96% rename from lpot/template/qat.yaml rename to neural_compressor/template/qat.yaml index dd64c3b7709..a0f1293a5eb 100644 --- a/lpot/template/qat.yaml +++ b/neural_compressor/template/qat.yaml @@ -78,13 +78,13 @@ quantization: # optional. required for QA } evaluation: # optional. optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: # optional. used to evaluate accuracy of passing model. topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. configs: cores_per_instance: 28 num_of_instance: 1 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 256 dataset: ImageFolder: @@ -127,6 +127,6 @@ tuning: tensorboard: True # optional. dump tensor distribution in evaluation phase for debug purpose. default value is False. workspace: - path: /path/to/saving/directory # optional. default workspace is ./lpot_workspace/current_time_stamp, saving tuning history and deploy yaml. 
diff --git a/lpot/template/qat.yaml b/neural_compressor/template/qat.yaml
similarity index 96%
rename from lpot/template/qat.yaml
rename to neural_compressor/template/qat.yaml
index dd64c3b7709..a0f1293a5eb 100644
--- a/lpot/template/qat.yaml
+++ b/neural_compressor/template/qat.yaml
@@ -78,13 +78,13 @@ quantization: # optional. required for QA
 }
 evaluation: # optional. used to configure the evaluation process.
-  accuracy: # optional. required if the user doesn't provide eval_func in lpot.Quantization.
+  accuracy: # optional. required if the user doesn't provide eval_func in neural_compressor.Quantization.
     metric: # optional. used to evaluate the accuracy of the passing model.
       topk: 1 # built-in metrics are topk, map, and f1; users may also register new metrics.
     configs:
       cores_per_instance: 28
       num_of_instance: 1
-    dataloader: # optional. if not specified, the user needs to construct a q_dataloader in code for lpot.Quantization.
+    dataloader: # optional. if not specified, the user needs to construct a q_dataloader in code for neural_compressor.Quantization.
       batch_size: 256
       dataset:
         ImageFolder:
@@ -127,6 +127,6 @@ tuning:
   tensorboard: True # optional. dump tensor distribution in evaluation phase for debug purposes. default value is False.
   workspace:
-    path: /path/to/saving/directory # optional. default workspace is ./lpot_workspace/current_time_stamp, saving tuning history and deploy yaml.
+    path: /path/to/saving/directory # optional. default workspace is ./nc_workspace/current_time_stamp, saving tuning history and deploy yaml.
     resume: /path/to/a/specified/snapshot/file # optional. if specified, resume from tuning history.
diff --git a/lpot/utils/__init__.py b/neural_compressor/utils/__init__.py
similarity index 100%
rename from lpot/utils/__init__.py
rename to neural_compressor/utils/__init__.py
diff --git a/lpot/utils/collect_layer_histogram.py b/neural_compressor/utils/collect_layer_histogram.py
similarity index 97%
rename from lpot/utils/collect_layer_histogram.py
rename to neural_compressor/utils/collect_layer_histogram.py
index 508c20509a8..774fdad7f2b 100644
--- a/lpot/utils/collect_layer_histogram.py
+++ b/neural_compressor/utils/collect_layer_histogram.py
@@ -16,7 +16,7 @@
 # limitations under the License.
 import numpy as np
-from lpot.utils.utility import combine_histogram
+from neural_compressor.utils.utility import combine_histogram
 class LayerHistogramCollector(object):
     """Saves layer histogram in a dict with layer names as keys and lists of NDArrays as
diff --git a/lpot/utils/create_obj_from_config.py b/neural_compressor/utils/create_obj_from_config.py
similarity index 97%
rename from lpot/utils/create_obj_from_config.py
rename to neural_compressor/utils/create_obj_from_config.py
index db5ce6af015..d8d1706c982 100644
--- a/lpot/utils/create_obj_from_config.py
+++ b/neural_compressor/utils/create_obj_from_config.py
@@ -15,9 +15,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from lpot.experimental.metric import METRICS
-from lpot.experimental.data import DATASETS, TRANSFORMS, FILTERS, DATALOADERS
-from lpot.experimental.common import Optimizers, Criterions
+from neural_compressor.experimental.metric import METRICS
+from neural_compressor.experimental.data import DATASETS, TRANSFORMS, FILTERS, DATALOADERS
+from neural_compressor.experimental.common import Optimizers, Criterions
 from collections import OrderedDict
 import copy
diff --git a/lpot/utils/kl_divergence.py b/neural_compressor/utils/kl_divergence.py
similarity index 100%
rename from lpot/utils/kl_divergence.py
rename to neural_compressor/utils/kl_divergence.py
diff --git a/lpot/utils/logger.py b/neural_compressor/utils/logger.py
similarity index 100%
rename from lpot/utils/logger.py
rename to neural_compressor/utils/logger.py
diff --git a/lpot/utils/pytorch.py b/neural_compressor/utils/pytorch.py
similarity index 100%
rename from lpot/utils/pytorch.py
rename to neural_compressor/utils/pytorch.py
diff --git a/lpot/utils/utility.py b/neural_compressor/utils/utility.py
similarity index 97%
rename from lpot/utils/utility.py
rename to neural_compressor/utils/utility.py
index c03ce8d89ef..0dda14bada5 100644
--- a/lpot/utils/utility.py
+++ b/neural_compressor/utils/utility.py
@@ -35,7 +35,7 @@
 import threading, _thread
 import cpuinfo
 import numpy as np
-from lpot.utils import logger
+from neural_compressor.utils import logger
 import prettytable as pt
 def singleton(cls):
@@ -280,24 +280,24 @@ def recover(fp32_model, tuning_history_path, num, **kwargs):
     framework = tuning_history[0]['cfg']['model']['framework']
     if 'pytorch' in framework:
-        from lpot.utils.pytorch import load
+        from neural_compressor.utils.pytorch import load
         tune_index_qmodel = load(model=fp32_model, history_cfg=q_config, **kwargs)
         return tune_index_qmodel
-    from lpot.adaptor import FRAMEWORKS
+    from neural_compressor.adaptor import FRAMEWORKS
     adaptor = FRAMEWORKS[framework](q_config['framework_specific_info'])
     if 'onnxrt' in framework:
-        from lpot.experimental import common
+        from neural_compressor.experimental import common
         ox_fp32_model = common.Model(fp32_model)
         tune_index_qmodel = adaptor.recover(ox_fp32_model, q_config)
         return tune_index_qmodel
     elif 'tensorflow' in framework:
-        from lpot.experimental import common
+        from neural_compressor.experimental import common
         tf_fp32_model = common.Model(fp32_model)
         tune_index_qmodel = adaptor.recover_tuned_model(tf_fp32_model, q_config)
         return tune_index_qmodel
     elif 'mxnet' in framework:
-        from lpot.experimental import common
+        from neural_compressor.experimental import common
         mx_fp32_model = common.Model(fp32_model)
         tune_index_qmodel = adaptor.recover_tuned_model(mx_fp32_model, q_config)
         return tune_index_qmodel
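The recover() helper changed above rebuilds a previously tuned model from the saved tuning history instead of re-running the whole tuning loop. A usage sketch with placeholder paths, relying only on the signature shown in the hunk (num selects which recorded tuning config to restore; the history file lives under the workspace.path configured in the templates above):

```python
# Sketch: recreate the num-th tuned model from a saved tuning history.
# Both paths are placeholders.
from neural_compressor.utils.utility import recover

q_model = recover(
    fp32_model='/path/to/fp32_model',
    tuning_history_path='/path/to/saved/tuning/history',
    num=1,  # which recorded tuning config to restore
)
```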
diff --git a/lpot/ux/__init__.py b/neural_compressor/ux/__init__.py
similarity index 100%
rename from lpot/ux/__init__.py
rename to neural_compressor/ux/__init__.py
diff --git a/lpot/ux/bin/neural_compressor_bench b/neural_compressor/ux/bin/neural_compressor_bench
similarity index 91%
rename from lpot/ux/bin/neural_compressor_bench
rename to neural_compressor/ux/bin/neural_compressor_bench
index d1613502a64..24d0f18fd10 100644
--- a/lpot/ux/bin/neural_compressor_bench
+++ b/neural_compressor/ux/bin/neural_compressor_bench
@@ -20,5 +20,5 @@
 import gevent.monkey
 gevent.monkey.patch_all()
 if __name__ == '__main__':
-    from lpot.ux.neural_compressor_bench import main
+    from neural_compressor.ux.neural_compressor_bench import main
     main()
diff --git a/lpot/ux/components/__init__.py b/neural_compressor/ux/components/__init__.py
similarity index 100%
rename from lpot/ux/components/__init__.py
rename to neural_compressor/ux/components/__init__.py
diff --git a/lpot/ux/components/benchmark/README.md b/neural_compressor/ux/components/benchmark/README.md
similarity index 69%
rename from lpot/ux/components/benchmark/README.md
rename to neural_compressor/ux/components/benchmark/README.md
index 2eada5925b5..d19b1bb2bc9 100644
--- a/lpot/ux/components/benchmark/README.md
+++ b/neural_compressor/ux/components/benchmark/README.md
@@ -67,8 +67,8 @@
     "batch_size": 1,
     "framework": "tensorflow",
     "config_path": "/path/to/workspace/workloads/_/config.yaml",
-    "benchmark_script": "/localdisk/lpot/ux/components/benchmark/benchmark_model.py",
-    "command": "python /localdisk/lpot/ux/components/benchmark/benchmark_model.py --config /path/to/workspace/workloads/_/config.yaml --input-graph /localdisk/fp32.pb --mode performance --framework tensorflow"
+    "benchmark_script": "/localdisk/neural_compressor/ux/components/benchmark/benchmark_model.py",
+    "command": "python /localdisk/neural_compressor/ux/components/benchmark/benchmark_model.py --config /path/to/workspace/workloads/_/config.yaml --input-graph /localdisk/fp32.pb --mode performance --framework tensorflow"
   }
  }
 },
@@ -94,8 +94,8 @@
     "batch_size": 1,
     "framework": "tensorflow",
     "config_path": "/path/to/workspace/workloads/_/config.yaml",
-    "benchmark_script": "/localdisk/lpot/ux/components/benchmark/benchmark_model.py",
-    "command": "python /localdisk/lpot/ux/components/benchmark/benchmark_model.py --config /path/to/workspace/workloads/_/config.yaml --input-graph /localdisk/fp32.pb --mode performance --framework tensorflow"
+    "benchmark_script": "/localdisk/neural_compressor/ux/components/benchmark/benchmark_model.py",
+    "command": "python /localdisk/neural_compressor/ux/components/benchmark/benchmark_model.py --config /path/to/workspace/workloads/_/config.yaml
--input-graph /localdisk/fp32.pb --mode performance --framework tensorflow" } }, "optimized_model_benchmark": { @@ -108,8 +108,8 @@ "batch_size": 1, "framework": "tensorflow", "config_path": "/path/to/workspace/workloads/_/config.yaml", - "benchmark_script": "/localdisk/lpot/ux/components/benchmark/benchmark_model.py", - "command": "python /localdisk/lpot/ux/components/benchmark/benchmark_model.py --config /path/to/workspace/workloads/_/config.yaml --input-graph /localdisk/int8.pb --mode performance --framework tensorflow" + "benchmark_script": "/localdisk/neural_compressor/ux/components/benchmark/benchmark_model.py", + "command": "python /localdisk/neural_compressor/ux/components/benchmark/benchmark_model.py --config /path/to/workspace/workloads/_/config.yaml --input-graph /localdisk/int8.pb --mode performance --framework tensorflow" } } }, @@ -136,8 +136,8 @@ "batch_size": 1, "framework": "tensorflow", "config_path": "/path/to/workspace/workloads/_/config.yaml", - "benchmark_script": "/localdisk/lpot/ux/components/benchmark/benchmark_model.py", - "command": "python /localdisk/lpot/ux/components/benchmark/benchmark_model.py --config /path/to/workspace/workloads/_/config.yaml --input-graph /localdisk/fp32.pb --mode performance --framework tensorflow" + "benchmark_script": "/localdisk/neural_compressor/ux/components/benchmark/benchmark_model.py", + "command": "python /localdisk/neural_compressor/ux/components/benchmark/benchmark_model.py --config /path/to/workspace/workloads/_/config.yaml --input-graph /localdisk/fp32.pb --mode performance --framework tensorflow" } }, "optimized_model_benchmark": { @@ -150,8 +150,8 @@ "batch_size": 1, "framework": "tensorflow", "config_path": "/path/to/workspace/workloads/_/config.yaml", - "benchmark_script": "/localdisk/lpot/ux/components/benchmark/benchmark_model.py", - "command": "python /localdisk/lpot/ux/components/benchmark/benchmark_model.py --config /path/to/workspace/workloads/_/config.yaml --input-graph /localdisk/int8.pb --mode performance --framework tensorflow" + "benchmark_script": "/localdisk/neural_compressor/ux/components/benchmark/benchmark_model.py", + "command": "python /localdisk/neural_compressor/ux/components/benchmark/benchmark_model.py --config /path/to/workspace/workloads/_/config.yaml --input-graph /localdisk/int8.pb --mode performance --framework tensorflow" } } }, diff --git a/lpot/ux/components/benchmark/__init__.py b/neural_compressor/ux/components/benchmark/__init__.py similarity index 100% rename from lpot/ux/components/benchmark/__init__.py rename to neural_compressor/ux/components/benchmark/__init__.py diff --git a/lpot/ux/components/benchmark/benchmark.py b/neural_compressor/ux/components/benchmark/benchmark.py similarity index 90% rename from lpot/ux/components/benchmark/benchmark.py rename to neural_compressor/ux/components/benchmark/benchmark.py index 34675f7de00..62cda4b98e3 100644 --- a/lpot/ux/components/benchmark/benchmark.py +++ b/neural_compressor/ux/components/benchmark/benchmark.py @@ -18,11 +18,11 @@ import re from typing import Any, Dict -from lpot.ux.components.benchmark import Benchmarks -from lpot.ux.components.benchmark.benchmark_model import benchmark_model -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.hw_info import HWInfo -from lpot.ux.utils.workload.workload import Workload +from neural_compressor.ux.components.benchmark import Benchmarks +from neural_compressor.ux.components.benchmark.benchmark_model import benchmark_model +from neural_compressor.ux.utils.exceptions 
import ClientErrorException +from neural_compressor.ux.utils.hw_info import HWInfo +from neural_compressor.ux.utils.workload.workload import Workload class Benchmark: diff --git a/lpot/ux/components/benchmark/benchmark_model.py b/neural_compressor/ux/components/benchmark/benchmark_model.py similarity index 91% rename from lpot/ux/components/benchmark/benchmark_model.py rename to neural_compressor/ux/components/benchmark/benchmark_model.py index 7d78f149fb8..2b9777c5958 100644 --- a/lpot/ux/components/benchmark/benchmark_model.py +++ b/neural_compressor/ux/components/benchmark/benchmark_model.py @@ -17,7 +17,7 @@ import argparse from typing import Any -from lpot.ux.components.benchmark import Benchmarks +from neural_compressor.ux.components.benchmark import Benchmarks def parse_args() -> Any: @@ -58,7 +58,7 @@ def benchmark_model( framework: str, ) -> None: """Execute benchmark.""" - from lpot.experimental import Benchmark, common + from neural_compressor.experimental import Benchmark, common if framework == "onnxrt": import onnx @@ -72,7 +72,7 @@ def benchmark_model( def set_eager_execution(input_graph: str) -> None: """Set eager execution as required by model.""" - from lpot.ux.components.model.model_type_getter import get_model_type + from neural_compressor.ux.components.model.model_type_getter import get_model_type model_type = get_model_type(input_graph) diff --git a/lpot/ux/components/benchmark/execute_benchmark.py b/neural_compressor/ux/components/benchmark/execute_benchmark.py similarity index 91% rename from lpot/ux/components/benchmark/execute_benchmark.py rename to neural_compressor/ux/components/benchmark/execute_benchmark.py index bfc09c3091d..7c6f43b0db2 100644 --- a/lpot/ux/components/benchmark/execute_benchmark.py +++ b/neural_compressor/ux/components/benchmark/execute_benchmark.py @@ -19,16 +19,16 @@ import os from typing import Any, Dict, List -from lpot.ux.components.benchmark import Benchmarks -from lpot.ux.components.benchmark.benchmark import Benchmark -from lpot.ux.utils.exceptions import ClientErrorException, InternalException -from lpot.ux.utils.executor import Executor -from lpot.ux.utils.logger import log -from lpot.ux.utils.parser import BenchmarkParserFactory -from lpot.ux.utils.templates.workdir import Workdir -from lpot.ux.utils.utils import _load_json_as_dict -from lpot.ux.utils.workload.workload import Workload -from lpot.ux.web.communication import MessageQueue +from neural_compressor.ux.components.benchmark import Benchmarks +from neural_compressor.ux.components.benchmark.benchmark import Benchmark +from neural_compressor.ux.utils.exceptions import ClientErrorException, InternalException +from neural_compressor.ux.utils.executor import Executor +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.parser import BenchmarkParserFactory +from neural_compressor.ux.utils.templates.workdir import Workdir +from neural_compressor.ux.utils.utils import _load_json_as_dict +from neural_compressor.ux.utils.workload.workload import Workload +from neural_compressor.ux.web.communication import MessageQueue mq = MessageQueue() @@ -103,7 +103,7 @@ def execute_real_benchmark( } } """ - from lpot.ux.utils.workload.workload import Workload + from neural_compressor.ux.utils.workload.workload import Workload input_model = data.get("input_model", None) input_model.update({"model_type": "input_model"}) diff --git a/lpot/ux/components/configuration_wizard/README.md b/neural_compressor/ux/components/configuration_wizard/README.md similarity index 100% 
rename from lpot/ux/components/configuration_wizard/README.md rename to neural_compressor/ux/components/configuration_wizard/README.md diff --git a/lpot/ux/components/configuration_wizard/__init__.py b/neural_compressor/ux/components/configuration_wizard/__init__.py similarity index 100% rename from lpot/ux/components/configuration_wizard/__init__.py rename to neural_compressor/ux/components/configuration_wizard/__init__.py diff --git a/lpot/ux/components/configuration_wizard/configuration_parser.py b/neural_compressor/ux/components/configuration_wizard/configuration_parser.py similarity index 98% rename from lpot/ux/components/configuration_wizard/configuration_parser.py rename to neural_compressor/ux/components/configuration_wizard/configuration_parser.py index 3758e0d0191..b10495deae7 100644 --- a/lpot/ux/components/configuration_wizard/configuration_parser.py +++ b/neural_compressor/ux/components/configuration_wizard/configuration_parser.py @@ -17,9 +17,9 @@ from collections.abc import Iterable from typing import Any, Dict, List, Type, Union -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.hw_info import HWInfo -from lpot.ux.utils.utils import parse_bool_value +from neural_compressor.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.hw_info import HWInfo +from neural_compressor.ux.utils.utils import parse_bool_value class ConfigurationParser: diff --git a/lpot/ux/components/configuration_wizard/get_boundary_nodes.py b/neural_compressor/ux/components/configuration_wizard/get_boundary_nodes.py similarity index 88% rename from lpot/ux/components/configuration_wizard/get_boundary_nodes.py rename to neural_compressor/ux/components/configuration_wizard/get_boundary_nodes.py index bfd083edcd6..39c253ba003 100644 --- a/lpot/ux/components/configuration_wizard/get_boundary_nodes.py +++ b/neural_compressor/ux/components/configuration_wizard/get_boundary_nodes.py @@ -17,11 +17,11 @@ import json from typing import Any, Dict -from lpot.ux.components.model.repository import ModelRepository -from lpot.ux.utils.exceptions import ClientErrorException, NotFoundException -from lpot.ux.utils.logger import log -from lpot.ux.utils.utils import check_module -from lpot.ux.web.communication import MessageQueue +from neural_compressor.ux.components.model.repository import ModelRepository +from neural_compressor.ux.utils.exceptions import ClientErrorException, NotFoundException +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.utils import check_module +from neural_compressor.ux.web.communication import MessageQueue mq = MessageQueue() diff --git a/lpot/ux/components/configuration_wizard/get_configuration.py b/neural_compressor/ux/components/configuration_wizard/get_configuration.py similarity index 87% rename from lpot/ux/components/configuration_wizard/get_configuration.py rename to neural_compressor/ux/components/configuration_wizard/get_configuration.py index ffac5fd8644..1402437c57a 100644 --- a/lpot/ux/components/configuration_wizard/get_configuration.py +++ b/neural_compressor/ux/components/configuration_wizard/get_configuration.py @@ -17,17 +17,17 @@ from pathlib import Path from typing import Any, Dict, List, Union -from lpot.ux.components.model.repository import ModelRepository -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.hw_info import HWInfo -from lpot.ux.utils.workload.config import Config +from neural_compressor.ux.components.model.repository import ModelRepository +from 
neural_compressor.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.hw_info import HWInfo +from neural_compressor.ux.utils.workload.config import Config def get_predefined_configuration( data: Dict[str, Any], ) -> Union[Dict[str, Any], List[Dict[str, Any]]]: """Get configuration.""" - from lpot.ux.utils.utils import get_framework_from_path, get_predefined_config_path + from neural_compressor.ux.utils.utils import get_framework_from_path, get_predefined_config_path model_path = data.get("model_path", "") if not ModelRepository.is_model_path(model_path): diff --git a/lpot/ux/components/configuration_wizard/params_feeder.py b/neural_compressor/ux/components/configuration_wizard/params_feeder.py similarity index 94% rename from lpot/ux/components/configuration_wizard/params_feeder.py rename to neural_compressor/ux/components/configuration_wizard/params_feeder.py index 0e619d4250a..82461c2c4fc 100644 --- a/lpot/ux/components/configuration_wizard/params_feeder.py +++ b/neural_compressor/ux/components/configuration_wizard/params_feeder.py @@ -16,21 +16,21 @@ import os from typing import Any, Dict, List, Optional -from lpot.experimental.metric.metric import framework_metrics -from lpot.objective import OBJECTIVES -from lpot.strategy import STRATEGIES -from lpot.ux.components.model.repository import ModelRepository -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.utils import ( +from neural_compressor.experimental.metric.metric import framework_metrics +from neural_compressor.objective import OBJECTIVES +from neural_compressor.strategy import STRATEGIES +from neural_compressor.ux.components.model.repository import ModelRepository +from neural_compressor.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.utils import ( check_module, filter_transforms, load_dataloader_config, - load_help_lpot_params, + load_help_nc_params, load_model_config, load_precisions_config, load_transforms_config, ) -from lpot.ux.web.configuration import Configuration +from neural_compressor.ux.web.configuration import Configuration class Feeder: @@ -148,7 +148,7 @@ def get_transforms(self) -> List[Dict[str, Any]]: @staticmethod def get_objectives() -> List[dict]: """Get list of supported objectives.""" - help_dict = load_help_lpot_params("objectives") + help_dict = load_help_nc_params("objectives") objectives = [] for objective in OBJECTIVES.keys(): @@ -159,7 +159,7 @@ def get_objectives() -> List[dict]: @staticmethod def get_strategies() -> List[Dict[str, Any]]: """Get list of supported strategies.""" - help_dict = load_help_lpot_params("strategies") + help_dict = load_help_nc_params("strategies") strategies = [] for strategy in STRATEGIES.keys(): if "sigopt" == strategy: @@ -205,7 +205,7 @@ def get_metrics(self) -> List[Dict[str, Any]]: else: check_module(framework) - help_dict = load_help_lpot_params("metrics") + help_dict = load_help_nc_params("metrics") key_in_framework_metrics = "onnxrt_qlinearops" if framework == "onnxrt" else framework metrics_class = framework_metrics.get(key_in_framework_metrics) diff --git a/lpot/ux/components/configuration_wizard/save_workload.py b/neural_compressor/ux/components/configuration_wizard/save_workload.py similarity index 94% rename from lpot/ux/components/configuration_wizard/save_workload.py rename to neural_compressor/ux/components/configuration_wizard/save_workload.py index 229f7aaef59..cf676f28390 100644 --- a/lpot/ux/components/configuration_wizard/save_workload.py +++ 
b/neural_compressor/ux/components/configuration_wizard/save_workload.py @@ -19,13 +19,13 @@ from shutil import copy from typing import Any, Dict, List, Optional, Tuple, Union -from lpot.ux.components.configuration_wizard.configuration_parser import ConfigurationParser -from lpot.ux.utils.exceptions import NotFoundException -from lpot.ux.utils.templates.workdir import Workdir -from lpot.ux.utils.utils import replace_with_values -from lpot.ux.utils.workload.config import Config -from lpot.ux.utils.workload.dataloader import Dataset, Transform -from lpot.ux.utils.workload.workload import Workload +from neural_compressor.ux.components.configuration_wizard.configuration_parser import ConfigurationParser +from neural_compressor.ux.utils.exceptions import NotFoundException +from neural_compressor.ux.utils.templates.workdir import Workdir +from neural_compressor.ux.utils.utils import replace_with_values +from neural_compressor.ux.utils.workload.config import Config +from neural_compressor.ux.utils.workload.dataloader import Dataset, Transform +from neural_compressor.ux.utils.workload.workload import Workload logging.basicConfig(level=logging.INFO) diff --git a/lpot/ux/components/file_browser/__init__.py b/neural_compressor/ux/components/file_browser/__init__.py similarity index 100% rename from lpot/ux/components/file_browser/__init__.py rename to neural_compressor/ux/components/file_browser/__init__.py diff --git a/lpot/ux/components/file_browser/file_browser.py b/neural_compressor/ux/components/file_browser/file_browser.py similarity index 94% rename from lpot/ux/components/file_browser/file_browser.py rename to neural_compressor/ux/components/file_browser/file_browser.py index 318be236a75..aae06062090 100644 --- a/lpot/ux/components/file_browser/file_browser.py +++ b/neural_compressor/ux/components/file_browser/file_browser.py @@ -17,9 +17,9 @@ import os from typing import Any, Dict, List -from lpot.ux.components.model.repository import ModelRepository -from lpot.ux.utils.exceptions import AccessDeniedException, ClientErrorException, NotFoundException -from lpot.ux.utils.utils import is_dataset_file, is_hidden, verify_file_path +from neural_compressor.ux.components.model.repository import ModelRepository +from neural_compressor.ux.utils.exceptions import AccessDeniedException, ClientErrorException, NotFoundException +from neural_compressor.ux.utils.utils import is_dataset_file, is_hidden, verify_file_path def get_directory_entries( diff --git a/lpot/ux/components/graph/__init__.py b/neural_compressor/ux/components/graph/__init__.py similarity index 100% rename from lpot/ux/components/graph/__init__.py rename to neural_compressor/ux/components/graph/__init__.py diff --git a/lpot/ux/components/graph/attribute.py b/neural_compressor/ux/components/graph/attribute.py similarity index 93% rename from lpot/ux/components/graph/attribute.py rename to neural_compressor/ux/components/graph/attribute.py index b71ec450c66..dd0e7f17008 100644 --- a/lpot/ux/components/graph/attribute.py +++ b/neural_compressor/ux/components/graph/attribute.py @@ -16,7 +16,7 @@ from typing import Any -from lpot.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.json_serializer import JsonSerializer class Attribute(JsonSerializer): diff --git a/lpot/ux/components/graph/collapser.py b/neural_compressor/ux/components/graph/collapser.py similarity index 97% rename from lpot/ux/components/graph/collapser.py rename to neural_compressor/ux/components/graph/collapser.py index 32df05c6f17..1755ccaa056 
100644 --- a/lpot/ux/components/graph/collapser.py +++ b/neural_compressor/ux/components/graph/collapser.py @@ -16,8 +16,8 @@ from typing import Dict, List, Set -from lpot.ux.components.graph.graph import Graph -from lpot.ux.components.graph.node import GroupNode, Node +from neural_compressor.ux.components.graph.graph import Graph +from neural_compressor.ux.components.graph.node import GroupNode, Node class Collapser: diff --git a/lpot/ux/components/graph/edge.py b/neural_compressor/ux/components/graph/edge.py similarity index 89% rename from lpot/ux/components/graph/edge.py rename to neural_compressor/ux/components/graph/edge.py index b338690240b..6e699bd70b5 100644 --- a/lpot/ux/components/graph/edge.py +++ b/neural_compressor/ux/components/graph/edge.py @@ -14,8 +14,8 @@ # limitations under the License. """Edge class.""" -from lpot.ux.components.graph.node import Node -from lpot.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.components.graph.node import Node +from neural_compressor.ux.utils.json_serializer import JsonSerializer class Edge(JsonSerializer): diff --git a/lpot/ux/components/graph/graph.py b/neural_compressor/ux/components/graph/graph.py similarity index 86% rename from lpot/ux/components/graph/graph.py rename to neural_compressor/ux/components/graph/graph.py index 8d449c150fa..f3b44d92cf0 100644 --- a/lpot/ux/components/graph/graph.py +++ b/neural_compressor/ux/components/graph/graph.py @@ -16,11 +16,11 @@ from typing import Dict, List -from lpot.ux.components.graph.edge import Edge -from lpot.ux.components.graph.node import Node -from lpot.ux.utils.exceptions import NotFoundException -from lpot.ux.utils.json_serializer import JsonSerializer -from lpot.ux.utils.logger import log +from neural_compressor.ux.components.graph.edge import Edge +from neural_compressor.ux.components.graph.node import Node +from neural_compressor.ux.utils.exceptions import NotFoundException +from neural_compressor.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.logger import log class Graph(JsonSerializer): diff --git a/lpot/ux/components/graph/graph_reader.py b/neural_compressor/ux/components/graph/graph_reader.py similarity index 84% rename from lpot/ux/components/graph/graph_reader.py rename to neural_compressor/ux/components/graph/graph_reader.py index 13fa870be39..c6ee0126cbe 100644 --- a/lpot/ux/components/graph/graph_reader.py +++ b/neural_compressor/ux/components/graph/graph_reader.py @@ -17,9 +17,9 @@ from typing import List -from lpot.ux.components.graph.collapser import Collapser -from lpot.ux.components.graph.graph import Graph -from lpot.ux.components.model.repository import ModelRepository +from neural_compressor.ux.components.graph.collapser import Collapser +from neural_compressor.ux.components.graph.graph import Graph +from neural_compressor.ux.components.model.repository import ModelRepository class GraphReader: diff --git a/lpot/ux/components/graph/node.py b/neural_compressor/ux/components/graph/node.py similarity index 92% rename from lpot/ux/components/graph/node.py rename to neural_compressor/ux/components/graph/node.py index bdcaf07afe7..5a41e2e7861 100644 --- a/lpot/ux/components/graph/node.py +++ b/neural_compressor/ux/components/graph/node.py @@ -16,8 +16,8 @@ from typing import Any, Dict, List -from lpot.ux.components.graph.attribute import Attribute -from lpot.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.components.graph.attribute import Attribute +from 
neural_compressor.ux.utils.json_serializer import JsonSerializer class Node(JsonSerializer): diff --git a/lpot/ux/components/graph/reader/__init__.py b/neural_compressor/ux/components/graph/reader/__init__.py similarity index 100% rename from lpot/ux/components/graph/reader/__init__.py rename to neural_compressor/ux/components/graph/reader/__init__.py diff --git a/lpot/ux/components/graph/reader/tensorflow_reader.py b/neural_compressor/ux/components/graph/reader/tensorflow_reader.py similarity index 90% rename from lpot/ux/components/graph/reader/tensorflow_reader.py rename to neural_compressor/ux/components/graph/reader/tensorflow_reader.py index a161bf61c40..4e46ee6ff36 100644 --- a/lpot/ux/components/graph/reader/tensorflow_reader.py +++ b/neural_compressor/ux/components/graph/reader/tensorflow_reader.py @@ -20,11 +20,11 @@ from tensorflow.core.framework.node_def_pb2 import NodeDef from tensorflow.python.framework.dtypes import _TYPE_TO_STRING -from lpot.ux.components.graph.attribute import Attribute -from lpot.ux.components.graph.graph import Graph -from lpot.ux.components.graph.node import Node -from lpot.ux.components.model.model import Model -from lpot.ux.utils.exceptions import NotFoundException +from neural_compressor.ux.components.graph.attribute import Attribute +from neural_compressor.ux.components.graph.graph import Graph +from neural_compressor.ux.components.graph.node import Node +from neural_compressor.ux.components.model.model import Model +from neural_compressor.ux.utils.exceptions import NotFoundException class TensorflowReader: @@ -39,12 +39,12 @@ def read(self) -> Graph: """Read a graph.""" self._hidden_node_ids = {} - from lpot.ux.components.model.tensorflow.model import TensorflowModel + from neural_compressor.ux.components.model.tensorflow.model import TensorflowModel if not isinstance(self.model, TensorflowModel): raise NotFoundException(f"{self.model.path} is not Tensorflow model.") - graph_def = self.model.lpot_model_instance.graph_def + graph_def = self.model.nc_model_instance.graph_def graph = Graph() diff --git a/lpot/ux/components/manage_workspace.py b/neural_compressor/ux/components/manage_workspace.py similarity index 89% rename from lpot/ux/components/manage_workspace.py rename to neural_compressor/ux/components/manage_workspace.py index c30127b49a6..18ed026cc0d 100644 --- a/lpot/ux/components/manage_workspace.py +++ b/neural_compressor/ux/components/manage_workspace.py @@ -16,8 +16,8 @@ from typing import Any, Dict -from lpot.ux.utils.templates.workdir import Workdir -from lpot.ux.web.configuration import Configuration +from neural_compressor.ux.utils.templates.workdir import Workdir +from neural_compressor.ux.web.configuration import Configuration def get_default_path(data: Dict[str, Any]) -> Dict[str, Any]: diff --git a/lpot/ux/components/model/__init__.py b/neural_compressor/ux/components/model/__init__.py similarity index 100% rename from lpot/ux/components/model/__init__.py rename to neural_compressor/ux/components/model/__init__.py diff --git a/lpot/ux/components/model/domain.py b/neural_compressor/ux/components/model/domain.py similarity index 93% rename from lpot/ux/components/model/domain.py rename to neural_compressor/ux/components/model/domain.py index f14b10a92c9..2a86db3bfcc 100644 --- a/lpot/ux/components/model/domain.py +++ b/neural_compressor/ux/components/model/domain.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Model domain class.""" -from lpot.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.json_serializer import JsonSerializer class Domain(JsonSerializer): diff --git a/lpot/ux/components/model/model.py b/neural_compressor/ux/components/model/model.py similarity index 96% rename from lpot/ux/components/model/model.py rename to neural_compressor/ux/components/model/model.py index f75dac44cb7..50903d7e366 100644 --- a/lpot/ux/components/model/model.py +++ b/neural_compressor/ux/components/model/model.py @@ -17,8 +17,8 @@ from abc import ABC, abstractmethod from typing import Any, List, Optional -from lpot.ux.components.graph.graph import Graph -from lpot.ux.components.model.domain import Domain +from neural_compressor.ux.components.graph.graph import Graph +from neural_compressor.ux.components.model.domain import Domain class Model(ABC): diff --git a/lpot/ux/components/model/model_type_getter.py b/neural_compressor/ux/components/model/model_type_getter.py similarity index 83% rename from lpot/ux/components/model/model_type_getter.py rename to neural_compressor/ux/components/model/model_type_getter.py index b36aca867b0..1d0913181a0 100644 --- a/lpot/ux/components/model/model_type_getter.py +++ b/neural_compressor/ux/components/model/model_type_getter.py @@ -14,8 +14,8 @@ # limitations under the License. """Model type getter.""" -from lpot.model.model import get_model_type as lpot_get_model_type -from lpot.ux.utils.expiring_dict import ExpiringDict +from neural_compressor.model.model import get_model_type as nc_get_model_type +from neural_compressor.ux.utils.expiring_dict import ExpiringDict model_type_cache = ExpiringDict(ttl=600) @@ -26,7 +26,7 @@ def get_model_type(model_path: str) -> str: return model_type_cache[model_path] except KeyError: try: - model_type = lpot_get_model_type(model_path) + model_type = nc_get_model_type(model_path) except Exception: model_type = "not a model" model_type_cache[model_path] = model_type diff --git a/lpot/ux/components/model/onnxrt/__init__.py b/neural_compressor/ux/components/model/onnxrt/__init__.py similarity index 100% rename from lpot/ux/components/model/onnxrt/__init__.py rename to neural_compressor/ux/components/model/onnxrt/__init__.py diff --git a/lpot/ux/components/model/onnxrt/model.py b/neural_compressor/ux/components/model/onnxrt/model.py similarity index 89% rename from lpot/ux/components/model/onnxrt/model.py rename to neural_compressor/ux/components/model/onnxrt/model.py index 97184c60573..03a7cb3f538 100644 --- a/lpot/ux/components/model/onnxrt/model.py +++ b/neural_compressor/ux/components/model/onnxrt/model.py @@ -14,8 +14,8 @@ # limitations under the License. 
"""Onnxrt model class.""" -from lpot.ux.components.model.model import Model -from lpot.ux.utils.utils import check_module, get_file_extension +from neural_compressor.ux.components.model.model import Model +from neural_compressor.ux.utils.utils import check_module, get_file_extension class OnnxrtModel(Model): diff --git a/lpot/ux/components/model/repository.py b/neural_compressor/ux/components/model/repository.py similarity index 81% rename from lpot/ux/components/model/repository.py rename to neural_compressor/ux/components/model/repository.py index 57c035c4867..9e18ece6cf2 100644 --- a/lpot/ux/components/model/repository.py +++ b/neural_compressor/ux/components/model/repository.py @@ -16,13 +16,13 @@ from typing import List, Optional, Type -from lpot.ux.components.model.model import Model -from lpot.ux.components.model.onnxrt.model import OnnxrtModel -from lpot.ux.components.model.tensorflow.frozen_pb import FrozenPbModel -from lpot.ux.components.model.tensorflow.keras import KerasModel -from lpot.ux.components.model.tensorflow.meta_graph import MetaGraphModel -from lpot.ux.components.model.tensorflow.saved_model import SavedModelModel -from lpot.ux.utils.exceptions import NotFoundException +from neural_compressor.ux.components.model.model import Model +from neural_compressor.ux.components.model.onnxrt.model import OnnxrtModel +from neural_compressor.ux.components.model.tensorflow.frozen_pb import FrozenPbModel +from neural_compressor.ux.components.model.tensorflow.keras import KerasModel +from neural_compressor.ux.components.model.tensorflow.meta_graph import MetaGraphModel +from neural_compressor.ux.components.model.tensorflow.saved_model import SavedModelModel +from neural_compressor.ux.utils.exceptions import NotFoundException class ModelRepository: diff --git a/lpot/ux/components/model/tensorflow/__init__.py b/neural_compressor/ux/components/model/tensorflow/__init__.py similarity index 100% rename from lpot/ux/components/model/tensorflow/__init__.py rename to neural_compressor/ux/components/model/tensorflow/__init__.py diff --git a/lpot/ux/components/model/tensorflow/frozen_pb.py b/neural_compressor/ux/components/model/tensorflow/frozen_pb.py similarity index 83% rename from lpot/ux/components/model/tensorflow/frozen_pb.py rename to neural_compressor/ux/components/model/tensorflow/frozen_pb.py index b3a3cfa5252..5cb2d849bcf 100644 --- a/lpot/ux/components/model/tensorflow/frozen_pb.py +++ b/neural_compressor/ux/components/model/tensorflow/frozen_pb.py @@ -14,8 +14,8 @@ # limitations under the License. 
"""Tensorflow frozen pb model.""" -from lpot.ux.components.model.model_type_getter import get_model_type -from lpot.ux.components.model.tensorflow.model import TensorflowModel as TFModel +from neural_compressor.ux.components.model.model_type_getter import get_model_type +from neural_compressor.ux.components.model.tensorflow.model import TensorflowModel as TFModel class FrozenPbModel(TFModel): diff --git a/lpot/ux/components/model/tensorflow/keras.py b/neural_compressor/ux/components/model/tensorflow/keras.py similarity index 86% rename from lpot/ux/components/model/tensorflow/keras.py rename to neural_compressor/ux/components/model/tensorflow/keras.py index ab7bff6c48d..c8a4f241dca 100644 --- a/lpot/ux/components/model/tensorflow/keras.py +++ b/neural_compressor/ux/components/model/tensorflow/keras.py @@ -16,8 +16,8 @@ from typing import Any, List, Optional -from lpot.ux.components.model.model_type_getter import get_model_type -from lpot.ux.components.model.tensorflow.model import TensorflowModel as TFModel +from neural_compressor.ux.components.model.model_type_getter import get_model_type +from neural_compressor.ux.components.model.tensorflow.model import TensorflowModel as TFModel class KerasModel(TFModel): diff --git a/lpot/ux/components/model/tensorflow/meta_graph.py b/neural_compressor/ux/components/model/tensorflow/meta_graph.py similarity index 84% rename from lpot/ux/components/model/tensorflow/meta_graph.py rename to neural_compressor/ux/components/model/tensorflow/meta_graph.py index e19ff715b16..48d2173c081 100644 --- a/lpot/ux/components/model/tensorflow/meta_graph.py +++ b/neural_compressor/ux/components/model/tensorflow/meta_graph.py @@ -16,9 +16,9 @@ from typing import Any, List, Optional -from lpot.ux.components.graph.graph import Graph -from lpot.ux.components.model.model_type_getter import get_model_type -from lpot.ux.components.model.tensorflow.model import TensorflowModel as TFModel +from neural_compressor.ux.components.graph.graph import Graph +from neural_compressor.ux.components.model.model_type_getter import get_model_type +from neural_compressor.ux.components.model.tensorflow.model import TensorflowModel as TFModel class MetaGraphModel(TFModel): diff --git a/lpot/ux/components/model/tensorflow/model.py b/neural_compressor/ux/components/model/tensorflow/model.py similarity index 61% rename from lpot/ux/components/model/tensorflow/model.py rename to neural_compressor/ux/components/model/tensorflow/model.py index a9846b76752..a3191ffea44 100644 --- a/lpot/ux/components/model/tensorflow/model.py +++ b/neural_compressor/ux/components/model/tensorflow/model.py @@ -16,14 +16,14 @@ import os.path from typing import Any, List, Optional -from lpot.experimental.common.model import Model as LpotModel -from lpot.model.model import TensorflowBaseModel -from lpot.utils.logger import Logger -from lpot.ux.components.graph.graph import Graph -from lpot.ux.components.graph.reader.tensorflow_reader import TensorflowReader -from lpot.ux.components.model.model import Model -from lpot.ux.utils.logger import log -from lpot.ux.utils.utils import check_module +from neural_compressor.experimental.common.model import Model as NCModel +from neural_compressor.model.model import TensorflowBaseModel +from neural_compressor.utils.logger import Logger +from neural_compressor.ux.components.graph.graph import Graph +from neural_compressor.ux.components.graph.reader.tensorflow_reader import TensorflowReader +from neural_compressor.ux.components.model.model import Model +from 
neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.utils import check_module class TensorflowModel(Model): @@ -32,21 +32,21 @@ class TensorflowModel(Model): def __init__(self, path: str) -> None: """Initialize object.""" super().__init__(path) - self._lpot_model_instance: Optional[TensorflowBaseModel] = None + self._nc_model_instance: Optional[TensorflowBaseModel] = None def get_input_nodes(self) -> Optional[List[Any]]: """Get model input nodes.""" self.guard_requirements_installed() # pylint: disable=maybe-no-member - return self.lpot_model_instance.input_node_names + return self.nc_model_instance.input_node_names def get_output_nodes(self) -> Optional[List[Any]]: """Get model output nodes.""" self.guard_requirements_installed() # pylint: disable=maybe-no-member - return self.lpot_model_instance.output_node_names + ["custom"] + return self.nc_model_instance.output_node_names + ["custom"] def get_model_graph(self) -> Graph: """Get model Graph.""" @@ -54,19 +54,19 @@ def get_model_graph(self) -> Graph: return graph_reader.read() @property - def lpot_model_instance(self) -> TensorflowBaseModel: - """Get LPOT Model instance.""" - self._ensure_lpot_model_instance() - return self._lpot_model_instance + def nc_model_instance(self) -> TensorflowBaseModel: + """Get Neural Compressor Model instance.""" + self._ensure_nc_model_instance() + return self._nc_model_instance - def _ensure_lpot_model_instance(self) -> None: - """Create LPOT Model instance if needed.""" - if self._lpot_model_instance is not None: + def _ensure_nc_model_instance(self) -> None: + """Create Neural Compressor Model instance if needed.""" + if self._nc_model_instance is not None: return model_name = os.path.splitext(os.path.basename(self.path))[0] Logger().get_logger().setLevel(log.level) - self._lpot_model_instance = LpotModel(self.path) - self._lpot_model_instance.name = model_name + self._nc_model_instance = NCModel(self.path) + self._nc_model_instance.name = model_name @staticmethod def get_framework_name() -> str: diff --git a/lpot/ux/components/model/tensorflow/saved_model.py b/neural_compressor/ux/components/model/tensorflow/saved_model.py similarity index 83% rename from lpot/ux/components/model/tensorflow/saved_model.py rename to neural_compressor/ux/components/model/tensorflow/saved_model.py index ac9388ba885..9bb95823406 100644 --- a/lpot/ux/components/model/tensorflow/saved_model.py +++ b/neural_compressor/ux/components/model/tensorflow/saved_model.py @@ -14,8 +14,8 @@ # limitations under the License. 
"""Tensorflow saved_model model.""" -from lpot.ux.components.model.model_type_getter import get_model_type -from lpot.ux.components.model.tensorflow.model import TensorflowModel as TFModel +from neural_compressor.ux.components.model.model_type_getter import get_model_type +from neural_compressor.ux.components.model.tensorflow.model import TensorflowModel as TFModel class SavedModelModel(TFModel): diff --git a/lpot/ux/components/model_zoo/README.md b/neural_compressor/ux/components/model_zoo/README.md similarity index 100% rename from lpot/ux/components/model_zoo/README.md rename to neural_compressor/ux/components/model_zoo/README.md diff --git a/lpot/ux/components/model_zoo/__init__.py b/neural_compressor/ux/components/model_zoo/__init__.py similarity index 100% rename from lpot/ux/components/model_zoo/__init__.py rename to neural_compressor/ux/components/model_zoo/__init__.py diff --git a/lpot/ux/components/model_zoo/download_config.py b/neural_compressor/ux/components/model_zoo/download_config.py similarity index 91% rename from lpot/ux/components/model_zoo/download_config.py rename to neural_compressor/ux/components/model_zoo/download_config.py index bcd9a77ee1a..d60e34c0b30 100644 --- a/lpot/ux/components/model_zoo/download_config.py +++ b/neural_compressor/ux/components/model_zoo/download_config.py @@ -14,7 +14,7 @@ # limitations under the License. """Download yaml config from GitHub.""" -from lpot.ux.components.model_zoo.downloader import Downloader +from neural_compressor.ux.components.model_zoo.downloader import Downloader def download_config(data: dict) -> str: diff --git a/lpot/ux/components/model_zoo/download_model.py b/neural_compressor/ux/components/model_zoo/download_model.py similarity index 91% rename from lpot/ux/components/model_zoo/download_model.py rename to neural_compressor/ux/components/model_zoo/download_model.py index 75e9583ab78..aa6b89ace13 100644 --- a/lpot/ux/components/model_zoo/download_model.py +++ b/neural_compressor/ux/components/model_zoo/download_model.py @@ -14,7 +14,7 @@ # limitations under the License. 
"""Download model from Examples.""" -from lpot.ux.components.model_zoo.downloader import Downloader +from neural_compressor.ux.components.model_zoo.downloader import Downloader def download_model(data: dict) -> str: diff --git a/lpot/ux/components/model_zoo/downloader.py b/neural_compressor/ux/components/model_zoo/downloader.py similarity index 95% rename from lpot/ux/components/model_zoo/downloader.py rename to neural_compressor/ux/components/model_zoo/downloader.py index ccd96f3ec3c..2bea5dae825 100644 --- a/lpot/ux/components/model_zoo/downloader.py +++ b/neural_compressor/ux/components/model_zoo/downloader.py @@ -21,12 +21,12 @@ import requests -from lpot.ux.utils.consts import github_info -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.logger import log -from lpot.ux.utils.utils import is_development_env, load_model_config -from lpot.ux.web.communication import MessageQueue -from lpot.ux.web.configuration import Configuration +from neural_compressor.ux.utils.consts import github_info +from neural_compressor.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.utils import is_development_env, load_model_config +from neural_compressor.ux.web.communication import MessageQueue +from neural_compressor.ux.web.configuration import Configuration class Downloader: @@ -264,11 +264,11 @@ def get_yaml_url( ), ) url = os.path.join( - os.environ["LPOT_PROJECT_URL"], + os.environ["NC_PROJECT_URL"], file_path, "raw?ref=developer", ) - headers = {"Private-Token": os.environ.get("LPOT_TOKEN")} + headers = {"Private-Token": os.environ.get("NC_TOKEN")} return url, headers user = github_info.get("user") repository = github_info.get("repository") diff --git a/lpot/ux/components/model_zoo/list_models.py b/neural_compressor/ux/components/model_zoo/list_models.py similarity index 92% rename from lpot/ux/components/model_zoo/list_models.py rename to neural_compressor/ux/components/model_zoo/list_models.py index 2020a670ddf..25a9cac4c27 100644 --- a/lpot/ux/components/model_zoo/list_models.py +++ b/neural_compressor/ux/components/model_zoo/list_models.py @@ -16,11 +16,11 @@ from typing import Any, Dict, List, Optional -from lpot.ux.components.model.repository import ModelRepository -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.logger import log -from lpot.ux.utils.templates.workdir import Workdir -from lpot.ux.utils.utils import ( +from neural_compressor.ux.components.model.repository import ModelRepository +from neural_compressor.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.templates.workdir import Workdir +from neural_compressor.ux.utils.utils import ( get_model_zoo_config_path, get_model_zoo_model_path, get_module_version, diff --git a/lpot/ux/components/model_zoo/save_workload.py b/neural_compressor/ux/components/model_zoo/save_workload.py similarity index 80% rename from lpot/ux/components/model_zoo/save_workload.py rename to neural_compressor/ux/components/model_zoo/save_workload.py index 810668aea1d..e4847909fdd 100644 --- a/lpot/ux/components/model_zoo/save_workload.py +++ b/neural_compressor/ux/components/model_zoo/save_workload.py @@ -18,12 +18,12 @@ import os from typing import Any, Dict -from lpot.ux.components.configuration_wizard.configuration_parser import ConfigurationParser -from lpot.ux.components.model_zoo.download_config import download_config -from 
lpot.ux.components.model_zoo.download_model import download_model -from lpot.ux.utils.templates.workdir import Workdir -from lpot.ux.utils.workload.workload import Workload -from lpot.ux.web.communication import MessageQueue +from neural_compressor.ux.components.configuration_wizard.configuration_parser import ConfigurationParser +from neural_compressor.ux.components.model_zoo.download_config import download_config +from neural_compressor.ux.components.model_zoo.download_model import download_model +from neural_compressor.ux.utils.templates.workdir import Workdir +from neural_compressor.ux.utils.workload.workload import Workload +from neural_compressor.ux.web.communication import MessageQueue logging.basicConfig(level=logging.INFO) diff --git a/lpot/ux/components/optimization/__init__.py b/neural_compressor/ux/components/optimization/__init__.py similarity index 100% rename from lpot/ux/components/optimization/__init__.py rename to neural_compressor/ux/components/optimization/__init__.py diff --git a/lpot/ux/components/optimization/execute_optimization.py b/neural_compressor/ux/components/optimization/execute_optimization.py similarity index 86% rename from lpot/ux/components/optimization/execute_optimization.py rename to neural_compressor/ux/components/optimization/execute_optimization.py index 107fe2d44ff..832cbb572b8 100644 --- a/lpot/ux/components/optimization/execute_optimization.py +++ b/neural_compressor/ux/components/optimization/execute_optimization.py @@ -20,23 +20,23 @@ import threading from typing import Any, Dict, Optional -from lpot.ux.components.optimization.factory import OptimizationFactory -from lpot.ux.components.optimization.optimization import Optimization -from lpot.ux.components.optimization.tuning_history import Watcher -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.executor import Executor -from lpot.ux.utils.logger import log -from lpot.ux.utils.parser import OptimizationParser -from lpot.ux.utils.templates.workdir import Workdir -from lpot.ux.utils.utils import _load_json_as_dict, get_size -from lpot.ux.web.communication import MessageQueue +from neural_compressor.ux.components.optimization.factory import OptimizationFactory +from neural_compressor.ux.components.optimization.optimization import Optimization +from neural_compressor.ux.components.optimization.tuning_history import Watcher +from neural_compressor.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.executor import Executor +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.parser import OptimizationParser +from neural_compressor.ux.utils.templates.workdir import Workdir +from neural_compressor.ux.utils.utils import _load_json_as_dict, get_size +from neural_compressor.ux.web.communication import MessageQueue mq = MessageQueue() def execute_optimization(data: Dict[str, Any]) -> dict: """Get configuration.""" - from lpot.ux.utils.workload.workload import Workload + from neural_compressor.ux.utils.workload.workload import Workload if not str(data.get("id", "")): message = "Missing request id." 
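Several UX endpoints in this diff share the same request-handling shape: validate the JSON payload, raise ClientErrorException for malformed input, and report progress over the MessageQueue. A condensed sketch of the id guard used by execute_optimization above (the helper's name is ours; the exception class and the error message are verbatim from the diff):

```python
# Condensed request-guard sketch based on execute_optimization above.
from typing import Any, Dict

from neural_compressor.ux.utils.exceptions import ClientErrorException


def request_id_of(data: Dict[str, Any]) -> str:
    """Return the request id from a UX payload or reject the request."""
    request_id = str(data.get("id", ""))
    if not request_id:
        raise ClientErrorException("Missing request id.")
    return request_id
```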
diff --git a/lpot/ux/components/optimization/factory.py b/neural_compressor/ux/components/optimization/factory.py similarity index 72% rename from lpot/ux/components/optimization/factory.py rename to neural_compressor/ux/components/optimization/factory.py index f82081eea72..0dd8b0c3e8b 100644 --- a/lpot/ux/components/optimization/factory.py +++ b/neural_compressor/ux/components/optimization/factory.py @@ -16,13 +16,13 @@ from typing import Optional -from lpot.ux.components.optimization import Optimizations -from lpot.ux.components.optimization.graph_optimizer.graph_optimization import GraphOptimization -from lpot.ux.components.optimization.optimization import Optimization -from lpot.ux.components.optimization.tune.tuning import Tuning -from lpot.ux.utils.exceptions import InternalException -from lpot.ux.utils.logger import log -from lpot.ux.utils.workload.workload import Workload +from neural_compressor.ux.components.optimization import Optimizations +from neural_compressor.ux.components.optimization.graph_optimizer.graph_optimization import GraphOptimization +from neural_compressor.ux.components.optimization.optimization import Optimization +from neural_compressor.ux.components.optimization.tune.tuning import Tuning +from neural_compressor.ux.utils.exceptions import InternalException +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.workload.workload import Workload class OptimizationFactory: diff --git a/lpot/ux/components/optimization/graph_optimizer/__init__.py b/neural_compressor/ux/components/optimization/graph_optimizer/__init__.py similarity index 100% rename from lpot/ux/components/optimization/graph_optimizer/__init__.py rename to neural_compressor/ux/components/optimization/graph_optimizer/__init__.py diff --git a/lpot/ux/components/optimization/graph_optimizer/graph_optimization.py b/neural_compressor/ux/components/optimization/graph_optimizer/graph_optimization.py similarity index 93% rename from lpot/ux/components/optimization/graph_optimizer/graph_optimization.py rename to neural_compressor/ux/components/optimization/graph_optimizer/graph_optimization.py index fcbdc189fa7..e7d38360760 100644 --- a/lpot/ux/components/optimization/graph_optimizer/graph_optimization.py +++ b/neural_compressor/ux/components/optimization/graph_optimizer/graph_optimization.py @@ -17,11 +17,11 @@ import os from typing import List -from lpot.ux.components.optimization.graph_optimizer.optimize_model import ( +from neural_compressor.ux.components.optimization.graph_optimizer.optimize_model import ( optimize_graph, optimize_graph_config, ) -from lpot.ux.components.optimization.optimization import Optimization +from neural_compressor.ux.components.optimization.optimization import Optimization class GraphOptimization(Optimization): diff --git a/lpot/ux/components/optimization/graph_optimizer/optimize_model.py b/neural_compressor/ux/components/optimization/graph_optimizer/optimize_model.py similarity index 94% rename from lpot/ux/components/optimization/graph_optimizer/optimize_model.py rename to neural_compressor/ux/components/optimization/graph_optimizer/optimize_model.py index 4599d824ed1..8b8d67191bd 100644 --- a/lpot/ux/components/optimization/graph_optimizer/optimize_model.py +++ b/neural_compressor/ux/components/optimization/graph_optimizer/optimize_model.py @@ -79,7 +79,7 @@ def optimize_graph( output: Optional[str] = None, ) -> None: """Execute graph optimization.""" - from lpot.experimental import Graph_Optimization + from neural_compressor.experimental import 
Graph_Optimization if framework == "onnxrt": import onnx @@ -107,7 +107,7 @@ def optimize_graph_config( config: str, ) -> None: """Execute graph optimization using config file.""" - from lpot.experimental import Graph_Optimization + from neural_compressor.experimental import Graph_Optimization if framework == "onnxrt": import onnx @@ -125,7 +125,7 @@ def optimize_graph_config( def set_eager_execution(input_graph: str) -> None: """Set eager execution as required by model.""" - from lpot.ux.components.model.model_type_getter import get_model_type + from neural_compressor.ux.components.model.model_type_getter import get_model_type model_type = get_model_type(input_graph) diff --git a/lpot/ux/components/optimization/optimization.py b/neural_compressor/ux/components/optimization/optimization.py similarity index 93% rename from lpot/ux/components/optimization/optimization.py rename to neural_compressor/ux/components/optimization/optimization.py index 2fadf67f9a1..2d838eb97d7 100644 --- a/lpot/ux/components/optimization/optimization.py +++ b/neural_compressor/ux/components/optimization/optimization.py @@ -19,10 +19,10 @@ from abc import abstractmethod from typing import Any, Dict, List, Optional -from lpot.ux.utils.exceptions import ClientErrorException, InternalException -from lpot.ux.utils.hw_info import HWInfo -from lpot.ux.utils.json_serializer import JsonSerializer -from lpot.ux.utils.workload.workload import Workload +from neural_compressor.ux.utils.exceptions import ClientErrorException, InternalException +from neural_compressor.ux.utils.hw_info import HWInfo +from neural_compressor.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.workload.workload import Workload class Optimization(JsonSerializer): diff --git a/lpot/ux/components/optimization/tune/__init__.py b/neural_compressor/ux/components/optimization/tune/__init__.py similarity index 100% rename from lpot/ux/components/optimization/tune/__init__.py rename to neural_compressor/ux/components/optimization/tune/__init__.py diff --git a/lpot/ux/components/optimization/tune/tune_model.py b/neural_compressor/ux/components/optimization/tune/tune_model.py similarity index 94% rename from lpot/ux/components/optimization/tune/tune_model.py rename to neural_compressor/ux/components/optimization/tune/tune_model.py index edcb6aa9853..6f6d0858b68 100644 --- a/lpot/ux/components/optimization/tune/tune_model.py +++ b/neural_compressor/ux/components/optimization/tune/tune_model.py @@ -56,7 +56,7 @@ def tune_model( framework: str, ) -> None: """Execute tuning.""" - from lpot.experimental import Quantization, common + from neural_compressor.experimental import Quantization, common if framework == "onnxrt": import onnx @@ -74,7 +74,7 @@ def tune_model( def set_eager_execution(input_graph: str) -> None: """Set eager execution as required by model.""" - from lpot.ux.components.model.model_type_getter import get_model_type + from neural_compressor.ux.components.model.model_type_getter import get_model_type model_type = get_model_type(input_graph) diff --git a/lpot/ux/components/optimization/tune/tuning.py b/neural_compressor/ux/components/optimization/tune/tuning.py similarity index 91% rename from lpot/ux/components/optimization/tune/tuning.py rename to neural_compressor/ux/components/optimization/tune/tuning.py index 89c1854f382..e9fd53f8790 100644 --- a/lpot/ux/components/optimization/tune/tuning.py +++ b/neural_compressor/ux/components/optimization/tune/tuning.py @@ -17,8 +17,8 @@ import os from typing import List, 
Optional -from lpot.ux.components.optimization.optimization import Optimization -from lpot.ux.utils.workload.workload import Workload +from neural_compressor.ux.components.optimization.optimization import Optimization +from neural_compressor.ux.utils.workload.workload import Workload class Tuning(Optimization): diff --git a/lpot/ux/components/optimization/tuning_history.py b/neural_compressor/ux/components/optimization/tuning_history.py similarity index 89% rename from lpot/ux/components/optimization/tuning_history.py rename to neural_compressor/ux/components/optimization/tuning_history.py index 2447a6e1c8f..57e1eadc288 100644 --- a/lpot/ux/components/optimization/tuning_history.py +++ b/neural_compressor/ux/components/optimization/tuning_history.py @@ -18,15 +18,15 @@ from time import sleep from typing import Optional -from lpot.utils.utility import get_tuning_history -from lpot.ux.utils.exceptions import NotFoundException -from lpot.ux.utils.workload.config import Config -from lpot.ux.web.service.history_snapshot_parser import HistorySnapshotParser +from neural_compressor.utils.utility import get_tuning_history +from neural_compressor.ux.utils.exceptions import NotFoundException +from neural_compressor.ux.utils.workload.config import Config +from neural_compressor.ux.web.service.history_snapshot_parser import HistorySnapshotParser def tuning_history(workload_id: str) -> dict: """Get tuning history for requested Workload.""" - from lpot.ux.web.service.workload import WorkloadService + from neural_compressor.ux.web.service.workload import WorkloadService workload_data = WorkloadService.get_workload_data_by_id(workload_id) @@ -87,7 +87,7 @@ def _build_tuning_history(workload_data: dict) -> dict: def tuning_history_filename(workload_id: str) -> str: """Build tuning history filename based on id.""" - from lpot.ux.web.service.workload import WorkloadService + from neural_compressor.ux.web.service.workload import WorkloadService workload_data = WorkloadService.get_workload_data_by_id(workload_id) workload_path = workload_data.get("workload_path", "") @@ -112,7 +112,7 @@ def stop(self) -> None: def __call__(self) -> None: """Execute the watch.""" - from lpot.ux.web.service.workload import WorkloadService + from neural_compressor.ux.web.service.workload import WorkloadService self.watch = True while self.watch: diff --git a/lpot/ux/gui/.browserslistrc b/neural_compressor/ux/gui/.browserslistrc similarity index 100% rename from lpot/ux/gui/.browserslistrc rename to neural_compressor/ux/gui/.browserslistrc diff --git a/lpot/ux/gui/.editorconfig b/neural_compressor/ux/gui/.editorconfig similarity index 100% rename from lpot/ux/gui/.editorconfig rename to neural_compressor/ux/gui/.editorconfig diff --git a/lpot/ux/gui/.gitignore b/neural_compressor/ux/gui/.gitignore similarity index 100% rename from lpot/ux/gui/.gitignore rename to neural_compressor/ux/gui/.gitignore diff --git a/lpot/ux/gui/README.md b/neural_compressor/ux/gui/README.md similarity index 100% rename from lpot/ux/gui/README.md rename to neural_compressor/ux/gui/README.md diff --git a/lpot/ux/gui/angular.json b/neural_compressor/ux/gui/angular.json similarity index 100% rename from lpot/ux/gui/angular.json rename to neural_compressor/ux/gui/angular.json diff --git a/lpot/ux/gui/karma.conf.js b/neural_compressor/ux/gui/karma.conf.js similarity index 100% rename from lpot/ux/gui/karma.conf.js rename to neural_compressor/ux/gui/karma.conf.js diff --git a/lpot/ux/gui/package-lock.json b/neural_compressor/ux/gui/package-lock.json similarity 
index 100% rename from lpot/ux/gui/package-lock.json rename to neural_compressor/ux/gui/package-lock.json diff --git a/lpot/ux/gui/package.json b/neural_compressor/ux/gui/package.json similarity index 100% rename from lpot/ux/gui/package.json rename to neural_compressor/ux/gui/package.json diff --git a/lpot/ux/gui/src/app/app-routing.module.ts b/neural_compressor/ux/gui/src/app/app-routing.module.ts similarity index 100% rename from lpot/ux/gui/src/app/app-routing.module.ts rename to neural_compressor/ux/gui/src/app/app-routing.module.ts diff --git a/lpot/ux/gui/src/app/app.component.html b/neural_compressor/ux/gui/src/app/app.component.html similarity index 100% rename from lpot/ux/gui/src/app/app.component.html rename to neural_compressor/ux/gui/src/app/app.component.html diff --git a/lpot/ux/gui/src/app/app.component.scss b/neural_compressor/ux/gui/src/app/app.component.scss similarity index 100% rename from lpot/ux/gui/src/app/app.component.scss rename to neural_compressor/ux/gui/src/app/app.component.scss diff --git a/lpot/ux/gui/src/app/app.component.spec.ts b/neural_compressor/ux/gui/src/app/app.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/app.component.spec.ts rename to neural_compressor/ux/gui/src/app/app.component.spec.ts diff --git a/lpot/ux/gui/src/app/app.component.ts b/neural_compressor/ux/gui/src/app/app.component.ts similarity index 100% rename from lpot/ux/gui/src/app/app.component.ts rename to neural_compressor/ux/gui/src/app/app.component.ts diff --git a/lpot/ux/gui/src/app/app.module.ts b/neural_compressor/ux/gui/src/app/app.module.ts similarity index 100% rename from lpot/ux/gui/src/app/app.module.ts rename to neural_compressor/ux/gui/src/app/app.module.ts diff --git a/lpot/ux/gui/src/app/authentication/authentication.interceptor.spec.ts b/neural_compressor/ux/gui/src/app/authentication/authentication.interceptor.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/authentication/authentication.interceptor.spec.ts rename to neural_compressor/ux/gui/src/app/authentication/authentication.interceptor.spec.ts diff --git a/lpot/ux/gui/src/app/authentication/authentication.interceptor.ts b/neural_compressor/ux/gui/src/app/authentication/authentication.interceptor.ts similarity index 100% rename from lpot/ux/gui/src/app/authentication/authentication.interceptor.ts rename to neural_compressor/ux/gui/src/app/authentication/authentication.interceptor.ts diff --git a/lpot/ux/gui/src/app/details/details.component.html b/neural_compressor/ux/gui/src/app/details/details.component.html similarity index 100% rename from lpot/ux/gui/src/app/details/details.component.html rename to neural_compressor/ux/gui/src/app/details/details.component.html diff --git a/lpot/ux/gui/src/app/details/details.component.scss b/neural_compressor/ux/gui/src/app/details/details.component.scss similarity index 100% rename from lpot/ux/gui/src/app/details/details.component.scss rename to neural_compressor/ux/gui/src/app/details/details.component.scss diff --git a/lpot/ux/gui/src/app/details/details.component.spec.ts b/neural_compressor/ux/gui/src/app/details/details.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/details/details.component.spec.ts rename to neural_compressor/ux/gui/src/app/details/details.component.spec.ts diff --git a/lpot/ux/gui/src/app/details/details.component.ts b/neural_compressor/ux/gui/src/app/details/details.component.ts similarity index 100% rename from lpot/ux/gui/src/app/details/details.component.ts rename to 
neural_compressor/ux/gui/src/app/details/details.component.ts diff --git a/lpot/ux/gui/src/app/error/error.component.html b/neural_compressor/ux/gui/src/app/error/error.component.html similarity index 100% rename from lpot/ux/gui/src/app/error/error.component.html rename to neural_compressor/ux/gui/src/app/error/error.component.html diff --git a/lpot/ux/gui/src/app/error/error.component.scss b/neural_compressor/ux/gui/src/app/error/error.component.scss similarity index 100% rename from lpot/ux/gui/src/app/error/error.component.scss rename to neural_compressor/ux/gui/src/app/error/error.component.scss diff --git a/lpot/ux/gui/src/app/error/error.component.spec.ts b/neural_compressor/ux/gui/src/app/error/error.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/error/error.component.spec.ts rename to neural_compressor/ux/gui/src/app/error/error.component.spec.ts diff --git a/lpot/ux/gui/src/app/error/error.component.ts b/neural_compressor/ux/gui/src/app/error/error.component.ts similarity index 100% rename from lpot/ux/gui/src/app/error/error.component.ts rename to neural_compressor/ux/gui/src/app/error/error.component.ts diff --git a/lpot/ux/gui/src/app/file-browser/file-browser.component.html b/neural_compressor/ux/gui/src/app/file-browser/file-browser.component.html similarity index 100% rename from lpot/ux/gui/src/app/file-browser/file-browser.component.html rename to neural_compressor/ux/gui/src/app/file-browser/file-browser.component.html diff --git a/lpot/ux/gui/src/app/file-browser/file-browser.component.scss b/neural_compressor/ux/gui/src/app/file-browser/file-browser.component.scss similarity index 100% rename from lpot/ux/gui/src/app/file-browser/file-browser.component.scss rename to neural_compressor/ux/gui/src/app/file-browser/file-browser.component.scss diff --git a/lpot/ux/gui/src/app/file-browser/file-browser.component.spec.ts b/neural_compressor/ux/gui/src/app/file-browser/file-browser.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/file-browser/file-browser.component.spec.ts rename to neural_compressor/ux/gui/src/app/file-browser/file-browser.component.spec.ts diff --git a/lpot/ux/gui/src/app/file-browser/file-browser.component.ts b/neural_compressor/ux/gui/src/app/file-browser/file-browser.component.ts similarity index 100% rename from lpot/ux/gui/src/app/file-browser/file-browser.component.ts rename to neural_compressor/ux/gui/src/app/file-browser/file-browser.component.ts diff --git a/lpot/ux/gui/src/app/graph/graph.component.html b/neural_compressor/ux/gui/src/app/graph/graph.component.html similarity index 100% rename from lpot/ux/gui/src/app/graph/graph.component.html rename to neural_compressor/ux/gui/src/app/graph/graph.component.html diff --git a/lpot/ux/gui/src/app/graph/graph.component.scss b/neural_compressor/ux/gui/src/app/graph/graph.component.scss similarity index 100% rename from lpot/ux/gui/src/app/graph/graph.component.scss rename to neural_compressor/ux/gui/src/app/graph/graph.component.scss diff --git a/lpot/ux/gui/src/app/graph/graph.component.spec.ts b/neural_compressor/ux/gui/src/app/graph/graph.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/graph/graph.component.spec.ts rename to neural_compressor/ux/gui/src/app/graph/graph.component.spec.ts diff --git a/lpot/ux/gui/src/app/graph/graph.component.ts b/neural_compressor/ux/gui/src/app/graph/graph.component.ts similarity index 100% rename from lpot/ux/gui/src/app/graph/graph.component.ts rename to 
neural_compressor/ux/gui/src/app/graph/graph.component.ts diff --git a/lpot/ux/gui/src/app/home/home.component.html b/neural_compressor/ux/gui/src/app/home/home.component.html similarity index 100% rename from lpot/ux/gui/src/app/home/home.component.html rename to neural_compressor/ux/gui/src/app/home/home.component.html diff --git a/lpot/ux/gui/src/app/home/home.component.scss b/neural_compressor/ux/gui/src/app/home/home.component.scss similarity index 100% rename from lpot/ux/gui/src/app/home/home.component.scss rename to neural_compressor/ux/gui/src/app/home/home.component.scss diff --git a/lpot/ux/gui/src/app/home/home.component.spec.ts b/neural_compressor/ux/gui/src/app/home/home.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/home/home.component.spec.ts rename to neural_compressor/ux/gui/src/app/home/home.component.spec.ts diff --git a/lpot/ux/gui/src/app/home/home.component.ts b/neural_compressor/ux/gui/src/app/home/home.component.ts similarity index 100% rename from lpot/ux/gui/src/app/home/home.component.ts rename to neural_compressor/ux/gui/src/app/home/home.component.ts diff --git a/lpot/ux/gui/src/app/import-model/import-model.component.html b/neural_compressor/ux/gui/src/app/import-model/import-model.component.html similarity index 99% rename from lpot/ux/gui/src/app/import-model/import-model.component.html rename to neural_compressor/ux/gui/src/app/import-model/import-model.component.html index 2a17c30b1fc..222e0d3a661 100644 --- a/lpot/ux/gui/src/app/import-model/import-model.component.html +++ b/neural_compressor/ux/gui/src/app/import-model/import-model.component.html @@ -264,7 +264,7 @@

[hunk content lost in extraction: two one-line HTML edits in import-model.component.html — one in the Evaluation/Transforms panel (hunk @@ -264,7 +264,7 @@ above) and one in the Transforms/Metric panel (hunk @@ -333,7 +333,7 @@); the changed markup itself did not survive]

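> **Note**: Earlier in this diff, `optimize_model.py` and `graph_optimization.py` move their imports to `neural_compressor.experimental`. Below is a minimal sketch of what that renamed entry point looks like in user code; the model path, precision list, and output path are hypothetical placeholders, not taken from this changeset:

```python
# Sketch only: illustrates the renamed Graph_Optimization import that
# optimize_model.py now uses; all paths here are hypothetical.
from neural_compressor.experimental import Graph_Optimization, common

graph_optimizer = Graph_Optimization()              # default precision-conversion mode
graph_optimizer.precisions = 'bf16, fp32'           # precisions to convert the graph to
graph_optimizer.model = common.Model('./model.pb')  # hypothetical frozen-graph path
optimized_model = graph_optimizer()                 # run graph optimization
optimized_model.save('./optimized_model.pb')        # hypothetical output location
```

The same `common.Model` wrapper is shared with the quantization path sketched further below.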
diff --git a/lpot/ux/gui/src/app/import-model/import-model.component.scss b/neural_compressor/ux/gui/src/app/import-model/import-model.component.scss similarity index 100% rename from lpot/ux/gui/src/app/import-model/import-model.component.scss rename to neural_compressor/ux/gui/src/app/import-model/import-model.component.scss diff --git a/lpot/ux/gui/src/app/import-model/import-model.component.spec.ts b/neural_compressor/ux/gui/src/app/import-model/import-model.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/import-model/import-model.component.spec.ts rename to neural_compressor/ux/gui/src/app/import-model/import-model.component.spec.ts diff --git a/lpot/ux/gui/src/app/import-model/import-model.component.ts b/neural_compressor/ux/gui/src/app/import-model/import-model.component.ts similarity index 100% rename from lpot/ux/gui/src/app/import-model/import-model.component.ts rename to neural_compressor/ux/gui/src/app/import-model/import-model.component.ts diff --git a/lpot/ux/gui/src/app/menu/menu.component.html b/neural_compressor/ux/gui/src/app/menu/menu.component.html similarity index 100% rename from lpot/ux/gui/src/app/menu/menu.component.html rename to neural_compressor/ux/gui/src/app/menu/menu.component.html diff --git a/lpot/ux/gui/src/app/menu/menu.component.scss b/neural_compressor/ux/gui/src/app/menu/menu.component.scss similarity index 100% rename from lpot/ux/gui/src/app/menu/menu.component.scss rename to neural_compressor/ux/gui/src/app/menu/menu.component.scss diff --git a/lpot/ux/gui/src/app/menu/menu.component.spec.ts b/neural_compressor/ux/gui/src/app/menu/menu.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/menu/menu.component.spec.ts rename to neural_compressor/ux/gui/src/app/menu/menu.component.spec.ts diff --git a/lpot/ux/gui/src/app/menu/menu.component.ts b/neural_compressor/ux/gui/src/app/menu/menu.component.ts similarity index 100% rename from lpot/ux/gui/src/app/menu/menu.component.ts rename to neural_compressor/ux/gui/src/app/menu/menu.component.ts diff --git a/lpot/ux/gui/src/app/model-list/model-list.component.html b/neural_compressor/ux/gui/src/app/model-list/model-list.component.html similarity index 100% rename from lpot/ux/gui/src/app/model-list/model-list.component.html rename to neural_compressor/ux/gui/src/app/model-list/model-list.component.html diff --git a/lpot/ux/gui/src/app/model-list/model-list.component.scss b/neural_compressor/ux/gui/src/app/model-list/model-list.component.scss similarity index 100% rename from lpot/ux/gui/src/app/model-list/model-list.component.scss rename to neural_compressor/ux/gui/src/app/model-list/model-list.component.scss diff --git a/lpot/ux/gui/src/app/model-list/model-list.component.spec.ts b/neural_compressor/ux/gui/src/app/model-list/model-list.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/model-list/model-list.component.spec.ts rename to neural_compressor/ux/gui/src/app/model-list/model-list.component.spec.ts diff --git a/lpot/ux/gui/src/app/model-list/model-list.component.ts b/neural_compressor/ux/gui/src/app/model-list/model-list.component.ts similarity index 100% rename from lpot/ux/gui/src/app/model-list/model-list.component.ts rename to neural_compressor/ux/gui/src/app/model-list/model-list.component.ts diff --git a/lpot/ux/gui/src/app/pipes/long-name.pipe.ts b/neural_compressor/ux/gui/src/app/pipes/long-name.pipe.ts similarity index 100% rename from lpot/ux/gui/src/app/pipes/long-name.pipe.ts rename to 
neural_compressor/ux/gui/src/app/pipes/long-name.pipe.ts diff --git a/lpot/ux/gui/src/app/pipes/model-list.pipe.ts b/neural_compressor/ux/gui/src/app/pipes/model-list.pipe.ts similarity index 100% rename from lpot/ux/gui/src/app/pipes/model-list.pipe.ts rename to neural_compressor/ux/gui/src/app/pipes/model-list.pipe.ts diff --git a/lpot/ux/gui/src/app/predefined-models/predefined-models.component.html b/neural_compressor/ux/gui/src/app/predefined-models/predefined-models.component.html similarity index 100% rename from lpot/ux/gui/src/app/predefined-models/predefined-models.component.html rename to neural_compressor/ux/gui/src/app/predefined-models/predefined-models.component.html diff --git a/lpot/ux/gui/src/app/predefined-models/predefined-models.component.scss b/neural_compressor/ux/gui/src/app/predefined-models/predefined-models.component.scss similarity index 100% rename from lpot/ux/gui/src/app/predefined-models/predefined-models.component.scss rename to neural_compressor/ux/gui/src/app/predefined-models/predefined-models.component.scss diff --git a/lpot/ux/gui/src/app/predefined-models/predefined-models.component.spec.ts b/neural_compressor/ux/gui/src/app/predefined-models/predefined-models.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/predefined-models/predefined-models.component.spec.ts rename to neural_compressor/ux/gui/src/app/predefined-models/predefined-models.component.spec.ts diff --git a/lpot/ux/gui/src/app/predefined-models/predefined-models.component.ts b/neural_compressor/ux/gui/src/app/predefined-models/predefined-models.component.ts similarity index 100% rename from lpot/ux/gui/src/app/predefined-models/predefined-models.component.ts rename to neural_compressor/ux/gui/src/app/predefined-models/predefined-models.component.ts diff --git a/lpot/ux/gui/src/app/services/model.service.spec.ts b/neural_compressor/ux/gui/src/app/services/model.service.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/services/model.service.spec.ts rename to neural_compressor/ux/gui/src/app/services/model.service.spec.ts diff --git a/lpot/ux/gui/src/app/services/model.service.ts b/neural_compressor/ux/gui/src/app/services/model.service.ts similarity index 100% rename from lpot/ux/gui/src/app/services/model.service.ts rename to neural_compressor/ux/gui/src/app/services/model.service.ts diff --git a/lpot/ux/gui/src/app/services/socket.service.spec.ts b/neural_compressor/ux/gui/src/app/services/socket.service.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/services/socket.service.spec.ts rename to neural_compressor/ux/gui/src/app/services/socket.service.spec.ts diff --git a/lpot/ux/gui/src/app/services/socket.service.ts b/neural_compressor/ux/gui/src/app/services/socket.service.ts similarity index 100% rename from lpot/ux/gui/src/app/services/socket.service.ts rename to neural_compressor/ux/gui/src/app/services/socket.service.ts diff --git a/lpot/ux/gui/src/app/system-info/system-info.component.html b/neural_compressor/ux/gui/src/app/system-info/system-info.component.html similarity index 100% rename from lpot/ux/gui/src/app/system-info/system-info.component.html rename to neural_compressor/ux/gui/src/app/system-info/system-info.component.html diff --git a/lpot/ux/gui/src/app/system-info/system-info.component.scss b/neural_compressor/ux/gui/src/app/system-info/system-info.component.scss similarity index 100% rename from lpot/ux/gui/src/app/system-info/system-info.component.scss rename to 
neural_compressor/ux/gui/src/app/system-info/system-info.component.scss diff --git a/lpot/ux/gui/src/app/system-info/system-info.component.spec.ts b/neural_compressor/ux/gui/src/app/system-info/system-info.component.spec.ts similarity index 100% rename from lpot/ux/gui/src/app/system-info/system-info.component.spec.ts rename to neural_compressor/ux/gui/src/app/system-info/system-info.component.spec.ts diff --git a/lpot/ux/gui/src/app/system-info/system-info.component.ts b/neural_compressor/ux/gui/src/app/system-info/system-info.component.ts similarity index 100% rename from lpot/ux/gui/src/app/system-info/system-info.component.ts rename to neural_compressor/ux/gui/src/app/system-info/system-info.component.ts diff --git a/lpot/ux/gui/src/assets/.gitkeep b/neural_compressor/ux/gui/src/assets/.gitkeep similarity index 100% rename from lpot/ux/gui/src/assets/.gitkeep rename to neural_compressor/ux/gui/src/assets/.gitkeep diff --git a/lpot/ux/gui/src/assets/004a-information-solid.svg b/neural_compressor/ux/gui/src/assets/004a-information-solid.svg similarity index 100% rename from lpot/ux/gui/src/assets/004a-information-solid.svg rename to neural_compressor/ux/gui/src/assets/004a-information-solid.svg diff --git a/lpot/ux/gui/src/assets/005a-help-solid-gray.svg b/neural_compressor/ux/gui/src/assets/005a-help-solid-gray.svg similarity index 100% rename from lpot/ux/gui/src/assets/005a-help-solid-gray.svg rename to neural_compressor/ux/gui/src/assets/005a-help-solid-gray.svg diff --git a/lpot/ux/gui/src/assets/005a-help-solid.svg b/neural_compressor/ux/gui/src/assets/005a-help-solid.svg similarity index 100% rename from lpot/ux/gui/src/assets/005a-help-solid.svg rename to neural_compressor/ux/gui/src/assets/005a-help-solid.svg diff --git a/lpot/ux/gui/src/assets/006a-alert-solid-red.svg b/neural_compressor/ux/gui/src/assets/006a-alert-solid-red.svg similarity index 100% rename from lpot/ux/gui/src/assets/006a-alert-solid-red.svg rename to neural_compressor/ux/gui/src/assets/006a-alert-solid-red.svg diff --git a/lpot/ux/gui/src/assets/007a-minus-solid.svg b/neural_compressor/ux/gui/src/assets/007a-minus-solid.svg similarity index 100% rename from lpot/ux/gui/src/assets/007a-minus-solid.svg rename to neural_compressor/ux/gui/src/assets/007a-minus-solid.svg diff --git a/lpot/ux/gui/src/assets/008a-plus-solid-blue.svg b/neural_compressor/ux/gui/src/assets/008a-plus-solid-blue.svg similarity index 100% rename from lpot/ux/gui/src/assets/008a-plus-solid-blue.svg rename to neural_compressor/ux/gui/src/assets/008a-plus-solid-blue.svg diff --git a/lpot/ux/gui/src/assets/008a-plus-solid.svg b/neural_compressor/ux/gui/src/assets/008a-plus-solid.svg similarity index 100% rename from lpot/ux/gui/src/assets/008a-plus-solid.svg rename to neural_compressor/ux/gui/src/assets/008a-plus-solid.svg diff --git a/lpot/ux/gui/src/assets/009a-close-solid.svg b/neural_compressor/ux/gui/src/assets/009a-close-solid.svg similarity index 100% rename from lpot/ux/gui/src/assets/009a-close-solid.svg rename to neural_compressor/ux/gui/src/assets/009a-close-solid.svg diff --git a/lpot/ux/gui/src/assets/010a-passed-completed-solid.svg b/neural_compressor/ux/gui/src/assets/010a-passed-completed-solid.svg similarity index 100% rename from lpot/ux/gui/src/assets/010a-passed-completed-solid.svg rename to neural_compressor/ux/gui/src/assets/010a-passed-completed-solid.svg diff --git a/lpot/ux/gui/src/assets/016-edit.svg b/neural_compressor/ux/gui/src/assets/016-edit.svg similarity index 100% rename from 
lpot/ux/gui/src/assets/016-edit.svg rename to neural_compressor/ux/gui/src/assets/016-edit.svg diff --git a/lpot/ux/gui/src/assets/050a-folder-solid-white.svg b/neural_compressor/ux/gui/src/assets/050a-folder-solid-white.svg similarity index 100% rename from lpot/ux/gui/src/assets/050a-folder-solid-white.svg rename to neural_compressor/ux/gui/src/assets/050a-folder-solid-white.svg diff --git a/lpot/ux/gui/src/assets/050a-folder-solid.svg b/neural_compressor/ux/gui/src/assets/050a-folder-solid.svg similarity index 100% rename from lpot/ux/gui/src/assets/050a-folder-solid.svg rename to neural_compressor/ux/gui/src/assets/050a-folder-solid.svg diff --git a/lpot/ux/gui/src/assets/056a-save-solid-white.svg b/neural_compressor/ux/gui/src/assets/056a-save-solid-white.svg similarity index 100% rename from lpot/ux/gui/src/assets/056a-save-solid-white.svg rename to neural_compressor/ux/gui/src/assets/056a-save-solid-white.svg diff --git a/lpot/ux/gui/src/assets/057b-trash-outlined.svg b/neural_compressor/ux/gui/src/assets/057b-trash-outlined.svg similarity index 100% rename from lpot/ux/gui/src/assets/057b-trash-outlined.svg rename to neural_compressor/ux/gui/src/assets/057b-trash-outlined.svg diff --git a/lpot/ux/gui/src/assets/073-menu.svg b/neural_compressor/ux/gui/src/assets/073-menu.svg similarity index 100% rename from lpot/ux/gui/src/assets/073-menu.svg rename to neural_compressor/ux/gui/src/assets/073-menu.svg diff --git a/lpot/ux/gui/src/assets/074-rewind-reverse.svg b/neural_compressor/ux/gui/src/assets/074-rewind-reverse.svg similarity index 100% rename from lpot/ux/gui/src/assets/074-rewind-reverse.svg rename to neural_compressor/ux/gui/src/assets/074-rewind-reverse.svg diff --git a/lpot/ux/gui/src/assets/077-arrow-up.svg b/neural_compressor/ux/gui/src/assets/077-arrow-up.svg similarity index 100% rename from lpot/ux/gui/src/assets/077-arrow-up.svg rename to neural_compressor/ux/gui/src/assets/077-arrow-up.svg diff --git a/lpot/ux/gui/src/assets/083-arrow-forward-right.svg b/neural_compressor/ux/gui/src/assets/083-arrow-forward-right.svg similarity index 100% rename from lpot/ux/gui/src/assets/083-arrow-forward-right.svg rename to neural_compressor/ux/gui/src/assets/083-arrow-forward-right.svg diff --git a/lpot/ux/gui/src/assets/088a-start-solid-gray.svg b/neural_compressor/ux/gui/src/assets/088a-start-solid-gray.svg similarity index 100% rename from lpot/ux/gui/src/assets/088a-start-solid-gray.svg rename to neural_compressor/ux/gui/src/assets/088a-start-solid-gray.svg diff --git a/lpot/ux/gui/src/assets/088a-start-solid-white.svg b/neural_compressor/ux/gui/src/assets/088a-start-solid-white.svg similarity index 100% rename from lpot/ux/gui/src/assets/088a-start-solid-white.svg rename to neural_compressor/ux/gui/src/assets/088a-start-solid-white.svg diff --git a/lpot/ux/gui/src/assets/088a-start-solid.svg b/neural_compressor/ux/gui/src/assets/088a-start-solid.svg similarity index 100% rename from lpot/ux/gui/src/assets/088a-start-solid.svg rename to neural_compressor/ux/gui/src/assets/088a-start-solid.svg diff --git a/lpot/ux/gui/src/assets/145b-document-outlined-white.svg b/neural_compressor/ux/gui/src/assets/145b-document-outlined-white.svg similarity index 100% rename from lpot/ux/gui/src/assets/145b-document-outlined-white.svg rename to neural_compressor/ux/gui/src/assets/145b-document-outlined-white.svg diff --git a/lpot/ux/gui/src/assets/145b-document-outlined.svg b/neural_compressor/ux/gui/src/assets/145b-document-outlined.svg similarity index 100% rename from 
lpot/ux/gui/src/assets/145b-document-outlined.svg rename to neural_compressor/ux/gui/src/assets/145b-document-outlined.svg diff --git a/lpot/ux/gui/src/assets/146a-copy-solid.svg b/neural_compressor/ux/gui/src/assets/146a-copy-solid.svg similarity index 100% rename from lpot/ux/gui/src/assets/146a-copy-solid.svg rename to neural_compressor/ux/gui/src/assets/146a-copy-solid.svg diff --git a/lpot/ux/gui/src/assets/298a-workflow-process-solid.svg b/neural_compressor/ux/gui/src/assets/298a-workflow-process-solid.svg similarity index 100% rename from lpot/ux/gui/src/assets/298a-workflow-process-solid.svg rename to neural_compressor/ux/gui/src/assets/298a-workflow-process-solid.svg diff --git a/lpot/ux/gui/src/assets/create-new.png b/neural_compressor/ux/gui/src/assets/create-new.png similarity index 100% rename from lpot/ux/gui/src/assets/create-new.png rename to neural_compressor/ux/gui/src/assets/create-new.png diff --git a/lpot/ux/gui/src/assets/fonts/IntelClear_Bd.ttf b/neural_compressor/ux/gui/src/assets/fonts/IntelClear_Bd.ttf similarity index 100% rename from lpot/ux/gui/src/assets/fonts/IntelClear_Bd.ttf rename to neural_compressor/ux/gui/src/assets/fonts/IntelClear_Bd.ttf diff --git a/lpot/ux/gui/src/assets/fonts/IntelClear_Lt.ttf b/neural_compressor/ux/gui/src/assets/fonts/IntelClear_Lt.ttf similarity index 100% rename from lpot/ux/gui/src/assets/fonts/IntelClear_Lt.ttf rename to neural_compressor/ux/gui/src/assets/fonts/IntelClear_Lt.ttf diff --git a/lpot/ux/gui/src/assets/fonts/IntelClear_Rg.ttf b/neural_compressor/ux/gui/src/assets/fonts/IntelClear_Rg.ttf similarity index 100% rename from lpot/ux/gui/src/assets/fonts/IntelClear_Rg.ttf rename to neural_compressor/ux/gui/src/assets/fonts/IntelClear_Rg.ttf diff --git a/lpot/ux/gui/src/assets/fonts/intelone-display-bold.ttf b/neural_compressor/ux/gui/src/assets/fonts/intelone-display-bold.ttf similarity index 100% rename from lpot/ux/gui/src/assets/fonts/intelone-display-bold.ttf rename to neural_compressor/ux/gui/src/assets/fonts/intelone-display-bold.ttf diff --git a/lpot/ux/gui/src/assets/fonts/intelone-display-light.ttf b/neural_compressor/ux/gui/src/assets/fonts/intelone-display-light.ttf similarity index 100% rename from lpot/ux/gui/src/assets/fonts/intelone-display-light.ttf rename to neural_compressor/ux/gui/src/assets/fonts/intelone-display-light.ttf diff --git a/lpot/ux/gui/src/assets/fonts/intelone-display-regular.ttf b/neural_compressor/ux/gui/src/assets/fonts/intelone-display-regular.ttf similarity index 100% rename from lpot/ux/gui/src/assets/fonts/intelone-display-regular.ttf rename to neural_compressor/ux/gui/src/assets/fonts/intelone-display-regular.ttf diff --git a/lpot/ux/gui/src/assets/logo-energyblue-72px.svg b/neural_compressor/ux/gui/src/assets/logo-energyblue-72px.svg similarity index 100% rename from lpot/ux/gui/src/assets/logo-energyblue-72px.svg rename to neural_compressor/ux/gui/src/assets/logo-energyblue-72px.svg diff --git a/lpot/ux/gui/src/assets/model-file.svg b/neural_compressor/ux/gui/src/assets/model-file.svg similarity index 100% rename from lpot/ux/gui/src/assets/model-file.svg rename to neural_compressor/ux/gui/src/assets/model-file.svg diff --git a/lpot/ux/gui/src/assets/model-folder.svg b/neural_compressor/ux/gui/src/assets/model-folder.svg similarity index 100% rename from lpot/ux/gui/src/assets/model-folder.svg rename to neural_compressor/ux/gui/src/assets/model-folder.svg diff --git a/lpot/ux/gui/src/assets/nn.png b/neural_compressor/ux/gui/src/assets/nn.png similarity index 100% rename from 
lpot/ux/gui/src/assets/nn.png rename to neural_compressor/ux/gui/src/assets/nn.png diff --git a/lpot/ux/gui/src/environments/environment.prod.ts b/neural_compressor/ux/gui/src/environments/environment.prod.ts similarity index 100% rename from lpot/ux/gui/src/environments/environment.prod.ts rename to neural_compressor/ux/gui/src/environments/environment.prod.ts diff --git a/lpot/ux/gui/src/index.html b/neural_compressor/ux/gui/src/index.html similarity index 100% rename from lpot/ux/gui/src/index.html rename to neural_compressor/ux/gui/src/index.html diff --git a/lpot/ux/gui/src/main.ts b/neural_compressor/ux/gui/src/main.ts similarity index 100% rename from lpot/ux/gui/src/main.ts rename to neural_compressor/ux/gui/src/main.ts diff --git a/lpot/ux/gui/src/polyfills.ts b/neural_compressor/ux/gui/src/polyfills.ts similarity index 100% rename from lpot/ux/gui/src/polyfills.ts rename to neural_compressor/ux/gui/src/polyfills.ts diff --git a/lpot/ux/gui/src/styles.scss b/neural_compressor/ux/gui/src/styles.scss similarity index 100% rename from lpot/ux/gui/src/styles.scss rename to neural_compressor/ux/gui/src/styles.scss diff --git a/lpot/ux/gui/src/test.ts b/neural_compressor/ux/gui/src/test.ts similarity index 100% rename from lpot/ux/gui/src/test.ts rename to neural_compressor/ux/gui/src/test.ts diff --git a/lpot/ux/gui/src/variables.scss b/neural_compressor/ux/gui/src/variables.scss similarity index 100% rename from lpot/ux/gui/src/variables.scss rename to neural_compressor/ux/gui/src/variables.scss diff --git a/lpot/ux/gui/tsconfig.app.json b/neural_compressor/ux/gui/tsconfig.app.json similarity index 100% rename from lpot/ux/gui/tsconfig.app.json rename to neural_compressor/ux/gui/tsconfig.app.json diff --git a/lpot/ux/gui/tsconfig.json b/neural_compressor/ux/gui/tsconfig.json similarity index 100% rename from lpot/ux/gui/tsconfig.json rename to neural_compressor/ux/gui/tsconfig.json diff --git a/lpot/ux/gui/tsconfig.spec.json b/neural_compressor/ux/gui/tsconfig.spec.json similarity index 100% rename from lpot/ux/gui/tsconfig.spec.json rename to neural_compressor/ux/gui/tsconfig.spec.json diff --git a/lpot/ux/gui/tslint.json b/neural_compressor/ux/gui/tslint.json similarity index 100% rename from lpot/ux/gui/tslint.json rename to neural_compressor/ux/gui/tslint.json diff --git a/lpot/ux/neural_compressor_bench.py b/neural_compressor/ux/neural_compressor_bench.py similarity index 82% rename from lpot/ux/neural_compressor_bench.py rename to neural_compressor/ux/neural_compressor_bench.py index 530e0e00adc..9e21c041f40 100644 --- a/lpot/ux/neural_compressor_bench.py +++ b/neural_compressor/ux/neural_compressor_bench.py @@ -24,11 +24,11 @@ can_patch_ssl = "ssl" not in sys.modules gevent.monkey.patch_all(ssl=can_patch_ssl) -from lpot.ux.utils.environment import Environment # noqa: E402 -from lpot.ux.utils.exceptions import NotFoundException # noqa: E402 -from lpot.ux.utils.logger import change_log_level # noqa: E402 -from lpot.ux.web.configuration import Configuration # noqa: E402 -from lpot.ux.web.server import run_server # noqa: E402 +from neural_compressor.ux.utils.environment import Environment # noqa: E402 +from neural_compressor.ux.utils.exceptions import NotFoundException # noqa: E402 +from neural_compressor.ux.utils.logger import change_log_level # noqa: E402 +from neural_compressor.ux.web.configuration import Configuration # noqa: E402 +from neural_compressor.ux.web.server import run_server # noqa: E402 def main() -> None: diff --git a/lpot/ux/utils/__init__.py 
b/neural_compressor/ux/utils/__init__.py similarity index 100% rename from lpot/ux/utils/__init__.py rename to neural_compressor/ux/utils/__init__.py diff --git a/lpot/ux/utils/configs/dataloaders.json b/neural_compressor/ux/utils/configs/dataloaders.json similarity index 100% rename from lpot/ux/utils/configs/dataloaders.json rename to neural_compressor/ux/utils/configs/dataloaders.json diff --git a/lpot/ux/utils/configs/metrics.json b/neural_compressor/ux/utils/configs/metrics.json similarity index 100% rename from lpot/ux/utils/configs/metrics.json rename to neural_compressor/ux/utils/configs/metrics.json diff --git a/lpot/ux/utils/configs/models.json b/neural_compressor/ux/utils/configs/models.json similarity index 100% rename from lpot/ux/utils/configs/models.json rename to neural_compressor/ux/utils/configs/models.json diff --git a/lpot/ux/utils/configs/objectives.json b/neural_compressor/ux/utils/configs/objectives.json similarity index 100% rename from lpot/ux/utils/configs/objectives.json rename to neural_compressor/ux/utils/configs/objectives.json diff --git a/lpot/ux/utils/configs/precisions.json b/neural_compressor/ux/utils/configs/precisions.json similarity index 100% rename from lpot/ux/utils/configs/precisions.json rename to neural_compressor/ux/utils/configs/precisions.json diff --git a/lpot/ux/utils/configs/predefined_configs/onnxrt/image_recognition.yaml b/neural_compressor/ux/utils/configs/predefined_configs/onnxrt/image_recognition.yaml similarity index 95% rename from lpot/ux/utils/configs/predefined_configs/onnxrt/image_recognition.yaml rename to neural_compressor/ux/utils/configs/predefined_configs/onnxrt/image_recognition.yaml index 19106a3e6ff..d1b44b9eb5c 100644 --- a/lpot/ux/utils/configs/predefined_configs/onnxrt/image_recognition.yaml +++ b/neural_compressor/ux/utils/configs/predefined_configs/onnxrt/image_recognition.yaml @@ -37,8 +37,8 @@ quantization: # optional. tuning constrai } } -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/lpot/ux/utils/configs/predefined_configs/onnxrt/nlp.yaml b/neural_compressor/ux/utils/configs/predefined_configs/onnxrt/nlp.yaml similarity index 100% rename from lpot/ux/utils/configs/predefined_configs/onnxrt/nlp.yaml rename to neural_compressor/ux/utils/configs/predefined_configs/onnxrt/nlp.yaml diff --git a/lpot/ux/utils/configs/predefined_configs/tensorflow/image_recognition.yaml b/neural_compressor/ux/utils/configs/predefined_configs/tensorflow/image_recognition.yaml similarity index 95% rename from lpot/ux/utils/configs/predefined_configs/tensorflow/image_recognition.yaml rename to neural_compressor/ux/utils/configs/predefined_configs/tensorflow/image_recognition.yaml index 66387d47216..59f8e5a4a58 100644 --- a/lpot/ux/utils/configs/predefined_configs/tensorflow/image_recognition.yaml +++ b/neural_compressor/ux/utils/configs/predefined_configs/tensorflow/image_recognition.yaml @@ -35,8 +35,8 @@ quantization: # optional. tuning constrai activation: algorithm: minmax -evaluation: # optional. required if user doesn't provide eval_func in lpot.Quantization. - accuracy: # optional. 
required if user doesn't provide eval_func in lpot.Quantization. +evaluation: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: topk: 1 # built-in metrics are topk, map, f1, allow user to register new metric. dataloader: diff --git a/lpot/ux/utils/configs/predefined_configs/tensorflow/nlp.yaml b/neural_compressor/ux/utils/configs/predefined_configs/tensorflow/nlp.yaml similarity index 100% rename from lpot/ux/utils/configs/predefined_configs/tensorflow/nlp.yaml rename to neural_compressor/ux/utils/configs/predefined_configs/tensorflow/nlp.yaml diff --git a/lpot/ux/utils/configs/predefined_configs/tensorflow/object_detection.yaml b/neural_compressor/ux/utils/configs/predefined_configs/tensorflow/object_detection.yaml similarity index 95% rename from lpot/ux/utils/configs/predefined_configs/tensorflow/object_detection.yaml rename to neural_compressor/ux/utils/configs/predefined_configs/tensorflow/object_detection.yaml index f2e7c5f1799..2f1fb1d2f46 100644 --- a/lpot/ux/utils/configs/predefined_configs/tensorflow/object_detection.yaml +++ b/neural_compressor/ux/utils/configs/predefined_configs/tensorflow/object_detection.yaml @@ -22,7 +22,7 @@ model: # mandatory. used to specif quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. calibration: sampling_size: 100 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 100 dataset: COCORecord: @@ -34,13 +34,13 @@ quantization: # optional. tuning constrai algorithm: minmax evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: COCOmAP: configs: # optional. if not specified, use all cores in 1 socket. cores_per_instance: 28 num_of_instance: 1 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 1 dataset: COCORecord: diff --git a/lpot/ux/utils/configs/predefined_configs/tensorflow/object_detection_ssd.yaml b/neural_compressor/ux/utils/configs/predefined_configs/tensorflow/object_detection_ssd.yaml similarity index 96% rename from lpot/ux/utils/configs/predefined_configs/tensorflow/object_detection_ssd.yaml rename to neural_compressor/ux/utils/configs/predefined_configs/tensorflow/object_detection_ssd.yaml index f7b4478b53b..fa9318cc9db 100644 --- a/lpot/ux/utils/configs/predefined_configs/tensorflow/object_detection_ssd.yaml +++ b/neural_compressor/ux/utils/configs/predefined_configs/tensorflow/object_detection_ssd.yaml @@ -22,7 +22,7 @@ model: # mandatory. used to specif quantization: # optional. tuning constraints on model-wise for advance user to reduce tuning space. calibration: sampling_size: 100 # optional. default value is 100. used to set how many samples should be used in calibration. - dataloader: # optional. 
if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 100 dataset: COCORecord: @@ -41,13 +41,13 @@ quantization: # optional. tuning constrai algorithm: minmax evaluation: # optional. used to config evaluation process. - accuracy: # optional. required if user doesn't provide eval_func in lpot.Quantization. + accuracy: # optional. required if user doesn't provide eval_func in neural_compressor.Quantization. metric: COCOmAP: configs: # optional. if not specified, use all cores in 1 socket. cores_per_instance: 28 num_of_instance: 1 - dataloader: # optional. if not specified, user need construct a q_dataloader in code for lpot.Quantization. + dataloader: # optional. if not specified, user need construct a q_dataloader in code for neural_compressor.Quantization. batch_size: 1 dataset: COCORecord: diff --git a/lpot/ux/utils/configs/predefined_configs/tensorflow/recommendation.yaml b/neural_compressor/ux/utils/configs/predefined_configs/tensorflow/recommendation.yaml similarity index 100% rename from lpot/ux/utils/configs/predefined_configs/tensorflow/recommendation.yaml rename to neural_compressor/ux/utils/configs/predefined_configs/tensorflow/recommendation.yaml diff --git a/lpot/ux/utils/configs/strategies.json b/neural_compressor/ux/utils/configs/strategies.json similarity index 100% rename from lpot/ux/utils/configs/strategies.json rename to neural_compressor/ux/utils/configs/strategies.json diff --git a/lpot/ux/utils/configs/transforms.json b/neural_compressor/ux/utils/configs/transforms.json similarity index 100% rename from lpot/ux/utils/configs/transforms.json rename to neural_compressor/ux/utils/configs/transforms.json diff --git a/lpot/ux/utils/configs/transforms_filter.json b/neural_compressor/ux/utils/configs/transforms_filter.json similarity index 100% rename from lpot/ux/utils/configs/transforms_filter.json rename to neural_compressor/ux/utils/configs/transforms_filter.json diff --git a/lpot/ux/utils/consts.py b/neural_compressor/ux/utils/consts.py similarity index 89% rename from lpot/ux/utils/consts.py rename to neural_compressor/ux/utils/consts.py index cc5472bbbda..920d5eb0371 100644 --- a/lpot/ux/utils/consts.py +++ b/neural_compressor/ux/utils/consts.py @@ -15,11 +15,11 @@ """Constant values.""" -from lpot.ux.utils.utils import release_tag +from neural_compressor.ux.utils.utils import release_tag github_info = { "user": "intel", - "repository": "lpot", + "repository": "neural-compressor", "tag": release_tag(), } diff --git a/lpot/ux/utils/environment.py b/neural_compressor/ux/utils/environment.py similarity index 89% rename from lpot/ux/utils/environment.py rename to neural_compressor/ux/utils/environment.py index 37228f2f48a..d1ac175db5e 100644 --- a/lpot/ux/utils/environment.py +++ b/neural_compressor/ux/utils/environment.py @@ -16,9 +16,9 @@ import os import sys -from lpot.ux.utils.templates.workdir import Workdir -from lpot.ux.utils.workload.workload import WorkloadMigrator -from lpot.ux.utils.workload.workloads_list import WorkloadsListMigrator +from neural_compressor.ux.utils.templates.workdir import Workdir +from neural_compressor.ux.utils.workload.workload import WorkloadMigrator +from neural_compressor.ux.utils.workload.workloads_list import WorkloadsListMigrator class Environment: @@ -27,8 +27,8 @@ class Environment: @staticmethod def ensure_workdir_exists_and_writeable() -> None: """Ensure that 
configured directory exists and can be used.""" - from lpot.ux.utils.logger import log - from lpot.ux.web.configuration import Configuration + from neural_compressor.ux.utils.logger import log + from neural_compressor.ux.web.configuration import Configuration configuration = Configuration() workdir = configuration.workdir diff --git a/lpot/ux/utils/exceptions.py b/neural_compressor/ux/utils/exceptions.py similarity index 100% rename from lpot/ux/utils/exceptions.py rename to neural_compressor/ux/utils/exceptions.py diff --git a/lpot/ux/utils/executor.py b/neural_compressor/ux/utils/executor.py similarity index 96% rename from lpot/ux/utils/executor.py rename to neural_compressor/ux/utils/executor.py index 5d59938e68f..482b631019b 100644 --- a/lpot/ux/utils/executor.py +++ b/neural_compressor/ux/utils/executor.py @@ -19,10 +19,10 @@ from threading import Lock, Thread from typing import Any, List, Optional, Union -from lpot.ux.utils.logger import log -from lpot.ux.utils.proc import Proc -from lpot.ux.utils.processes import LPOTProcesses -from lpot.ux.web.communication import MessageQueue +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.proc import Proc +from neural_compressor.ux.utils.processes import NCProcesses +from neural_compressor.ux.web.communication import MessageQueue LOCK = Lock() @@ -147,7 +147,7 @@ def call( env_args: Optional[list] = None, ignore_exit_codes: Union[list, Any] = None, pid: Optional[str] = None, - ) -> LPOTProcesses: + ) -> NCProcesses: """ Execute multiple calls for process. @@ -167,7 +167,7 @@ def call( self.refresh_workdir() threads = [] - processes = LPOTProcesses() + processes = NCProcesses() if not self.is_multi_commands(args): args = [args] diff --git a/lpot/ux/utils/expiring_dict.py b/neural_compressor/ux/utils/expiring_dict.py similarity index 100% rename from lpot/ux/utils/expiring_dict.py rename to neural_compressor/ux/utils/expiring_dict.py diff --git a/lpot/ux/utils/hw_info.py b/neural_compressor/ux/utils/hw_info.py similarity index 97% rename from lpot/ux/utils/hw_info.py rename to neural_compressor/ux/utils/hw_info.py index d08c6fd36d1..e82b5ba61fe 100644 --- a/lpot/ux/utils/hw_info.py +++ b/neural_compressor/ux/utils/hw_info.py @@ -22,9 +22,9 @@ import cpuinfo import psutil -from lpot.ux.utils.json_serializer import JsonSerializer -from lpot.ux.utils.logger import log -from lpot.ux.utils.utils import determine_ip +from neural_compressor.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.utils import determine_ip class HWInfo(JsonSerializer): diff --git a/lpot/ux/utils/json_serializer.py b/neural_compressor/ux/utils/json_serializer.py similarity index 98% rename from lpot/ux/utils/json_serializer.py rename to neural_compressor/ux/utils/json_serializer.py index 50589e25930..bf973171b1c 100644 --- a/lpot/ux/utils/json_serializer.py +++ b/neural_compressor/ux/utils/json_serializer.py @@ -17,7 +17,7 @@ import re from typing import Any, Dict, List, Optional, Union -from lpot.ux.utils.logger import log +from neural_compressor.ux.utils.logger import log class JsonSerializer: diff --git a/lpot/ux/utils/logger.py b/neural_compressor/ux/utils/logger.py similarity index 100% rename from lpot/ux/utils/logger.py rename to neural_compressor/ux/utils/logger.py diff --git a/lpot/ux/utils/parser.py b/neural_compressor/ux/utils/parser.py similarity index 96% rename from lpot/ux/utils/parser.py rename to neural_compressor/ux/utils/parser.py index 
6d30ca87653..ea9624886da 100644 --- a/lpot/ux/utils/parser.py +++ b/neural_compressor/ux/utils/parser.py @@ -18,10 +18,10 @@ from abc import ABC from typing import Any, Dict, List, Union -from lpot.ux.components.benchmark import Benchmarks -from lpot.ux.utils.exceptions import InternalException -from lpot.ux.utils.logger import log -from lpot.ux.utils.templates.metric import Metric +from neural_compressor.ux.components.benchmark import Benchmarks +from neural_compressor.ux.utils.exceptions import InternalException +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.templates.metric import Metric class Parser(ABC): diff --git a/lpot/ux/utils/proc.py b/neural_compressor/ux/utils/proc.py similarity index 99% rename from lpot/ux/utils/proc.py rename to neural_compressor/ux/utils/proc.py index 2a08a0eb97b..ff0bb417d63 100644 --- a/lpot/ux/utils/proc.py +++ b/neural_compressor/ux/utils/proc.py @@ -24,7 +24,7 @@ from contextlib import ExitStack from typing import Any, Dict, Iterable, Iterator, List, Optional, Union -from lpot.ux.utils.logger import log +from neural_compressor.ux.utils.logger import log class Proc(object): diff --git a/lpot/ux/utils/processes.py b/neural_compressor/ux/utils/processes.py similarity index 78% rename from lpot/ux/utils/processes.py rename to neural_compressor/ux/utils/processes.py index b5a184e079e..19917253d4a 100644 --- a/lpot/ux/utils/processes.py +++ b/neural_compressor/ux/utils/processes.py @@ -16,7 +16,7 @@ """Execution common process module.""" -class LPOTProcesses(list): +class NCProcesses(list): """ Processes class aggregates Process list. @@ -31,8 +31,8 @@ def return_code_all(self) -> None: :return: List of int with process return codes. """ return_codes = [] - for lpot_process in self: - return_codes.append(lpot_process.return_code) + for nc_process in self: + return_codes.append(nc_process.return_code) @property def is_ok(self) -> bool: @@ -42,8 +42,8 @@ def is_ok(self) -> bool: :rtype : bool :return: return False if at least one of processes failed, in other case return True """ - for lpot_process in self: - if not lpot_process.is_ok: + for nc_process in self: + if not nc_process.is_ok: return False return True @@ -54,15 +54,15 @@ def __str__(self) -> str: :rtype : list :return: list of processes args and int with process return codes """ - Lpot_processes = [] - for lpot_process in self: - Lpot_processes.append( - "LpotProc(\n\tcmd={}\n\treturn_code={}".format( - " ".join(map(str, lpot_process.args)), - lpot_process.return_code, + NC_processes = [] + for nc_process in self: + NC_processes.append( + "NCProc(\n\tcmd={}\n\treturn_code={}".format( + " ".join(map(str, nc_process.args)), + nc_process.return_code, ), ) - return "\n".join(Lpot_processes) + return "\n".join(NC_processes) def remove_successful_logs(self) -> None: """Remove call logs if all statuses are successful.""" diff --git a/lpot/ux/utils/templates/__init__.py b/neural_compressor/ux/utils/templates/__init__.py similarity index 100% rename from lpot/ux/utils/templates/__init__.py rename to neural_compressor/ux/utils/templates/__init__.py diff --git a/lpot/ux/utils/templates/dataloader_and_metric_template.txt b/neural_compressor/ux/utils/templates/dataloader_and_metric_template.txt similarity index 90% rename from lpot/ux/utils/templates/dataloader_and_metric_template.txt rename to neural_compressor/ux/utils/templates/dataloader_and_metric_template.txt index a60d357fdfe..46df9c80c30 100644 --- a/lpot/ux/utils/templates/dataloader_and_metric_template.txt +++ 
b/neural_compressor/ux/utils/templates/dataloader_and_metric_template.txt @@ -16,8 +16,8 @@ class Dataset(object): pass # Define a customized Metric function -from lpot.experimental import Quantization, common -from lpot.experimental.metric import BaseMetric +from neural_compressor.experimental import Quantization, common +from neural_compressor.experimental.metric import BaseMetric class MyMetric(BaseMetric): diff --git a/lpot/ux/utils/templates/dataloader_template.txt b/neural_compressor/ux/utils/templates/dataloader_template.txt similarity index 92% rename from lpot/ux/utils/templates/dataloader_template.txt rename to neural_compressor/ux/utils/templates/dataloader_template.txt index 457dffe82c9..79c740b4750 100644 --- a/lpot/ux/utils/templates/dataloader_template.txt +++ b/neural_compressor/ux/utils/templates/dataloader_template.txt @@ -1,4 +1,4 @@ -from lpot.experimental import Quantization, common +from neural_compressor.experimental import Quantization, common class Dataset(object): def __init__(self, *args): diff --git a/lpot/ux/utils/templates/metric.py b/neural_compressor/ux/utils/templates/metric.py similarity index 98% rename from lpot/ux/utils/templates/metric.py rename to neural_compressor/ux/utils/templates/metric.py index 88d76841eee..531954ecc7d 100644 --- a/lpot/ux/utils/templates/metric.py +++ b/neural_compressor/ux/utils/templates/metric.py @@ -16,7 +16,7 @@ from typing import Optional -from lpot.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.json_serializer import JsonSerializer DIGITS = 4 diff --git a/lpot/ux/utils/templates/metric_template.txt b/neural_compressor/ux/utils/templates/metric_template.txt similarity index 80% rename from lpot/ux/utils/templates/metric_template.txt rename to neural_compressor/ux/utils/templates/metric_template.txt index ad384fa568b..441bd3077cd 100644 --- a/lpot/ux/utils/templates/metric_template.txt +++ b/neural_compressor/ux/utils/templates/metric_template.txt @@ -1,6 +1,6 @@ # Define a customized Metric function -from lpot.experimental import Quantization, common -from lpot.experimental.metric import BaseMetric +from neural_compressor.experimental import Quantization, common +from neural_compressor.experimental.metric import BaseMetric class MyMetric(BaseMetric): diff --git a/lpot/ux/utils/templates/workdir.py b/neural_compressor/ux/utils/templates/workdir.py similarity index 97% rename from lpot/ux/utils/templates/workdir.py rename to neural_compressor/ux/utils/templates/workdir.py index 3a64a6d234e..646ab161265 100644 --- a/lpot/ux/utils/templates/workdir.py +++ b/neural_compressor/ux/utils/templates/workdir.py @@ -21,9 +21,9 @@ from pathlib import Path from typing import Any, Dict, List, Optional, Union -from lpot.ux.utils.templates.metric import Metric -from lpot.ux.utils.workload.workloads_list import WorkloadInfo -from lpot.ux.web.configuration import Configuration +from neural_compressor.ux.utils.templates.metric import Metric +from neural_compressor.ux.utils.workload.workloads_list import WorkloadInfo +from neural_compressor.ux.web.configuration import Configuration class Workdir: @@ -45,7 +45,7 @@ def __init__( """Initialize workdir class.""" configuration = Configuration() workspace_path = configuration.workdir - self.workdir_path = os.path.join(os.environ.get("HOME", ""), ".lpot") + self.workdir_path = os.path.join(os.environ.get("HOME", ""), ".neural_compressor") self.ensure_working_path_exists() self.workloads_json = os.path.join(self.workdir_path, "workloads_list.json") self.request_id = 
request_id diff --git a/lpot/ux/utils/utils.py b/neural_compressor/ux/utils/utils.py similarity index 94% rename from lpot/ux/utils/utils.py rename to neural_compressor/ux/utils/utils.py index f8771cc856c..5ce496f28cd 100644 --- a/lpot/ux/utils/utils.py +++ b/neural_compressor/ux/utils/utils.py @@ -23,10 +23,10 @@ from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Union -from lpot.ux.utils.exceptions import AccessDeniedException, ClientErrorException, NotFoundException -from lpot.ux.utils.logger import log -from lpot.ux.utils.proc import Proc -from lpot.version import __version__ as lpot_version +from neural_compressor.ux.utils.exceptions import AccessDeniedException, ClientErrorException, NotFoundException +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.proc import Proc +from neural_compressor.version import __version__ as nc_version dataset_locations = { "tensorflow": { @@ -73,7 +73,7 @@ def get_framework_from_path(model_path: str) -> Optional[str]: :param model_path: Path to model. """ - from lpot.ux.components.model.repository import ModelRepository + from neural_compressor.ux.components.model.repository import ModelRepository model_repository = ModelRepository() try: @@ -307,7 +307,7 @@ def load_precisions_config() -> dict: return _load_json_as_dict(json_path) -def load_help_lpot_params(parameter: str) -> Dict[str, Any]: +def load_help_nc_params(parameter: str) -> Dict[str, Any]: """Load help info from json for metrics, objectives and strategies.""" json_path = os.path.join( os.path.dirname(__file__), @@ -375,8 +375,8 @@ def determine_ip() -> str: def is_development_env() -> bool: - """Return true if LPOT_MODE is development else false.""" - return os.environ.get("LPOT_MODE") == "development" + """Return true if NC_MODE is development else false.""" + return os.environ.get("NC_MODE") == "development" def filter_transforms( @@ -420,10 +420,10 @@ def release_tag() -> str: """Build tag based on release version.""" version_pattern = r"^(?P<release>[0-9]+(\.[0-9]+)*).*?$" version_regex = re.compile(version_pattern) - matches = version_regex.search(lpot_version) + matches = version_regex.search(nc_version) if matches is None: - raise ValueError(f"Unable to parse version {lpot_version}") + raise ValueError(f"Unable to parse version {nc_version}") release_version = matches.groupdict().get("release") return f"v{release_version}" diff --git a/lpot/ux/utils/workload/README.md b/neural_compressor/ux/utils/workload/README.md similarity index 94% rename from lpot/ux/utils/workload/README.md rename to neural_compressor/ux/utils/workload/README.md index 2260ae49160..0d94ed6f58d 100644 --- a/lpot/ux/utils/workload/README.md +++ b/neural_compressor/ux/utils/workload/README.md @@ -19,7 +19,7 @@ Example init data: While creating workload object it search for predefined config for specified **framework** and **domain** and updates such predefined config with passed values. 
```python -from lpot.ux.utils.workload.workload import Workload +from neural_compressor.ux.utils.workload.workload import Workload workload = Workload({ "framework": "tensorflow", diff --git a/lpot/ux/utils/workload/__init__.py b/neural_compressor/ux/utils/workload/__init__.py similarity index 100% rename from lpot/ux/utils/workload/__init__.py rename to neural_compressor/ux/utils/workload/__init__.py diff --git a/lpot/ux/utils/workload/config.py b/neural_compressor/ux/utils/workload/config.py similarity index 95% rename from lpot/ux/utils/workload/config.py rename to neural_compressor/ux/utils/workload/config.py index 8fc3e474e7b..29506ab0939 100644 --- a/lpot/ux/utils/workload/config.py +++ b/neural_compressor/ux/utils/workload/config.py @@ -19,18 +19,18 @@ import yaml -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.json_serializer import JsonSerializer -from lpot.ux.utils.logger import log -from lpot.ux.utils.utils import load_precisions_config -from lpot.ux.utils.workload.dataloader import Transform -from lpot.ux.utils.workload.evaluation import Configs, Evaluation -from lpot.ux.utils.workload.graph_optimization import GraphOptimization -from lpot.ux.utils.workload.model import Model -from lpot.ux.utils.workload.pruning import Pruning -from lpot.ux.utils.workload.quantization import Quantization -from lpot.ux.utils.workload.tuning import Tuning -from lpot.ux.utils.yaml_utils import float_representer +from neural_compressor.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.utils import load_precisions_config +from neural_compressor.ux.utils.workload.dataloader import Transform +from neural_compressor.ux.utils.workload.evaluation import Configs, Evaluation +from neural_compressor.ux.utils.workload.graph_optimization import GraphOptimization +from neural_compressor.ux.utils.workload.model import Model +from neural_compressor.ux.utils.workload.pruning import Pruning +from neural_compressor.ux.utils.workload.quantization import Quantization +from neural_compressor.ux.utils.workload.tuning import Tuning +from neural_compressor.ux.utils.yaml_utils import float_representer class Config(JsonSerializer): diff --git a/lpot/ux/utils/workload/dataloader.py b/neural_compressor/ux/utils/workload/dataloader.py similarity index 96% rename from lpot/ux/utils/workload/dataloader.py rename to neural_compressor/ux/utils/workload/dataloader.py index d71a4ace4ce..9d10f6d1621 100644 --- a/lpot/ux/utils/workload/dataloader.py +++ b/neural_compressor/ux/utils/workload/dataloader.py @@ -17,8 +17,8 @@ from collections import OrderedDict from typing import Any, Dict, Optional -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.json_serializer import JsonSerializer class Dataset(JsonSerializer): diff --git a/lpot/ux/utils/workload/evaluation.py b/neural_compressor/ux/utils/workload/evaluation.py similarity index 96% rename from lpot/ux/utils/workload/evaluation.py rename to neural_compressor/ux/utils/workload/evaluation.py index 1ba649064dc..e03035561ad 100644 --- a/lpot/ux/utils/workload/evaluation.py +++ b/neural_compressor/ux/utils/workload/evaluation.py @@ -16,9 +16,9 @@ from typing import Any, Dict, List, Optional, Union -from lpot.ux.utils.hw_info import HWInfo 
-from lpot.ux.utils.json_serializer import JsonSerializer -from lpot.ux.utils.workload.dataloader import Dataloader +from neural_compressor.ux.utils.hw_info import HWInfo +from neural_compressor.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.workload.dataloader import Dataloader class Metric(JsonSerializer): diff --git a/lpot/ux/utils/workload/graph_optimization.py b/neural_compressor/ux/utils/workload/graph_optimization.py similarity index 92% rename from lpot/ux/utils/workload/graph_optimization.py rename to neural_compressor/ux/utils/workload/graph_optimization.py index c01f74b4706..2b19bde8e84 100644 --- a/lpot/ux/utils/workload/graph_optimization.py +++ b/neural_compressor/ux/utils/workload/graph_optimization.py @@ -16,8 +16,8 @@ from typing import Any, Dict, List, Optional, Union -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.json_serializer import JsonSerializer class GraphOptimization(JsonSerializer): diff --git a/lpot/ux/utils/workload/model.py b/neural_compressor/ux/utils/workload/model.py similarity index 97% rename from lpot/ux/utils/workload/model.py rename to neural_compressor/ux/utils/workload/model.py index 735fad00b83..f3ef78216e6 100644 --- a/lpot/ux/utils/workload/model.py +++ b/neural_compressor/ux/utils/workload/model.py @@ -16,7 +16,7 @@ from typing import Any, Dict, List, Union -from lpot.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.json_serializer import JsonSerializer class Model(JsonSerializer): diff --git a/lpot/ux/utils/workload/pruning.py b/neural_compressor/ux/utils/workload/pruning.py similarity index 96% rename from lpot/ux/utils/workload/pruning.py rename to neural_compressor/ux/utils/workload/pruning.py index 66e8f431de7..a665bb54662 100644 --- a/lpot/ux/utils/workload/pruning.py +++ b/neural_compressor/ux/utils/workload/pruning.py @@ -16,7 +16,7 @@ from typing import Any, Dict, List, Optional -from lpot.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.json_serializer import JsonSerializer class Magnitude(JsonSerializer): diff --git a/lpot/ux/utils/workload/quantization.py b/neural_compressor/ux/utils/workload/quantization.py similarity index 96% rename from lpot/ux/utils/workload/quantization.py rename to neural_compressor/ux/utils/workload/quantization.py index 9c18222282a..9be395ddc30 100644 --- a/lpot/ux/utils/workload/quantization.py +++ b/neural_compressor/ux/utils/workload/quantization.py @@ -16,8 +16,8 @@ from typing import Any, Dict, List, Union -from lpot.ux.utils.json_serializer import JsonSerializer -from lpot.ux.utils.workload.dataloader import Dataloader +from neural_compressor.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.workload.dataloader import Dataloader class Calibration(JsonSerializer): diff --git a/lpot/ux/utils/workload/tuning.py b/neural_compressor/ux/utils/workload/tuning.py similarity index 94% rename from lpot/ux/utils/workload/tuning.py rename to neural_compressor/ux/utils/workload/tuning.py index 277c9c1cb02..ea0f796c208 100644 --- a/lpot/ux/utils/workload/tuning.py +++ b/neural_compressor/ux/utils/workload/tuning.py @@ -16,9 +16,9 @@ from typing import Any, Dict, Optional -from lpot.ux.utils.exceptions import ClientErrorException -from lpot.ux.utils.json_serializer import JsonSerializer -from lpot.ux.utils.utils import 
parse_bool_value +from neural_compressor.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.utils import parse_bool_value class Strategy(JsonSerializer): @@ -27,7 +27,7 @@ class Strategy(JsonSerializer): def __init__(self, data: Dict[str, Any] = {}) -> None: """Initialize configuration Strategy class.""" super().__init__() - # [Required] One of lpot.strategy.STRATEGIES + # [Required] One of neural_compressor.strategy.STRATEGIES self.name: str = data.get("name", "basic") self.sigopt_api_token: Optional[str] = data.get("sigopt_api_token", None) @@ -89,7 +89,7 @@ def __init__(self, data: Dict[str, Any] = {}) -> None: data.get("accuracy_criterion", {}), ) - # [Optional] One of lpot.objective.OBJECTIVES + # [Optional] One of neural_compressor.objective.OBJECTIVES self.objective: Optional[str] = data.get("objective", None) self.exit_policy: Optional[ExitPolicy] = None # [Optional] diff --git a/lpot/ux/utils/workload/workload.py b/neural_compressor/ux/utils/workload/workload.py similarity index 95% rename from lpot/ux/utils/workload/workload.py rename to neural_compressor/ux/utils/workload/workload.py index d7f87d12f26..1fd5aad1679 100644 --- a/lpot/ux/utils/workload/workload.py +++ b/neural_compressor/ux/utils/workload/workload.py @@ -21,19 +21,19 @@ from pathlib import Path from typing import Any, Dict, Optional -from lpot.ux.components.model.repository import ModelRepository -from lpot.ux.components.optimization import Optimizations -from lpot.ux.utils.consts import Precisions -from lpot.ux.utils.exceptions import ClientErrorException, InternalException -from lpot.ux.utils.json_serializer import JsonSerializer -from lpot.ux.utils.logger import log -from lpot.ux.utils.utils import ( +from neural_compressor.ux.components.model.repository import ModelRepository +from neural_compressor.ux.components.optimization import Optimizations +from neural_compressor.ux.utils.consts import Precisions +from neural_compressor.ux.utils.exceptions import ClientErrorException, InternalException +from neural_compressor.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.utils.utils import ( get_file_extension, get_framework_from_path, get_predefined_config_path, ) -from lpot.ux.utils.workload.config import Config -from lpot.ux.web.configuration import Configuration +from neural_compressor.ux.utils.workload.config import Config +from neural_compressor.ux.web.configuration import Configuration class Workload(JsonSerializer): diff --git a/lpot/ux/utils/workload/workloads_list.py b/neural_compressor/ux/utils/workload/workloads_list.py similarity index 97% rename from lpot/ux/utils/workload/workloads_list.py rename to neural_compressor/ux/utils/workload/workloads_list.py index d4183622d18..8d68b2110b1 100644 --- a/lpot/ux/utils/workload/workloads_list.py +++ b/neural_compressor/ux/utils/workload/workloads_list.py @@ -20,11 +20,11 @@ import re from typing import Any, Dict, List, Optional, Tuple, Union -from lpot.ux.utils.exceptions import InternalException -from lpot.ux.utils.json_serializer import JsonSerializer -from lpot.ux.utils.logger import log -from lpot.ux.utils.templates.metric import Metric -from lpot.ux.utils.utils import get_size +from neural_compressor.ux.utils.exceptions import InternalException +from neural_compressor.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.utils.logger import log +from 
neural_compressor.ux.utils.templates.metric import Metric +from neural_compressor.ux.utils.utils import get_size logging.basicConfig(level=logging.INFO) @@ -128,7 +128,7 @@ def __init__(self) -> None: """Initialize workloads list migrator.""" self.workloads_json = os.path.join( os.environ.get("HOME", ""), - ".lpot", + ".neural_compressor", "workloads_list.json", ) self.workloads_data: dict = {} diff --git a/lpot/ux/utils/yaml_utils.py b/neural_compressor/ux/utils/yaml_utils.py similarity index 100% rename from lpot/ux/utils/yaml_utils.py rename to neural_compressor/ux/utils/yaml_utils.py diff --git a/lpot/ux/web/__init__.py b/neural_compressor/ux/web/__init__.py similarity index 100% rename from lpot/ux/web/__init__.py rename to neural_compressor/ux/web/__init__.py diff --git a/lpot/ux/web/communication.py b/neural_compressor/ux/web/communication.py similarity index 100% rename from lpot/ux/web/communication.py rename to neural_compressor/ux/web/communication.py diff --git a/lpot/ux/web/configuration.py b/neural_compressor/ux/web/configuration.py similarity index 97% rename from lpot/ux/web/configuration.py rename to neural_compressor/ux/web/configuration.py index 21c2022409a..6c182ec6534 100644 --- a/lpot/ux/web/configuration.py +++ b/neural_compressor/ux/web/configuration.py @@ -25,8 +25,8 @@ from numpy.random import randint -from lpot.utils.utility import singleton -from lpot.ux.utils.exceptions import NotFoundException +from neural_compressor.utils.utility import singleton +from neural_compressor.ux.utils.exceptions import NotFoundException @singleton @@ -67,7 +67,7 @@ def determine_values_from_existing_config(self) -> None: """Set variables based on existing files.""" workloads_list_filepath = os.path.join( os.environ.get("HOME", ""), - ".lpot", + ".neural_compressor", "workloads_list.json", ) if os.path.isfile(workloads_list_filepath): diff --git a/lpot/ux/web/exceptions.py b/neural_compressor/ux/web/exceptions.py similarity index 91% rename from lpot/ux/web/exceptions.py rename to neural_compressor/ux/web/exceptions.py index 9c25ed4ff53..bfba680df7d 100644 --- a/lpot/ux/web/exceptions.py +++ b/neural_compressor/ux/web/exceptions.py @@ -15,7 +15,7 @@ """Web Exceptions.""" -from lpot.ux.utils.exceptions import NotFoundException +from neural_compressor.ux.utils.exceptions import NotFoundException class ServiceNotFoundException(NotFoundException): diff --git a/lpot/ux/web/router.py b/neural_compressor/ux/web/router.py similarity index 77% rename from lpot/ux/web/router.py rename to neural_compressor/ux/web/router.py index 78cbfe28c4d..6a53f5a61ef 100644 --- a/lpot/ux/web/router.py +++ b/neural_compressor/ux/web/router.py @@ -19,24 +19,24 @@ from werkzeug.wrappers import Response as WebResponse -from lpot.ux.components.benchmark.execute_benchmark import execute_benchmark -from lpot.ux.components.configuration_wizard.get_boundary_nodes import get_boundary_nodes -from lpot.ux.components.configuration_wizard.get_configuration import get_predefined_configuration -from lpot.ux.components.configuration_wizard.params_feeder import get_possible_values -from lpot.ux.components.configuration_wizard.save_workload import save_workload -from lpot.ux.components.file_browser.file_browser import get_directory_entries -from lpot.ux.components.graph.graph import Graph -from lpot.ux.components.graph.graph_reader import GraphReader -from lpot.ux.components.manage_workspace import get_default_path, get_workloads_list, set_workspace -from lpot.ux.components.model_zoo.list_models import list_models -from 
lpot.ux.components.model_zoo.save_workload import save_workload as save_example_workload -from lpot.ux.components.optimization.execute_optimization import execute_optimization -from lpot.ux.utils.hw_info import HWInfo -from lpot.ux.utils.json_serializer import JsonSerializer -from lpot.ux.web.communication import Request, Response, create_simple_response -from lpot.ux.web.exceptions import ServiceNotFoundException -from lpot.ux.web.service.request_data_processor import RequestDataProcessor -from lpot.ux.web.service.workload import WorkloadService +from neural_compressor.ux.components.benchmark.execute_benchmark import execute_benchmark +from neural_compressor.ux.components.configuration_wizard.get_boundary_nodes import get_boundary_nodes +from neural_compressor.ux.components.configuration_wizard.get_configuration import get_predefined_configuration +from neural_compressor.ux.components.configuration_wizard.params_feeder import get_possible_values +from neural_compressor.ux.components.configuration_wizard.save_workload import save_workload +from neural_compressor.ux.components.file_browser.file_browser import get_directory_entries +from neural_compressor.ux.components.graph.graph import Graph +from neural_compressor.ux.components.graph.graph_reader import GraphReader +from neural_compressor.ux.components.manage_workspace import get_default_path, get_workloads_list, set_workspace +from neural_compressor.ux.components.model_zoo.list_models import list_models +from neural_compressor.ux.components.model_zoo.save_workload import save_workload as save_example_workload +from neural_compressor.ux.components.optimization.execute_optimization import execute_optimization +from neural_compressor.ux.utils.hw_info import HWInfo +from neural_compressor.ux.utils.json_serializer import JsonSerializer +from neural_compressor.ux.web.communication import Request, Response, create_simple_response +from neural_compressor.ux.web.exceptions import ServiceNotFoundException +from neural_compressor.ux.web.service.request_data_processor import RequestDataProcessor +from neural_compressor.ux.web.service.workload import WorkloadService class RoutingDefinition: diff --git a/lpot/ux/web/server.py b/neural_compressor/ux/web/server.py similarity index 92% rename from lpot/ux/web/server.py rename to neural_compressor/ux/web/server.py index 8be2e272aa6..ac11d21d5c4 100644 --- a/lpot/ux/web/server.py +++ b/neural_compressor/ux/web/server.py @@ -26,12 +26,12 @@ from flask_socketio import SocketIO from werkzeug.wrappers import Response as WebResponse -from lpot.ux.utils.exceptions import InternalException -from lpot.ux.utils.logger import log -from lpot.ux.web.communication import MessageQueue, Request -from lpot.ux.web.configuration import Configuration -from lpot.ux.web.router import Router -from lpot.ux.web.service.response_generator import ResponseGenerator +from neural_compressor.ux.utils.exceptions import InternalException +from neural_compressor.ux.utils.logger import log +from neural_compressor.ux.web.communication import MessageQueue, Request +from neural_compressor.ux.web.configuration import Configuration +from neural_compressor.ux.web.router import Router +from neural_compressor.ux.web.service.response_generator import ResponseGenerator app = Flask(__name__, static_url_path="") socketio = SocketIO() diff --git a/lpot/ux/web/service/__init__.py b/neural_compressor/ux/web/service/__init__.py similarity index 100% rename from lpot/ux/web/service/__init__.py rename to neural_compressor/ux/web/service/__init__.py diff 
--git a/lpot/ux/web/service/history_snapshot_parser.py b/neural_compressor/ux/web/service/history_snapshot_parser.py similarity index 100% rename from lpot/ux/web/service/history_snapshot_parser.py rename to neural_compressor/ux/web/service/history_snapshot_parser.py diff --git a/lpot/ux/web/service/request_data_processor.py b/neural_compressor/ux/web/service/request_data_processor.py similarity index 93% rename from lpot/ux/web/service/request_data_processor.py rename to neural_compressor/ux/web/service/request_data_processor.py index 64958d167ec..70123990efb 100644 --- a/lpot/ux/web/service/request_data_processor.py +++ b/neural_compressor/ux/web/service/request_data_processor.py @@ -17,7 +17,7 @@ from typing import Any, Dict -from lpot.ux.utils.exceptions import ClientErrorException +from neural_compressor.ux.utils.exceptions import ClientErrorException class RequestDataProcessor: diff --git a/lpot/ux/web/service/response_generator.py b/neural_compressor/ux/web/service/response_generator.py similarity index 95% rename from lpot/ux/web/service/response_generator.py rename to neural_compressor/ux/web/service/response_generator.py index 69fde332860..2f0b6382841 100644 --- a/lpot/ux/web/service/response_generator.py +++ b/neural_compressor/ux/web/service/response_generator.py @@ -19,13 +19,13 @@ from flask import send_file from werkzeug.wrappers import Response -from lpot.ux.utils.exceptions import ( +from neural_compressor.ux.utils.exceptions import ( AccessDeniedException, ClientErrorException, InternalException, NotFoundException, ) -from lpot.ux.utils.utils import verify_file_path +from neural_compressor.ux.utils.utils import verify_file_path class ResponseGenerator: diff --git a/lpot/ux/web/service/workload.py b/neural_compressor/ux/web/service/workload.py similarity index 88% rename from lpot/ux/web/service/workload.py rename to neural_compressor/ux/web/service/workload.py index d233fbf9bd6..528799cef63 100644 --- a/lpot/ux/web/service/workload.py +++ b/neural_compressor/ux/web/service/workload.py @@ -14,12 +14,12 @@ # limitations under the License. 
"""Workload service.""" -from lpot.ux.components.optimization.tuning_history import tuning_history -from lpot.ux.utils.exceptions import NotFoundException -from lpot.ux.utils.templates.workdir import Workdir -from lpot.ux.web.communication import MessageQueue -from lpot.ux.web.service.request_data_processor import RequestDataProcessor -from lpot.ux.web.service.response_generator import Response, ResponseGenerator +from neural_compressor.ux.components.optimization.tuning_history import tuning_history +from neural_compressor.ux.utils.exceptions import NotFoundException +from neural_compressor.ux.utils.templates.workdir import Workdir +from neural_compressor.ux.web.communication import MessageQueue +from neural_compressor.ux.web.service.request_data_processor import RequestDataProcessor +from neural_compressor.ux.web.service.response_generator import Response, ResponseGenerator mq = MessageQueue() diff --git a/lpot/ux/web/static/3rdpartylicenses.txt b/neural_compressor/ux/web/static/3rdpartylicenses.txt similarity index 100% rename from lpot/ux/web/static/3rdpartylicenses.txt rename to neural_compressor/ux/web/static/3rdpartylicenses.txt diff --git a/lpot/ux/web/static/assets/004a-information-solid.svg b/neural_compressor/ux/web/static/assets/004a-information-solid.svg similarity index 100% rename from lpot/ux/web/static/assets/004a-information-solid.svg rename to neural_compressor/ux/web/static/assets/004a-information-solid.svg diff --git a/lpot/ux/web/static/assets/005a-help-solid-gray.svg b/neural_compressor/ux/web/static/assets/005a-help-solid-gray.svg similarity index 100% rename from lpot/ux/web/static/assets/005a-help-solid-gray.svg rename to neural_compressor/ux/web/static/assets/005a-help-solid-gray.svg diff --git a/lpot/ux/web/static/assets/005a-help-solid.svg b/neural_compressor/ux/web/static/assets/005a-help-solid.svg similarity index 100% rename from lpot/ux/web/static/assets/005a-help-solid.svg rename to neural_compressor/ux/web/static/assets/005a-help-solid.svg diff --git a/lpot/ux/web/static/assets/006a-alert-solid-red.svg b/neural_compressor/ux/web/static/assets/006a-alert-solid-red.svg similarity index 100% rename from lpot/ux/web/static/assets/006a-alert-solid-red.svg rename to neural_compressor/ux/web/static/assets/006a-alert-solid-red.svg diff --git a/lpot/ux/web/static/assets/007a-minus-solid.svg b/neural_compressor/ux/web/static/assets/007a-minus-solid.svg similarity index 100% rename from lpot/ux/web/static/assets/007a-minus-solid.svg rename to neural_compressor/ux/web/static/assets/007a-minus-solid.svg diff --git a/lpot/ux/web/static/assets/008a-plus-solid-blue.svg b/neural_compressor/ux/web/static/assets/008a-plus-solid-blue.svg similarity index 100% rename from lpot/ux/web/static/assets/008a-plus-solid-blue.svg rename to neural_compressor/ux/web/static/assets/008a-plus-solid-blue.svg diff --git a/lpot/ux/web/static/assets/008a-plus-solid.svg b/neural_compressor/ux/web/static/assets/008a-plus-solid.svg similarity index 100% rename from lpot/ux/web/static/assets/008a-plus-solid.svg rename to neural_compressor/ux/web/static/assets/008a-plus-solid.svg diff --git a/lpot/ux/web/static/assets/009a-close-solid.svg b/neural_compressor/ux/web/static/assets/009a-close-solid.svg similarity index 100% rename from lpot/ux/web/static/assets/009a-close-solid.svg rename to neural_compressor/ux/web/static/assets/009a-close-solid.svg diff --git a/lpot/ux/web/static/assets/010a-passed-completed-solid.svg b/neural_compressor/ux/web/static/assets/010a-passed-completed-solid.svg 
similarity index 100% rename from lpot/ux/web/static/assets/010a-passed-completed-solid.svg rename to neural_compressor/ux/web/static/assets/010a-passed-completed-solid.svg diff --git a/lpot/ux/web/static/assets/016-edit.svg b/neural_compressor/ux/web/static/assets/016-edit.svg similarity index 100% rename from lpot/ux/web/static/assets/016-edit.svg rename to neural_compressor/ux/web/static/assets/016-edit.svg diff --git a/lpot/ux/web/static/assets/050a-folder-solid-white.svg b/neural_compressor/ux/web/static/assets/050a-folder-solid-white.svg similarity index 100% rename from lpot/ux/web/static/assets/050a-folder-solid-white.svg rename to neural_compressor/ux/web/static/assets/050a-folder-solid-white.svg diff --git a/lpot/ux/web/static/assets/050a-folder-solid.svg b/neural_compressor/ux/web/static/assets/050a-folder-solid.svg similarity index 100% rename from lpot/ux/web/static/assets/050a-folder-solid.svg rename to neural_compressor/ux/web/static/assets/050a-folder-solid.svg diff --git a/lpot/ux/web/static/assets/056a-save-solid-white.svg b/neural_compressor/ux/web/static/assets/056a-save-solid-white.svg similarity index 100% rename from lpot/ux/web/static/assets/056a-save-solid-white.svg rename to neural_compressor/ux/web/static/assets/056a-save-solid-white.svg diff --git a/lpot/ux/web/static/assets/057b-trash-outlined.svg b/neural_compressor/ux/web/static/assets/057b-trash-outlined.svg similarity index 100% rename from lpot/ux/web/static/assets/057b-trash-outlined.svg rename to neural_compressor/ux/web/static/assets/057b-trash-outlined.svg diff --git a/lpot/ux/web/static/assets/073-menu.svg b/neural_compressor/ux/web/static/assets/073-menu.svg similarity index 100% rename from lpot/ux/web/static/assets/073-menu.svg rename to neural_compressor/ux/web/static/assets/073-menu.svg diff --git a/lpot/ux/web/static/assets/074-rewind-reverse.svg b/neural_compressor/ux/web/static/assets/074-rewind-reverse.svg similarity index 100% rename from lpot/ux/web/static/assets/074-rewind-reverse.svg rename to neural_compressor/ux/web/static/assets/074-rewind-reverse.svg diff --git a/lpot/ux/web/static/assets/077-arrow-up.svg b/neural_compressor/ux/web/static/assets/077-arrow-up.svg similarity index 100% rename from lpot/ux/web/static/assets/077-arrow-up.svg rename to neural_compressor/ux/web/static/assets/077-arrow-up.svg diff --git a/lpot/ux/web/static/assets/083-arrow-forward-right.svg b/neural_compressor/ux/web/static/assets/083-arrow-forward-right.svg similarity index 100% rename from lpot/ux/web/static/assets/083-arrow-forward-right.svg rename to neural_compressor/ux/web/static/assets/083-arrow-forward-right.svg diff --git a/lpot/ux/web/static/assets/088a-start-solid-gray.svg b/neural_compressor/ux/web/static/assets/088a-start-solid-gray.svg similarity index 100% rename from lpot/ux/web/static/assets/088a-start-solid-gray.svg rename to neural_compressor/ux/web/static/assets/088a-start-solid-gray.svg diff --git a/lpot/ux/web/static/assets/088a-start-solid-white.svg b/neural_compressor/ux/web/static/assets/088a-start-solid-white.svg similarity index 100% rename from lpot/ux/web/static/assets/088a-start-solid-white.svg rename to neural_compressor/ux/web/static/assets/088a-start-solid-white.svg diff --git a/lpot/ux/web/static/assets/088a-start-solid.svg b/neural_compressor/ux/web/static/assets/088a-start-solid.svg similarity index 100% rename from lpot/ux/web/static/assets/088a-start-solid.svg rename to neural_compressor/ux/web/static/assets/088a-start-solid.svg diff --git 
a/lpot/ux/web/static/assets/145b-document-outlined-white.svg b/neural_compressor/ux/web/static/assets/145b-document-outlined-white.svg similarity index 100% rename from lpot/ux/web/static/assets/145b-document-outlined-white.svg rename to neural_compressor/ux/web/static/assets/145b-document-outlined-white.svg diff --git a/lpot/ux/web/static/assets/145b-document-outlined.svg b/neural_compressor/ux/web/static/assets/145b-document-outlined.svg similarity index 100% rename from lpot/ux/web/static/assets/145b-document-outlined.svg rename to neural_compressor/ux/web/static/assets/145b-document-outlined.svg diff --git a/lpot/ux/web/static/assets/146a-copy-solid.svg b/neural_compressor/ux/web/static/assets/146a-copy-solid.svg similarity index 100% rename from lpot/ux/web/static/assets/146a-copy-solid.svg rename to neural_compressor/ux/web/static/assets/146a-copy-solid.svg diff --git a/lpot/ux/web/static/assets/298a-workflow-process-solid.svg b/neural_compressor/ux/web/static/assets/298a-workflow-process-solid.svg similarity index 100% rename from lpot/ux/web/static/assets/298a-workflow-process-solid.svg rename to neural_compressor/ux/web/static/assets/298a-workflow-process-solid.svg diff --git a/lpot/ux/web/static/assets/create-new.png b/neural_compressor/ux/web/static/assets/create-new.png similarity index 100% rename from lpot/ux/web/static/assets/create-new.png rename to neural_compressor/ux/web/static/assets/create-new.png diff --git a/lpot/ux/web/static/assets/fonts/IntelClear_Bd.ttf b/neural_compressor/ux/web/static/assets/fonts/IntelClear_Bd.ttf similarity index 100% rename from lpot/ux/web/static/assets/fonts/IntelClear_Bd.ttf rename to neural_compressor/ux/web/static/assets/fonts/IntelClear_Bd.ttf diff --git a/lpot/ux/web/static/assets/fonts/IntelClear_Lt.ttf b/neural_compressor/ux/web/static/assets/fonts/IntelClear_Lt.ttf similarity index 100% rename from lpot/ux/web/static/assets/fonts/IntelClear_Lt.ttf rename to neural_compressor/ux/web/static/assets/fonts/IntelClear_Lt.ttf diff --git a/lpot/ux/web/static/assets/fonts/IntelClear_Rg.ttf b/neural_compressor/ux/web/static/assets/fonts/IntelClear_Rg.ttf similarity index 100% rename from lpot/ux/web/static/assets/fonts/IntelClear_Rg.ttf rename to neural_compressor/ux/web/static/assets/fonts/IntelClear_Rg.ttf diff --git a/lpot/ux/web/static/assets/fonts/intelone-display-bold.ttf b/neural_compressor/ux/web/static/assets/fonts/intelone-display-bold.ttf similarity index 100% rename from lpot/ux/web/static/assets/fonts/intelone-display-bold.ttf rename to neural_compressor/ux/web/static/assets/fonts/intelone-display-bold.ttf diff --git a/lpot/ux/web/static/assets/fonts/intelone-display-light.ttf b/neural_compressor/ux/web/static/assets/fonts/intelone-display-light.ttf similarity index 100% rename from lpot/ux/web/static/assets/fonts/intelone-display-light.ttf rename to neural_compressor/ux/web/static/assets/fonts/intelone-display-light.ttf diff --git a/lpot/ux/web/static/assets/fonts/intelone-display-regular.ttf b/neural_compressor/ux/web/static/assets/fonts/intelone-display-regular.ttf similarity index 100% rename from lpot/ux/web/static/assets/fonts/intelone-display-regular.ttf rename to neural_compressor/ux/web/static/assets/fonts/intelone-display-regular.ttf diff --git a/lpot/ux/web/static/assets/logo-energyblue-72px.svg b/neural_compressor/ux/web/static/assets/logo-energyblue-72px.svg similarity index 100% rename from lpot/ux/web/static/assets/logo-energyblue-72px.svg rename to neural_compressor/ux/web/static/assets/logo-energyblue-72px.svg 
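Stepping back from the asset renames: the `utils.py` hunk earlier in this diff renames the version import to `nc_version` inside `release_tag()`. A standalone sketch of that parsing logic, where the hard-coded `nc_version` string is a hypothetical stand-in for the real `from neural_compressor.version import __version__ as nc_version` import:

```python
import re

# Hypothetical stand-in for the package version import.
nc_version = "1.7.0"

def release_tag() -> str:
    """Build tag based on release version, e.g. "1.7.0" -> "v1.7.0"."""
    version_pattern = r"^(?P<release>[0-9]+(\.[0-9]+)*).*?$"
    version_regex = re.compile(version_pattern)
    matches = version_regex.search(nc_version)
    if matches is None:
        raise ValueError(f"Unable to parse version {nc_version}")
    # The named group keeps only the numeric release, dropping any
    # pre-release or build suffix.
    release_version = matches.groupdict().get("release")
    return f"v{release_version}"

print(release_tag())  # prints: v1.7.0
```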
diff --git a/lpot/ux/web/static/assets/model-file.svg b/neural_compressor/ux/web/static/assets/model-file.svg similarity index 100% rename from lpot/ux/web/static/assets/model-file.svg rename to neural_compressor/ux/web/static/assets/model-file.svg diff --git a/lpot/ux/web/static/assets/model-folder.svg b/neural_compressor/ux/web/static/assets/model-folder.svg similarity index 100% rename from lpot/ux/web/static/assets/model-folder.svg rename to neural_compressor/ux/web/static/assets/model-folder.svg diff --git a/lpot/ux/web/static/assets/nn.png b/neural_compressor/ux/web/static/assets/nn.png similarity index 100% rename from lpot/ux/web/static/assets/nn.png rename to neural_compressor/ux/web/static/assets/nn.png diff --git a/lpot/ux/web/static/index.html b/neural_compressor/ux/web/static/index.html similarity index 100% rename from lpot/ux/web/static/index.html rename to neural_compressor/ux/web/static/index.html diff --git a/lpot/ux/web/static/main-es2015.0fb3e05f0dfea0044b63.js b/neural_compressor/ux/web/static/main-es2015.0fb3e05f0dfea0044b63.js similarity index 93% rename from lpot/ux/web/static/main-es2015.0fb3e05f0dfea0044b63.js rename to neural_compressor/ux/web/static/main-es2015.0fb3e05f0dfea0044b63.js index 35891a233ed..19e7d2fd348 100644 --- a/lpot/ux/web/static/main-es2015.0fb3e05f0dfea0044b63.js +++ b/neural_compressor/ux/web/static/main-es2015.0fb3e05f0dfea0044b63.js @@ -1 +1 @@ [single-line minified webpack bundle contents omitted: the hunk replaces the regenerated one-line build artifact, which carries no reviewable content]
o=i.min(i.map(t.outEdges(r),function(e){return n(e.w)-t.edge(e).minlen}));return(o===Number.POSITIVE_INFINITY||null==o)&&(o=0),s.rank=o})},slack:function(t,e){return t.node(e.w).rank-t.node(e.v).rank-t.edge(e).minlen}}},71824:function(t,e,n){"use strict";var i=n(32712),r=n(77759).Graph;function s(t,e,n,r){var s;do{s=i.uniqueId(r)}while(t.hasNode(s));return n.dummy=e,t.setNode(s,n),s}function o(t){return i.max(i.map(t.nodes(),function(e){var n=t.node(e).rank;if(!i.isUndefined(n))return n}))}t.exports={addDummyNode:s,simplify:function(t){var e=(new r).setGraph(t.graph());return i.forEach(t.nodes(),function(n){e.setNode(n,t.node(n))}),i.forEach(t.edges(),function(n){var i=e.edge(n.v,n.w)||{weight:0,minlen:1},r=t.edge(n);e.setEdge(n.v,n.w,{weight:i.weight+r.weight,minlen:Math.max(i.minlen,r.minlen)})}),e},asNonCompoundGraph:function(t){var e=new r({multigraph:t.isMultigraph()}).setGraph(t.graph());return i.forEach(t.nodes(),function(n){t.children(n).length||e.setNode(n,t.node(n))}),i.forEach(t.edges(),function(n){e.setEdge(n,t.edge(n))}),e},successorWeights:function(t){var e=i.map(t.nodes(),function(e){var n={};return i.forEach(t.outEdges(e),function(e){n[e.w]=(n[e.w]||0)+t.edge(e).weight}),n});return i.zipObject(t.nodes(),e)},predecessorWeights:function(t){var e=i.map(t.nodes(),function(e){var n={};return i.forEach(t.inEdges(e),function(e){n[e.v]=(n[e.v]||0)+t.edge(e).weight}),n});return i.zipObject(t.nodes(),e)},intersectRect:function(t,e){var n,i,r=t.x,s=t.y,o=e.x-r,a=e.y-s,l=t.width/2,c=t.height/2;if(!o&&!a)throw new Error("Not possible to find intersection inside of the rectangle");return Math.abs(a)*l>Math.abs(o)*c?(a<0&&(c=-c),n=c*o/a,i=c):(o<0&&(l=-l),n=l,i=l*a/o),{x:r+n,y:s+i}},buildLayerMatrix:function(t){var e=i.map(i.range(o(t)+1),function(){return[]});return i.forEach(t.nodes(),function(n){var r=t.node(n),s=r.rank;i.isUndefined(s)||(e[s][r.order]=n)}),e},normalizeRanks:function(t){var e=i.min(i.map(t.nodes(),function(e){return t.node(e).rank}));i.forEach(t.nodes(),function(n){var r=t.node(n);i.has(r,"rank")&&(r.rank-=e)})},removeEmptyRanks:function(t){var e=i.min(i.map(t.nodes(),function(e){return t.node(e).rank})),n=[];i.forEach(t.nodes(),function(i){var r=t.node(i).rank-e;n[r]||(n[r]=[]),n[r].push(i)});var r=0,s=t.graph().nodeRankFactor;i.forEach(n,function(e,n){i.isUndefined(e)&&n%s!=0?--r:r&&i.forEach(e,function(e){t.node(e).rank+=r})})},addBorderNode:function(t,e,n,i){var r={width:0,height:0};return arguments.length>=4&&(r.rank=n,r.order=i),s(t,"border",r,e)},maxRank:o,partition:function(t,e){var n={lhs:[],rhs:[]};return i.forEach(t,function(t){e(t)?n.lhs.push(t):n.rhs.push(t)}),n},time:function(t,e){var n=i.now();try{return e()}finally{console.log(t+" time: "+(i.now()-n)+"ms")}},notime:function(t,e){return e()}}},56854:function(t){t.exports="0.8.5"},62036:function(t,e,n){e.formatArgs=function(e){if(e[0]=(this.useColors?"%c":"")+this.namespace+(this.useColors?" 
%c":" ")+e[0]+(this.useColors?"%c ":" ")+"+"+t.exports.humanize(this.diff),!this.useColors)return;const n="color: "+this.color;e.splice(1,0,n,"color: inherit");let i=0,r=0;e[0].replace(/%[a-zA-Z%]/g,t=>{"%%"!==t&&(i++,"%c"===t&&(r=i))}),e.splice(r,0,n)},e.save=function(t){try{t?e.storage.setItem("debug",t):e.storage.removeItem("debug")}catch(n){}},e.load=function(){let t;try{t=e.storage.getItem("debug")}catch(n){}return!t&&"undefined"!=typeof process&&"env"in process&&(t=process.env.DEBUG),t},e.useColors=function(){return!("undefined"==typeof window||!window.process||"renderer"!==window.process.type&&!window.process.__nwjs)||("undefined"==typeof navigator||!navigator.userAgent||!navigator.userAgent.toLowerCase().match(/(edge|trident)\/(\d+)/))&&("undefined"!=typeof document&&document.documentElement&&document.documentElement.style&&document.documentElement.style.WebkitAppearance||"undefined"!=typeof window&&window.console&&(window.console.firebug||window.console.exception&&window.console.table)||"undefined"!=typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/firefox\/(\d+)/)&&parseInt(RegExp.$1,10)>=31||"undefined"!=typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/applewebkit\/(\d+)/))},e.storage=function(){try{return localStorage}catch(t){}}(),e.destroy=(()=>{let t=!1;return()=>{t||(t=!0,console.warn("Instance method `debug.destroy()` is deprecated and no longer does anything. It will be removed in the next major version of `debug`."))}})(),e.colors=["#0000CC","#0000FF","#0033CC","#0033FF","#0066CC","#0066FF","#0099CC","#0099FF","#00CC00","#00CC33","#00CC66","#00CC99","#00CCCC","#00CCFF","#3300CC","#3300FF","#3333CC","#3333FF","#3366CC","#3366FF","#3399CC","#3399FF","#33CC00","#33CC33","#33CC66","#33CC99","#33CCCC","#33CCFF","#6600CC","#6600FF","#6633CC","#6633FF","#66CC00","#66CC33","#9900CC","#9900FF","#9933CC","#9933FF","#99CC00","#99CC33","#CC0000","#CC0033","#CC0066","#CC0099","#CC00CC","#CC00FF","#CC3300","#CC3333","#CC3366","#CC3399","#CC33CC","#CC33FF","#CC6600","#CC6633","#CC9900","#CC9933","#CCCC00","#CCCC33","#FF0000","#FF0033","#FF0066","#FF0099","#FF00CC","#FF00FF","#FF3300","#FF3333","#FF3366","#FF3399","#FF33CC","#FF33FF","#FF6600","#FF6633","#FF9900","#FF9933","#FFCC00","#FFCC33"],e.log=console.debug||console.log||(()=>{}),t.exports=n(19859)(e);const{formatters:i}=t.exports;i.j=function(t){try{return JSON.stringify(t)}catch(e){return"[UnexpectedJSONParseError]: "+e.message}}},19859:function(t,e,n){t.exports=function(t){function e(t){let n,r=null;function s(...t){if(!s.enabled)return;const i=s,r=Number(new Date);i.diff=r-(n||r),i.prev=n,i.curr=r,n=r,t[0]=e.coerce(t[0]),"string"!=typeof t[0]&&t.unshift("%O");let o=0;t[0]=t[0].replace(/%([a-zA-Z%])/g,(n,r)=>{if("%%"===n)return"%";o++;const s=e.formatters[r];return"function"==typeof s&&(n=s.call(i,t[o]),t.splice(o,1),o--),n}),e.formatArgs.call(i,t),(i.log||e.log).apply(i,t)}return s.namespace=t,s.useColors=e.useColors(),s.color=e.selectColor(t),s.extend=i,s.destroy=e.destroy,Object.defineProperty(s,"enabled",{enumerable:!0,configurable:!1,get:()=>null===r?e.enabled(t):r,set:t=>{r=t}}),"function"==typeof e.init&&e.init(s),s}function i(t,n){const i=e(this.namespace+(void 0===n?":":n)+t);return i.log=this.log,i}function r(t){return t.toString().substring(2,t.toString().length-2).replace(/\.\*\?$/,"*")}return e.debug=e,e.default=e,e.coerce=function(t){return t instanceof Error?t.stack||t.message:t},e.disable=function(){const 
t=[...e.names.map(r),...e.skips.map(r).map(t=>"-"+t)].join(",");return e.enable(""),t},e.enable=function(t){let n;e.save(t),e.names=[],e.skips=[];const i=("string"==typeof t?t:"").split(/[\s,]+/),r=i.length;for(n=0;n{e[n]=t[n]}),e.names=[],e.skips=[],e.formatters={},e.selectColor=function(t){let n=0;for(let e=0;enew i(t,e),t.exports.Socket=i,t.exports.protocol=i.protocol,t.exports.Transport=n(31505),t.exports.transports=n(41674),t.exports.parser=n(82416)},21547:function(t,e,n){const i=n(41674),r=n(65899),s=n(62036)("engine.io-client:socket"),o=n(82416),a=n(44171),l=n(39603);class c extends r{constructor(t,e={}){super(),t&&"object"==typeof t&&(e=t,t=null),t?(t=a(t),e.hostname=t.host,e.secure="https"===t.protocol||"wss"===t.protocol,e.port=t.port,t.query&&(e.query=t.query)):e.host&&(e.hostname=a(e.host).host),this.secure=null!=e.secure?e.secure:"undefined"!=typeof location&&"https:"===location.protocol,e.hostname&&!e.port&&(e.port=this.secure?"443":"80"),this.hostname=e.hostname||("undefined"!=typeof location?location.hostname:"localhost"),this.port=e.port||("undefined"!=typeof location&&location.port?location.port:this.secure?443:80),this.transports=e.transports||["polling","websocket"],this.readyState="",this.writeBuffer=[],this.prevBufferLen=0,this.opts=Object.assign({path:"/engine.io",agent:!1,withCredentials:!1,upgrade:!0,jsonp:!0,timestampParam:"t",rememberUpgrade:!1,rejectUnauthorized:!0,perMessageDeflate:{threshold:1024},transportOptions:{}},e),this.opts.path=this.opts.path.replace(/\/$/,"")+"/","string"==typeof this.opts.query&&(this.opts.query=l.decode(this.opts.query)),this.id=null,this.upgrades=null,this.pingInterval=null,this.pingTimeout=null,this.pingTimeoutTimer=null,"function"==typeof addEventListener&&addEventListener("beforeunload",()=>{this.transport&&(this.transport.removeAllListeners(),this.transport.close())},!1),this.open()}createTransport(t){s('creating transport "%s"',t);const e=function(t){const e={};for(let n in t)t.hasOwnProperty(n)&&(e[n]=t[n]);return e}(this.opts.query);e.EIO=o.protocol,e.transport=t,this.id&&(e.sid=this.id);const n=Object.assign({},this.opts.transportOptions[t],this.opts,{query:e,socket:this,hostname:this.hostname,secure:this.secure,port:this.port});return s("options: %j",n),new i[t](n)}open(){let t;if(this.opts.rememberUpgrade&&c.priorWebsocketSuccess&&-1!==this.transports.indexOf("websocket"))t="websocket";else{if(0===this.transports.length){const t=this;return void setTimeout(function(){t.emit("error","No transports available")},0)}t=this.transports[0]}this.readyState="opening";try{t=this.createTransport(t)}catch(e){return s("error while creating transport: %s",e),this.transports.shift(),void this.open()}t.open(),this.setTransport(t)}setTransport(t){s("setting transport %s",t.name);const e=this;this.transport&&(s("clearing existing transport %s",this.transport.name),this.transport.removeAllListeners()),this.transport=t,t.on("drain",function(){e.onDrain()}).on("packet",function(t){e.onPacket(t)}).on("error",function(t){e.onError(t)}).on("close",function(){e.onClose("transport close")})}probe(t){s('probing transport "%s"',t);let e=this.createTransport(t,{probe:1}),n=!1;const i=this;function r(){if(i.onlyBinaryUpgrades){const t=!this.supportsBinary&&i.transport.supportsBinary;n=n||t}n||(s('probe transport "%s" opened',t),e.send([{type:"ping",data:"probe"}]),e.once("packet",function(r){if(!n)if("pong"===r.type&&"probe"===r.data){if(s('probe transport "%s" 
pong',t),i.upgrading=!0,i.emit("upgrading",e),!e)return;c.priorWebsocketSuccess="websocket"===e.name,s('pausing current transport "%s"',i.transport.name),i.transport.pause(function(){n||"closed"!==i.readyState&&(s("changing transport and sending upgrade packet"),d(),i.setTransport(e),e.send([{type:"upgrade"}]),i.emit("upgrade",e),e=null,i.upgrading=!1,i.flush())})}else{s('probe transport "%s" failed',t);const n=new Error("probe error");n.transport=e.name,i.emit("upgradeError",n)}}))}function o(){n||(n=!0,d(),e.close(),e=null)}function a(n){const r=new Error("probe error: "+n);r.transport=e.name,o(),s('probe transport "%s" failed because of error: %s',t,n),i.emit("upgradeError",r)}function l(){a("transport closed")}function h(){a("socket closed")}function u(t){e&&t.name!==e.name&&(s('"%s" works - aborting "%s"',t.name,e.name),o())}function d(){e.removeListener("open",r),e.removeListener("error",a),e.removeListener("close",l),i.removeListener("close",h),i.removeListener("upgrading",u)}c.priorWebsocketSuccess=!1,e.once("open",r),e.once("error",a),e.once("close",l),this.once("close",h),this.once("upgrading",u),e.open()}onOpen(){if(s("socket open"),this.readyState="open",c.priorWebsocketSuccess="websocket"===this.transport.name,this.emit("open"),this.flush(),"open"===this.readyState&&this.opts.upgrade&&this.transport.pause){s("starting upgrade probes");let t=0;const e=this.upgrades.length;for(;t{this.onClose("ping timeout")},this.pingInterval+this.pingTimeout)}onDrain(){this.writeBuffer.splice(0,this.prevBufferLen),this.prevBufferLen=0,0===this.writeBuffer.length?this.emit("drain"):this.flush()}flush(){"closed"!==this.readyState&&this.transport.writable&&!this.upgrading&&this.writeBuffer.length&&(s("flushing %d packets in socket",this.writeBuffer.length),this.transport.send(this.writeBuffer),this.prevBufferLen=this.writeBuffer.length,this.emit("flush"))}write(t,e,n){return this.sendPacket("message",t,e,n),this}send(t,e,n){return this.sendPacket("message",t,e,n),this}sendPacket(t,e,n,i){if("function"==typeof e&&(i=e,e=void 0),"function"==typeof n&&(i=n,n=null),"closing"===this.readyState||"closed"===this.readyState)return;(n=n||{}).compress=!1!==n.compress;const r={type:t,data:e,options:n};this.emit("packetCreate",r),this.writeBuffer.push(r),i&&this.once("flush",i),this.flush()}close(){const t=this;function e(){t.onClose("forced close"),s("socket closing - telling transport to close"),t.transport.close()}function n(){t.removeListener("upgrade",n),t.removeListener("upgradeError",n),e()}function i(){t.once("upgrade",n),t.once("upgradeError",n)}return("opening"===this.readyState||"open"===this.readyState)&&(this.readyState="closing",this.writeBuffer.length?this.once("drain",function(){this.upgrading?i():e()}):this.upgrading?i():e()),this}onError(t){s("socket error %j",t),c.priorWebsocketSuccess=!1,this.emit("error",t),this.onClose("transport error",t)}onClose(t,e){if("opening"===this.readyState||"open"===this.readyState||"closing"===this.readyState){s('socket close with reason: "%s"',t);const n=this;clearTimeout(this.pingIntervalTimer),clearTimeout(this.pingTimeoutTimer),this.transport.removeAllListeners("close"),this.transport.close(),this.transport.removeAllListeners(),this.readyState="closed",this.id=null,this.emit("close",t,e),n.writeBuffer=[],n.prevBufferLen=0}}filterUpgrades(t){const e=[];let n=0;const 
i=t.length;for(;n{},this.script.parentNode.removeChild(this.script),this.script=null),this.form&&(this.form.parentNode.removeChild(this.form),this.form=null,this.iframe=null),super.doClose()}doPoll(){const t=this,e=document.createElement("script");this.script&&(this.script.parentNode.removeChild(this.script),this.script=null),e.async=!0,e.src=this.uri(),e.onerror=function(e){t.onError("jsonp poll error",e)};const n=document.getElementsByTagName("script")[0];n?n.parentNode.insertBefore(e,n):(document.head||document.body).appendChild(e),this.script=e,"undefined"!=typeof navigator&&/gecko/i.test(navigator.userAgent)&&setTimeout(function(){const t=document.createElement("iframe");document.body.appendChild(t),document.body.removeChild(t)},100)}doWrite(t,e){const n=this;let i;if(!this.form){const t=document.createElement("form"),e=document.createElement("textarea"),n=this.iframeId="eio_iframe_"+this.index;t.className="socketio",t.style.position="absolute",t.style.top="-1000px",t.style.left="-1000px",t.target=n,t.method="POST",t.setAttribute("accept-charset","utf-8"),e.name="d",t.appendChild(e),document.body.appendChild(t),this.form=t,this.area=e}function r(){a(),e()}function a(){if(n.iframe)try{n.form.removeChild(n.iframe)}catch(t){n.onError("jsonp polling iframe removal error",t)}try{i=document.createElement('