Commit 922b247

Signed-off-by: zehao-intel <[email protected]>

1 parent 70a1d50 · Showing 44 changed files with 5,428 additions and 6 deletions.
@@ -0,0 +1,48 @@

```json
{
    "tensorflow": {
        "bert_large_squad_model_zoo": {
            "model_src_dir": "nlp/bert_large_squad_model_zoo/quantization/ptq",
            "dataset_location": "/tf_dataset/tensorflow/bert/data",
            "input_model": "/tf_dataset/tensorflow/bert/fp32_bert_squad.pb",
            "main_script": "main.py",
            "batch_size": 64,
            "fp32_model_url": "https://storage.googleapis.com/intel-optimized-tensorflow/models/v2_7_0/fp32_bert_squad.pb"
        },
        "opt_125m_sq": {
            "model_src_dir": "nlp/large_language_models/quantization/ptq/smoothquant",
            "dataset_location": "",
            "input_model": "facebook/opt-125m",
            "main_script": "main.py",
            "batch_size": 16
        },
        "gpt2_medium_sq": {
            "model_src_dir": "nlp/large_language_models/quantization/ptq/smoothquant",
            "dataset_location": "",
            "input_model": "gpt2-medium",
            "main_script": "main.py",
            "batch_size": 16
        },
        "gpt-j-6B": {
            "model_src_dir": "nlp/large_language_models/quantization/ptq/gpt-j",
            "dataset_location": "",
            "input_model": "/tf_dataset2/models/tensorflow/gpt-j-6B",
            "main_script": "main.py",
            "batch_size": 1
        },
        "ViT": {
            "model_src_dir": "image_recognition/vision_transformer/quantization/ptq",
            "dataset_location": "/tf_dataset/dataset/imagenet",
            "input_model": "/tf_dataset/tensorflow/vit/HF-ViT-Base16-Img224-frozen.pb",
            "main_script": "main.py",
            "batch_size": 32
        },
        "GraphSage": {
            "model_src_dir": "graph_networks/graphsage/quantization/ptq",
            "dataset_location": "/tf_dataset/dataset/ppi",
            "input_model": "/tf_dataset/tensorflow/graphsage/graphsage_frozen_model.pb",
            "main_script": "main.py",
            "batch_size": 1000
        }
    }
}
```
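Each entry in this new config maps a model name to the source directory, dataset and model paths, entry script, and batch size that the example runner consumes. As a minimal sketch of how such an entry might be read (the actual runner is not part of this diff, and the file name below is a placeholder):

```python
import json

# Placeholder file name; in the repository the config lives alongside the CI scripts.
with open("tf_example_config.json") as f:
    config = json.load(f)

entry = config["tensorflow"]["GraphSage"]
print(entry["model_src_dir"])   # graph_networks/graphsage/quantization/ptq
print(entry["input_model"])     # path to the FP32 frozen graph
print(entry["batch_size"])      # 1000
```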
109 changes: 109 additions & 0 deletions
examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/README.md

@@ -0,0 +1,109 @@
Step-by-Step
============

This document lists the steps to reproduce the Intel® Neural Compressor tuning result for the TensorFlow GraphSage model. This example can run on Intel CPUs and GPUs.

# Prerequisite

## 1. Environment
Python 3.6 or a higher version is recommended.

### Install Intel® Neural Compressor
```shell
pip install neural-compressor
```

### Install Intel TensorFlow
```shell
pip install intel-tensorflow
```
> Note: Validated TensorFlow [Version](/docs/source/installation_guide.md#validated-software-environment).

### Install Dependency Packages
```shell
cd examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq
pip install -r requirements.txt
```
### Install Intel Extension for TensorFlow

#### Quantizing the model on Intel GPU (Mandatory to install ITEX)
Intel Extension for TensorFlow is mandatory to be installed for quantizing the model on Intel GPUs.

```shell
pip install --upgrade intel-extension-for-tensorflow[xpu]
```
For more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers).

#### Quantizing the model on Intel CPU (Optional to install ITEX)
Intel Extension for TensorFlow for Intel CPUs is currently experimental. It is not mandatory for quantizing the model on Intel CPUs.

```shell
pip install --upgrade intel-extension-for-tensorflow[cpu]
```

> **Note**:
> The version compatibility of stock TensorFlow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible TensorFlow and ITEX.
## 2. Prepare Model
Download the frozen graph:
```shell
wget https://storage.googleapis.com/intel-optimized-tensorflow/models/2_12_0/graphsage_frozen_model.pb
```

## 3. Prepare Dataset

```shell
wget https://snap.stanford.edu/graphsage/ppi.zip
unzip ppi.zip
```
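After unzipping, the `ppi` directory should contain the files that this example's `dataloader.py` expects: `ppi-G.json`, `ppi-id_map.json`, `ppi-class_map.json`, and `ppi-feats.npy`. A small sanity check, assuming the archive unpacked into `./ppi`:

```python
import os

# load_data() in dataloader.py reads these files from a common prefix;
# "./ppi/ppi" is an assumption based on how the archive unpacks.
prefix = "./ppi/ppi"
for suffix in ("-G.json", "-id_map.json", "-class_map.json", "-feats.npy"):
    path = prefix + suffix
    print(path, "found" if os.path.exists(path) else "MISSING")
```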
# Run

## 1. Quantization

```shell
# The command to run GraphSage quantization
bash run_quant.sh --input_model=./graphsage_frozen_model.pb --output_model=./nc_graphsage_int8_model.pb --dataset_location=./ppi
```
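The quantized model is written to `./nc_graphsage_int8_model.pb` as a standard frozen graph. As an optional, illustrative check (not part of the example scripts), it can be loaded to confirm it parses:

```python
import tensorflow as tf

# Parse the quantized frozen graph to verify the output file is well formed.
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile("./nc_graphsage_int8_model.pb", "rb") as f:
    graph_def.ParseFromString(f.read())
print(len(graph_def.node), "nodes in the quantized graph")
```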
## 2. Benchmark
```shell
bash run_benchmark.sh --input_model=./nc_graphsage_int8_model.pb --dataset_location=./ppi --mode=performance
```
Details of enabling Intel® Neural Compressor on GraphSage for TensorFlow
=========================

This is a tutorial on how to enable the GraphSage model with Intel® Neural Compressor.

## User Code Analysis
The user specifies the FP32 *model*, a calibration dataset *calib_dataloader*, and a custom *eval_func* that encapsulates the evaluation dataset and metric by itself.

For GraphSage, we applied the custom *eval_func* approach because our philosophy is to enable the model with minimal changes. Hence we need to make two changes to the original code: the first is to implement the calibration dataloader, and the second is to make the necessary changes to *eval_func*.

### Code update

After the preparation steps are done, we just need to update main.py as below.
```python
if args.tune:
    from neural_compressor.tensorflow import StaticQuantConfig, quantize_model
    from neural_compressor.tensorflow.utils import BaseDataLoader

    dataset = CustomDataset()
    calib_dataloader = BaseDataLoader(dataset=dataset, batch_size=1, collate_fn=collate_function)
    quant_config = StaticQuantConfig()
    q_model = quantize_model(args.input_graph, quant_config, calib_dataloader)
    q_model.save(args.output_graph)

if args.benchmark:
    if args.mode == 'performance':
        evaluate(args.input_graph)
    elif args.mode == 'accuracy':
        acc_result = evaluate(args.input_graph)
        print("Batch size = %d" % args.batch_size)
        print("Accuracy: %.5f" % acc_result)
```
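Note that `CustomDataset` and `collate_function` are defined elsewhere in main.py and are not shown in this diff. A minimal, hypothetical sketch of the interface `BaseDataLoader` needs from them (the sample shapes and names below are illustrative assumptions, not the example's actual implementation):

```python
import numpy as np

class CustomDataset:
    """Hypothetical calibration dataset: one feature array per sample."""
    def __init__(self):
        # Real code would build samples from the preprocessed PPI graph;
        # the 50-dim feature vector here is an arbitrary placeholder.
        self.samples = [np.zeros((1, 50), dtype=np.float32) for _ in range(10)]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        return self.samples[index]

def collate_function(batch):
    # Stack per-sample arrays into a single calibration batch.
    return np.concatenate(batch, axis=0)
```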
The `quantize_model()` API quantizes the input graph with the given config and calibration dataloader and returns the quantized model, which is then saved to the specified output path.
80 changes: 80 additions & 0 deletions
examples/3.x_api/tensorflow/graph_networks/graphsage/quantization/ptq/dataloader.py

@@ -0,0 +1,80 @@

```python
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2024 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import random
import json
import sys
import os

import networkx as nx
from networkx.readwrite import json_graph


def load_data(prefix, normalize=True, load_walks=False):
    G_data = json.load(open(prefix + "-G.json"))
    G = json_graph.node_link_graph(G_data)
    if isinstance(list(G.nodes())[0], int):
        conversion = lambda n: int(n)
    else:
        conversion = lambda n: n

    if os.path.exists(prefix + "-feats.npy"):
        feats = np.load(prefix + "-feats.npy")
    else:
        print("No features present.. Only identity features will be used.")
        feats = None
    id_map = json.load(open(prefix + "-id_map.json"))
    id_map = {conversion(k): int(v) for k, v in id_map.items()}
    walks = []
    class_map = json.load(open(prefix + "-class_map.json"))
    if isinstance(list(class_map.values())[0], list):
        lab_conversion = lambda n: n
    else:
        lab_conversion = lambda n: int(n)

    class_map = {conversion(k): lab_conversion(v) for k, v in class_map.items()}

    ## Remove all nodes that do not have val/test annotations
    ## (necessary because of networkx weirdness with the Reddit data)
    broken_count = 0
    for node in list(G.nodes()):  # iterate over a copy so nodes can be removed safely
        if 'val' not in G.nodes[node] or 'test' not in G.nodes[node]:
            G.remove_node(node)
            broken_count += 1
    print("Removed {:d} nodes that lacked proper annotations due to networkx versioning issues".format(broken_count))

    ## Make sure the graph has edge train_removed annotations
    ## (some datasets might already have this..)
    print("Loaded data.. now preprocessing..")
    for edge in G.edges():
        if (G.nodes[edge[0]]['val'] or G.nodes[edge[1]]['val'] or
                G.nodes[edge[0]]['test'] or G.nodes[edge[1]]['test']):
            G[edge[0]][edge[1]]['train_removed'] = True
        else:
            G[edge[0]][edge[1]]['train_removed'] = False

    if normalize and feats is not None:
        from sklearn.preprocessing import StandardScaler
        train_ids = np.array([id_map[n] for n in G.nodes() if not G.nodes[n]['val'] and not G.nodes[n]['test']])
        train_feats = feats[train_ids]
        scaler = StandardScaler()
        scaler.fit(train_feats)
        feats = scaler.transform(feats)

    return G, feats, id_map, walks, class_map
```
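A quick usage sketch for `load_data`, assuming it is run from the example directory and the PPI archive was unzipped into `./ppi` (the `./ppi/ppi` prefix is an assumption about the dataset layout):

```python
from dataloader import load_data

# Load the PPI graph, node features, id map, and class map.
G, feats, id_map, walks, class_map = load_data("./ppi/ppi")
print("nodes:", G.number_of_nodes())
print("features:", None if feats is None else feats.shape)
```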