NeuralCoder JupyterLab extension update (intel#1281)
* nc

* add copyright

* fix spelling

* restore

Co-authored-by: mengfeil <[email protected]>
kaikaiyao and mengfei25 authored Sep 23, 2022
1 parent f60dcd1 commit 5ca1723
Showing 135 changed files with 34,781 additions and 32,853 deletions.
31 changes: 31 additions & 0 deletions neural_coder/backends/onnx_inc_static_quant_qlinear.yaml
@@ -0,0 +1,31 @@
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

transformation:
  location:
    - insert_below_model_definition_line
  content:
    - |-
      [+] from neural_compressor.experimental import Quantization, common
      [+] from neural_compressor import options, conf
      [+] conf.model.framework = 'onnxrt_qlinearops'
      [+] conf.quantization.approach = 'post_training_static_quant'
      [+] quantizer = Quantization(conf)
      [+] quantizer.model = common.Model(MODEL_NAME)
      [+] quantizer.calib_dataloader = DATALOADER_NAME
      [+] quantizer.eval_func = EVAL_FUNCTION_NAME
      [+] MODEL_NAME = quantizer()
  order:
    - below:
      above:
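For context, here is a rough sketch of what a user's script could look like after these lines are injected below the model-definition line. The names `model`, `calib_loader`, and `eval_func` are hypothetical stand-ins for the `MODEL_NAME`, `DATALOADER_NAME`, and `EVAL_FUNCTION_NAME` placeholders:

```python
# Hypothetical post-injection script; variable names are stand-ins for
# the MODEL_NAME / DATALOADER_NAME / EVAL_FUNCTION_NAME placeholders.
import onnx
model = onnx.load("model.onnx")  # the model-definition line (assumed)

from neural_compressor.experimental import Quantization, common
from neural_compressor import options, conf
conf.model.framework = 'onnxrt_qlinearops'
conf.quantization.approach = 'post_training_static_quant'
quantizer = Quantization(conf)
quantizer.model = common.Model(model)
quantizer.calib_dataloader = calib_loader  # user's calibration dataloader (assumed)
quantizer.eval_func = eval_func            # user's evaluation function (assumed)
model = quantizer()
```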
11 changes: 8 additions & 3 deletions neural_coder/coders/autoinc/autoinc_harness.py
@@ -82,9 +82,10 @@ def register_transformation(self):
if model_name + "(" in line or \
(model_name + "." in line and line.find(model_name) < line.find(".") and "(" in line):
to_transform = True
if not to_transform:
if not to_transform and domain_ == 'onnx':
pass
elif not to_transform:
continue

### information

# search for the DataLoader definition in this file
@@ -142,7 +143,7 @@ def register_transformation(self):
### check

bk_trans_content_this = bk_trans_content[bk_trans_location.index(loc)]
if file_path_idx == 0 and (domain_ in ['transformers_trainer', 'torchvision', 'onnx']):
    pass
elif ("INPUT_NAME" in bk_trans_content_this and input_name == "") \
        or ("DATALOADER_NAME" in bk_trans_content_this and dataloader_name == "") \
@@ -262,6 +263,10 @@ def register_transformation(self):
    lines_to_insert = lines_to_insert \
        .replace("EVAL_FUNC_LINES", globals.list_eval_func_lines[0]) \
        .replace("DATALOADER_NAME", globals.list_calib_dataloader_name[0])
elif domain_ == 'onnx':
    lines_to_insert = lines_to_insert \
        .replace("EVAL_FUNCTION_NAME", globals.list_eval_func_name[0]) \
        .replace("DATALOADER_NAME", globals.list_calib_dataloader_name[0])
else:
    lines_to_insert = lines_to_insert \
        .replace("DATALOADER_NAME", dataloader_name)
13 changes: 13 additions & 0 deletions neural_coder/coders/autoinc/calib_dataloader.py
@@ -27,5 +27,18 @@ def register_transformation(self):
    pass
elif domain_ == 'torchvision':
    globals.list_calib_dataloader_name.append('val_loader')
elif domain_ == 'onnx':
    codes = open(globals.list_code_path[0], 'r').read().split('\n')
    for line in codes:
        line = line.strip()
        # record the left-hand side of assignments such as "calib_dataloader = ..."
        if 'loader' in line and '=' in line:
            end = 0
            for i in range(len(line)):
                if line[i] == '=':
                    end = i  # first '=' is the assignment operator
                    break
            # drop a single trailing space before '=' if present
            if line[end-1] == ' ':
                globals.list_calib_dataloader_name.append(line[:end-1])
            else:
                globals.list_calib_dataloader_name.append(line[:end])
else:  # random model
    pass
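For instance, on a hypothetical user line such as `calib_dataloader = DataLoader(dataset)`, the scan records the left-hand side of the assignment:

```python
# Toy re-run of the scan above on one hypothetical line.
line = "calib_dataloader = DataLoader(dataset)".strip()
end = 0
for i in range(len(line)):
    if line[i] == '=':
        end = i  # first '=' is the assignment operator
        break
name = line[:end-1] if line[end-1] == ' ' else line[:end]
print(name)  # calib_dataloader
```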
2 changes: 2 additions & 0 deletions neural_coder/coders/autoinc/domain.py
@@ -23,5 +23,7 @@ def determine_domain(path) -> str:
            return 'transformers_trainer'
        else:
            return 'transformers_no_trainer'
    elif 'onnx.load(' in codes:
        return 'onnx'
    else:
        return 'random model'
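As an example, a hypothetical user script like the one below would be classified as 'onnx', because its source contains the literal substring `onnx.load(`:

```python
# Hypothetical user script that determine_domain() would label 'onnx';
# the model file name is made up.
import onnx
import onnxruntime

model = onnx.load("resnet50.onnx")
sess = onnxruntime.InferenceSession(model.SerializeToString(), None)
```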
19 changes: 19 additions & 0 deletions neural_coder/coders/autoinc/eval_func.py
@@ -85,5 +85,24 @@ def register_transformation(self):
        lines[index] = '[+] ' + ' ' * 8 + line
    lines = '\n'.join(lines)
    globals.list_eval_func_lines.append(lines)
elif domain_ == 'onnx':
    # look for sess = onnxruntime.InferenceSession(MODEL_NAME.SerializeToString(), None)
    codes = open(globals.list_code_path[0], 'r').read().split('\n')
    start = 0
    for idx, line in enumerate(codes):
        if "onnxruntime.InferenceSession(" in line:
            start = idx
            break
    line_indent = get_line_indent_level(codes[start])
    # walk upward to the enclosing "def" exactly one indent level out
    target = None
    for i in range(start, -1, -1):
        if "def" in codes[i] and (line_indent - get_line_indent_level(codes[i])) == 4:
            target = codes[i].split(' ')[1]  # e.g. "eval_func(model):"
            break
    # strip the parameter list to keep the bare function name
    for i in range(len(target)):
        if target[i] == '(':
            globals.list_eval_func_name.append(target[:i])
            break
else:  # random model
    pass
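Concretely, on a hypothetical user script like the sketch below, the scan finds the `onnxruntime.InferenceSession(` line, walks upward to the enclosing `def` one indent level out, and records `eval_func` as the evaluation function name:

```python
import onnxruntime

# Hypothetical user code: "def eval_func(" sits exactly one indent
# level (4 spaces) above the InferenceSession( line, so the scan
# records the bare name "eval_func".
def eval_func(model):
    sess = onnxruntime.InferenceSession(model.SerializeToString(), None)
    return 0.0  # placeholder accuracy
```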
37 changes: 29 additions & 8 deletions neural_coder/coders/pytorch/cuda_to_cpu.py
@@ -22,21 +22,42 @@ def __init__(self, file) -> None:

    def transform(self):
        lines = self.file.split('\n')
        # determine whether to skip the whole file (cases such as args.device, args.cuda, etc.)
        to_jump = False
        for line in lines:
            if self.is_jump_file(line):
                to_jump = True
                break

        if to_jump:  # this file does not need transformation
            for line in lines:
                self.result.append(line)
        else:  # this file might need transformation
            for line in lines:
                if self.is_delete(line):
                    indent_level = get_line_indent_level(line)
                    new_line = " " * indent_level + "pass"
                    self.result.append(new_line)
                elif self.is_modify(line):
                    new_line = self.change_to_cpu(line)
                    self.result.append(new_line)
                else:
                    self.result.append(line)
        for index, line in enumerate(self.result):
            if index != len(self.result)-1:
                self.result[index] += '\n'
        return ''.join(self.result)

    def is_jump_file(self, s):
        if "args.device" in s \
                or "args.cpu" in s \
                or "args.gpu" in s \
                or "args.cuda" in s \
                or "torch.cuda.is_available()" in s:
            return True
        else:
            return False

    def is_delete(self, s):
        if 'cuda.' in s and '=' not in s and "if" not in s:
            return True
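A minimal sketch of driving this transform; the class name `CudaToCpu` and its import path are assumptions inferred from the file location, not shown in this hunk:

```python
# Assumed import path and class name, inferred from
# neural_coder/coders/pytorch/cuda_to_cpu.py.
from neural_coder.coders.pytorch.cuda_to_cpu import CudaToCpu

src = "if torch.cuda.is_available():\n    model = model.cuda()\n"
coder = CudaToCpu(src)
# is_jump_file() matches "torch.cuda.is_available()", so the whole
# file is returned unchanged.
print(coder.transform() == src)  # True
```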
1 change: 1 addition & 0 deletions neural_coder/docs/AutoQuant.md
@@ -16,6 +16,7 @@ This feature helps automatically enable quantization features on a PyTorch model

## Usage
- PyPI distribution with a one-line API call
- [JupyterLab extension](../extensions/README.md)

## Example
### PyPI distribution:
46 changes: 46 additions & 0 deletions neural_coder/docs/IntelCPU_PerformanceSetting.md
@@ -0,0 +1,46 @@
## Intel CPU Platforms: Best Performance Setting
### Install MKL, OpenMP, and jemalloc
Installing through CONDA is the simplest way:
```bash
conda install -y mkl mkl-include jemalloc
```

### Environment Variables
Check whether your ```CONDA_PREFIX``` has a value:
```bash
echo ${CONDA_PREFIX}
```
If it's empty, you're not in a traditional CONDA environment, and you need to find the location of the ```.so``` files manually:
```bash
find / -name "libjemalloc.so"
find / -name "libiomp5.so"
```
It will show the paths these files were installed to, for example:
```
/home/name/lib/libjemalloc.so
/home/name/lib/libiomp5.so
```
Then export this path as ```CONDA_PREFIX```:
```bash
export CONDA_PREFIX="/home/name"
```
Finally:
```bash
export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libjemalloc.so
export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so
export MALLOC_CONF="oversize_threshold:1,background_thread:true,metadata_thp:auto,dirty_decay_ms:9000000000,muzzy_decay_ms:9000000000"
export KMP_AFFINITY="granularity=fine,compact,1,0"
export KMP_BLOCKTIME=1
export DNNL_PRIMITIVE_CACHE_CAPACITY=1024
```

### Frequency Governors
Check the frequency governor state on your machine:
```bash
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
```
If it shows ```powersave``` instead of ```performance```, execute:
```bash
echo "performance" | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
```