Fix Quickstart Example on Readme (#1082)
Signed-off-by: zehao-intel <[email protected]>
(cherry picked from commit 1276f0f)
zehao-intel authored and chensuyue committed Jul 21, 2023
1 parent 8af7437 commit a7721fc
Showing 4 changed files with 7 additions and 21 deletions.
README.md: 6 changes (2 additions, 4 deletions)
@@ -45,9 +45,8 @@ pip install tensorflow
wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/mobilenet_v1_1.0_224_frozen.pb
```
```python
+from neural_compressor.data import DataLoader, Datasets
from neural_compressor.config import PostTrainingQuantConfig
-from neural_compressor.data import DataLoader
-from neural_compressor.data import Datasets

dataset = Datasets('tensorflow')['dummy'](shape=(1, 224, 224, 3))
dataloader = DataLoader(framework='tensorflow', dataset=dataset)
@@ -56,8 +55,7 @@ from neural_compressor.quantization import fit
q_model = fit(
model="./mobilenet_v1_1.0_224_frozen.pb",
conf=PostTrainingQuantConfig(),
-calib_dataloader=dataloader,
-eval_dataloader=dataloader)
+calib_dataloader=dataloader)
```

## Documentation
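Put together, the quickstart now runs calibration only. A minimal sketch of the resulting snippet, reassembled from the hunks above (the dummy dataset is just a stand-in for real calibration data):

```python
from neural_compressor.data import DataLoader, Datasets
from neural_compressor.config import PostTrainingQuantConfig
from neural_compressor.quantization import fit

# Dummy dataset: random data with the MobileNet input shape, used only for calibration.
dataset = Datasets('tensorflow')['dummy'](shape=(1, 224, 224, 3))
dataloader = DataLoader(framework='tensorflow', dataset=dataset)

# Post-training quantization of the downloaded frozen graph; calibration only.
q_model = fit(
    model="./mobilenet_v1_1.0_224_frozen.pb",
    conf=PostTrainingQuantConfig(),
    calib_dataloader=dataloader)
```

Without an eval_dataloader or metric, the example only calibrates and converts the model; no evaluation step is involved.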
docs/source/get_started.md: 9 changes (3 additions, 6 deletions)
@@ -15,20 +15,17 @@ pip install tensorflow
wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/mobilenet_v1_1.0_224_frozen.pb
```
```python
+from neural_compressor.data import DataLoader, Datasets
from neural_compressor.config import PostTrainingQuantConfig
-from neural_compressor.data import DataLoader
-from neural_compressor.data import Datasets

dataset = Datasets('tensorflow')['dummy'](shape=(1, 224, 224, 3))
dataloader = DataLoader(framework='tensorflow', dataset=dataset)

from neural_compressor.quantization import fit
-config = PostTrainingQuantConfig()
q_model = fit(
model="./mobilenet_v1_1.0_224_frozen.pb",
-conf=config,
-calib_dataloader=dataloader,
-eval_dataloader=dataloader)
+conf=PostTrainingQuantConfig(),
+calib_dataloader=dataloader)
```

## Validated Models
examples/helloworld/tf_example7/README.md: 6 changes (1 addition, 5 deletions)
@@ -32,12 +32,8 @@ We can quantize a model only needing to set the dataloader with dummy dataset to
dataloader = DataLoader(framework='tensorflow', dataset=dataset)
# Post Training Quantization Config
config = PostTrainingQuantConfig()
-# Built-in topk metric
-top1 = Metric(name="topk", k=1)
# Just call fit to do quantization.
q_model = fit(model="./mobilenet_v1_1.0_224_frozen.pb",
conf=config,
-calib_dataloader=dataloader,
-eval_dataloader=dataloader,
-eval_metric=top1)
+calib_dataloader=dataloader)
```
examples/helloworld/tf_example7/test.py: 7 changes (1 addition, 6 deletions)
@@ -1,4 +1,3 @@
-from neural_compressor import Metric
from neural_compressor.config import PostTrainingQuantConfig
from neural_compressor.data import DataLoader
from neural_compressor.data import Datasets
@@ -13,14 +12,10 @@ def main():
dataloader = DataLoader(framework='tensorflow', dataset=dataset)
# Post Training Quantization Config
config = PostTrainingQuantConfig()
-# Built-in topk metric
-top1 = Metric(name="topk", k=1)
# Just call fit to do quantization.
q_model = fit(model="./mobilenet_v1_1.0_224_frozen.pb",
conf=config,
-calib_dataloader=dataloader,
-eval_dataloader=dataloader,
-eval_metric=top1)
+calib_dataloader=dataloader)


if __name__ == "__main__":
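The same simplification applies to the helloworld script. A sketch of test.py after this change, assuming the lines hidden behind the collapsed hunk (the fit import and the dummy-dataset setup) match the quickstart above:

```python
from neural_compressor.config import PostTrainingQuantConfig
from neural_compressor.data import DataLoader
from neural_compressor.data import Datasets
from neural_compressor.quantization import fit


def main():
    # Dummy dataset/dataloader, assumed to match the quickstart
    # (this part of test.py is collapsed in the diff above).
    dataset = Datasets('tensorflow')['dummy'](shape=(1, 224, 224, 3))
    dataloader = DataLoader(framework='tensorflow', dataset=dataset)
    # Post Training Quantization Config
    config = PostTrainingQuantConfig()
    # Just call fit to do quantization; no metric or eval dataloader anymore.
    q_model = fit(model="./mobilenet_v1_1.0_224_frozen.pb",
                  conf=config,
                  calib_dataloader=dataloader)


if __name__ == "__main__":
    main()
```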
