Skip to content

Commit

Permalink
migrate to ov.save_model(...)
Browse files Browse the repository at this point in the history
  • Loading branch information
alexsu52 committed Oct 25, 2023
1 parent 9cc57c0 commit 3056252
Show file tree
Hide file tree
Showing 6 changed files with 10 additions and 10 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -165,12 +165,12 @@ def transform_fn(data_item):
# Benchmark performance, calculate compression rate and validate accuracy

fp32_ir_path = f"{ROOT}/stfpm_fp32.xml"
ov.serialize(ov_model, fp32_ir_path)
ov.save_model(ov_model, fp32_ir_path, compress_to_fp16=False)
print(f"[1/7] Save FP32 model: {fp32_ir_path}")
fp32_size = get_model_size(fp32_ir_path, verbose=True)

int8_ir_path = f"{ROOT}/stfpm_int8.xml"
ov.serialize(ov_quantized_model, int8_ir_path)
ov.save_model(ov_quantized_model, int8_ir_path, compress_to_fp16=False)
print(f"[2/7] Save INT8 model: {int8_ir_path}")
int8_size = get_model_size(int8_ir_path, verbose=True)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -137,12 +137,12 @@ def transform_fn(data_item):
# Benchmark performance, calculate compression rate and validate accuracy

fp32_ir_path = f"{ROOT}/mobilenet_v2_fp32.xml"
ov.serialize(ov_model, fp32_ir_path)
ov.save_model(ov_model, fp32_ir_path, compress_to_fp16=False)
print(f"[1/7] Save FP32 model: {fp32_ir_path}")
fp32_model_size = get_model_size(fp32_ir_path, verbose=True)

int8_ir_path = f"{ROOT}/mobilenet_v2_int8.xml"
ov.serialize(ov_quantized_model, int8_ir_path)
ov.save_model(ov_quantized_model, int8_ir_path, compress_to_fp16=False)
print(f"[2/7] Save INT8 model: {int8_ir_path}")
int8_model_size = get_model_size(int8_ir_path, verbose=True)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ def main():
# Quantize model in OpenVINO representation
quantized_model = quantize(ov_model, data_loader, validator)
quantized_model_path = Path(f"{ROOT}/{MODEL_NAME}_openvino_model/{MODEL_NAME}_quantized.xml")
ov.serialize(quantized_model, str(quantized_model_path))
ov.save_model(quantized_model, str(quantized_model_path), compress_to_fp16=False)

# Validate FP32 model
fp_stats, total_images, total_objects = validate(ov_model, tqdm(data_loader), validator)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ def main():
quantized_model = quantize_ac(ov_model, data_loader, validator)

quantized_model_path = Path(f"{ROOT}/{MODEL_NAME}_openvino_model/{MODEL_NAME}_quantized.xml")
ov.serialize(quantized_model, str(quantized_model_path))
ov.save_model(quantized_model, str(quantized_model_path), compress_to_fp16=False)

# Validate FP32 model
fp_stats, total_images, total_objects = validate(ov_model, tqdm(data_loader), validator)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -173,12 +173,12 @@ def transform_fn(data_item):
ov_quantized_model = mo.convert_model(int8_onnx_path)

fp32_ir_path = f"{ROOT}/mobilenet_v2_fp32.xml"
ov.serialize(ov_model, fp32_ir_path)
ov.save_model(ov_model, fp32_ir_path, compress_to_fp16=False)
print(f"[1/7] Save FP32 model: {fp32_ir_path}")
fp32_model_size = get_model_size(fp32_ir_path, verbose=True)

int8_ir_path = f"{ROOT}/mobilenet_v2_int8.xml"
ov.serialize(ov_quantized_model, int8_ir_path)
ov.save_model(ov_quantized_model, int8_ir_path, compress_to_fp16=False)
print(f"[2/7] Save INT8 model: {int8_ir_path}")
int8_model_size = get_model_size(int8_ir_path, verbose=True)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -163,12 +163,12 @@ def main():
ov_quantized_model = mo.convert_model(int8_onnx_path)

fp32_ir_path = f"{ROOT}/ssd300_vgg16_fp32.xml"
ov.serialize(ov_model, fp32_ir_path)
ov.save_model(ov_model, fp32_ir_path, compress_to_fp16=False)
print(f"[1/7] Save FP32 model: {fp32_ir_path}")
fp32_model_size = get_model_size(fp32_ir_path, verbose=True)

int8_ir_path = f"{ROOT}/ssd300_vgg16_int8.xml"
ov.serialize(ov_quantized_model, int8_ir_path)
ov.save_model(ov_quantized_model, int8_ir_path, compress_to_fp16=False)
print(f"[2/7] Save INT8 model: {int8_ir_path}")
int8_model_size = get_model_size(int8_ir_path, verbose=True)

Expand Down

0 comments on commit 3056252

Please sign in to comment.