Skip to content

Commit

Permalink
GPU: Cache encryption doc (#28137)
Browse files Browse the repository at this point in the history
### Details: Cache encryption doc
- follow-up to #28035, which was rolled back
- adds a GPU availability check (for GHA) to the C++ and Python snippets

---------

Co-authored-by: Tomasz Krupa <[email protected]>
Co-authored-by: Sebastian Golebiewski <[email protected]>
  • Loading branch information
3 people authored Dec 19, 2024
1 parent f583af7 commit f5b85f1
Show file tree
Hide file tree
Showing 3 changed files with 75 additions and 3 deletions.
38 changes: 37 additions & 1 deletion docs/articles_en/assets/snippets/ov_caching.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -90,14 +90,50 @@ auto compiled = core.compile_model(model, device, config); // Step 5:
}
}

void part5() {
    std::string modelPath = "/tmp/myModel.xml";
    std::string device = "GPU";
    ov::Core core;  // Step 1: create ov::Core object
    // Step 1a: bail out early when no GPU device is available,
    // so this snippet is a no-op on GPU-less runners (e.g. CI).
    bool gpuPresent = false;
    for (auto&& available : core.get_available_devices()) {
        if (available.find(device) != std::string::npos) {
            gpuPresent = true;
        }
    }
    if (!gpuPresent) {
        return;
    }
    core.set_property(ov::cache_dir("/path/to/cache/dir"));  // Step 1b: Enable caching
    //! [ov:caching:part5]
    static const char codec_key[] = {0x30, 0x60, 0x70, 0x02, 0x04, 0x08, 0x3F, 0x6F, 0x72, 0x74, 0x78, 0x7F};
    // Symmetric XOR codec: the same callback both encrypts and decrypts,
    // cycling through codec_key byte by byte.
    auto codec_xor = [&](const std::string& source_str) {
        const auto key_size = sizeof(codec_key);
        std::string dst_str = source_str;
        size_t idx = 0;
        for (char& c : dst_str) {
            c ^= codec_key[idx++ % key_size];
        }
        return dst_str;
    };
    auto compiled = core.compile_model(modelPath,
                                       device,
                                       ov::cache_encryption_callbacks(ov::EncryptionCallbacks{codec_xor, codec_xor}),
                                       ov::cache_mode(ov::CacheMode::OPTIMIZE_SIZE));  // Step 5: Compile model
    //! [ov:caching:part5]
    if (!compiled) {
        throw std::runtime_error("error");
    }
}

int main() {
try {
part0();
part1();
part2();
part3();
part4();
part5();
} catch (...) {
}
return 0;
}
}
18 changes: 18 additions & 0 deletions docs/articles_en/assets/snippets/ov_caching.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,3 +59,21 @@ def decrypt_base64(src):
model = core.read_model(model=model_path)
compiled_model = core.compile_model(model=model, device_name=device_name, config=config_cache)
# ! [ov:caching:part4]

# ! [ov:caching:part5]
import base64

def encrypt_base64(src):
    # Encryption callback: base64-encode the str payload, returning bytes.
    return base64.b64encode(src.encode("utf-8"))

def decrypt_base64(src):
    # Decryption callback: base64-decode the str payload, returning bytes.
    return base64.b64decode(src.encode("utf-8"))

core = ov.Core()
# Only exercise GPU cache encryption when a GPU device is actually present.
if "GPU" in core.available_devices:
    core.set_property({props.cache_dir: path_to_cache_dir})
    config_cache = {
        "CACHE_ENCRYPTION_CALLBACKS": [encrypt_base64, decrypt_base64],
        "CACHE_MODE": "OPTIMIZE_SIZE",
    }
    compiled_model = core.compile_model(model=model_path, device_name='GPU', config=config_cache)
# ! [ov:caching:part5]
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ To check in advance if a particular device supports model caching, your applicat
Set "cache_encryption_callbacks" config option to enable cache encryption
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

If model caching is enabled, the model topology can be encrypted when saving to the cache and decrypted when loading from the cache. This property can currently be set only in ``compile_model``.
If model caching is enabled in the CPU Plugin, the model topology can be encrypted while it is saved to the cache and decrypted when it is loaded from the cache. Currently, this property can be set only in ``compile_model``.

.. tab-set::

Expand All @@ -157,6 +157,24 @@ If model caching is enabled, the model topology can be encrypted when saving to
:language: cpp
:fragment: [ov:caching:part4]

If model caching is enabled in the GPU Plugin, the model topology can be encrypted while it is saved to the cache and decrypted when it is loaded from the cache. Full encryption only works when the ``CacheMode`` property is set to ``OPTIMIZE_SIZE``.

.. tab-set::

.. tab-item:: Python
:sync: py

.. doxygensnippet:: docs/articles_en/assets/snippets/ov_caching.py
:language: py
:fragment: [ov:caching:part5]

.. tab-item:: C++
:sync: cpp

.. doxygensnippet:: docs/articles_en/assets/snippets/ov_caching.cpp
:language: cpp
:fragment: [ov:caching:part5]

.. important::

Currently, this property is supported only by the CPU plugin. For other HW plugins, setting this property will not encrypt/decrypt the model topology in cache and will not affect performance.
Currently, this property is supported only by the CPU and GPU plugins. For other HW plugins, setting this property will not encrypt/decrypt the model topology in cache and will not affect performance.

0 comments on commit f5b85f1

Please sign in to comment.