Align style doc with samples #5709

Merged

Changes from 2 commits
6 changes: 5 additions & 1 deletion cmake/developer_package/add_ie_target.cmake
@@ -31,6 +31,7 @@ addIeTarget(
 function(addIeTarget)
     set(options
         ADD_CPPLINT # Enables code style checks for the target
+        ADD_CLANG_FORMAT # Enables code style checks for the target
         )
     set(oneValueRequiredArgs
         TYPE # type of target, SHARED|STATIC|EXECUTABLE. SHARED and STATIC correspond to add_library, EXECUTABLE to add_executable
@@ -119,6 +120,10 @@ function(addIeTarget)
         # code style
         add_cpplint_target(${ARG_NAME}_cpplint FOR_TARGETS ${ARG_NAME})
     endif()
+    if (ARG_ADD_CLANG_FORMAT)
+        # code style
+        add_clang_format_target(${ARG_NAME}_clang FOR_TARGETS ${ARG_NAME})
+    endif()
     if (ARG_DEVELOPER_PACKAGE)
         # developer package
         openvino_developer_export_targets(COMPONENT ${ARG_DEVELOPER_PACKAGE}
@@ -128,7 +133,6 @@ function(addIeTarget)
         # Provide default compile pdb name equal to target name
         set_target_properties(${ARG_NAME} PROPERTIES COMPILE_PDB_NAME ${ARG_NAME})
     endif()
-
 endfunction()
 
 #[[
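Note: with this flag, any target declared through addIeTarget can opt into clang-format checks; the helper registers a companion ${ARG_NAME}_clang target next to the build target. A minimal usage sketch, assuming a hypothetical static-library target (other addIeTarget arguments elided):

    addIeTarget(
        NAME my_ie_helpers   # hypothetical target name
        TYPE STATIC          # SHARED|STATIC|EXECUTABLE, as documented above
        ADD_CLANG_FORMAT     # also creates a my_ie_helpers_clang style-check target
        )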
11 changes: 9 additions & 2 deletions cmake/developer_package/plugins/plugins.cmake
@@ -27,7 +27,10 @@ endif()
 # )
 #
 function(ie_add_plugin)
-    set(options SKIP_INSTALL)
+    set(options
+        SKIP_INSTALL
+        ADD_CLANG_FORMAT
+        )
     set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR)
     set(multiValueArgs SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS)
     cmake_parse_arguments(IE_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
@@ -73,7 +76,11 @@ function(ie_add_plugin)
         string(CONCAT custom_filter "${custom_filter}" "," "${filter}")
     endforeach()
 
-    add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
+    if (IE_PLUGIN_ADD_CLANG_FORMAT)
+        add_clang_format_target(${IE_PLUGIN_NAME}_clang FOR_TARGETS ${IE_PLUGIN_NAME})
+    else()
+        add_cpplint_target(${IE_PLUGIN_NAME}_cpplint FOR_TARGETS ${IE_PLUGIN_NAME} CUSTOM_FILTERS ${custom_filter})
+    endif()
 
     # check that plugin with such name is not registered
 
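Note: for plugins the new flag is exclusive rather than additive: ADD_CLANG_FORMAT registers ${IE_PLUGIN_NAME}_clang instead of the ${IE_PLUGIN_NAME}_cpplint target, so each plugin is checked by exactly one style tool. A hedged usage sketch (plugin name, device name, and source list are hypothetical):

    ie_add_plugin(NAME templatePlugin        # hypothetical plugin name
                  DEVICE_NAME TEMPLATE       # hypothetical device name
                  SOURCES ${PLUGIN_SOURCES}
                  ADD_CLANG_FORMAT)          # clang-format replaces cpplint here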
25 changes: 25 additions & 0 deletions docs/.clang-format
@@ -0,0 +1,25 @@
+BasedOnStyle: Google
+IndentWidth: 4
+UseTab: Never
+
+Language: Cpp
+Standard: Cpp11
+
+AccessModifierOffset: -4
+AlignConsecutiveMacros: true
+AllowAllArgumentsOnNextLine: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortFunctionsOnASingleLine: Empty
+AllowShortIfStatementsOnASingleLine: Never
+AllowShortLambdasOnASingleLine: Empty
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakBeforeMultilineStrings: false
+ColumnLimit: 160
+# Specialize this comment pragma in order to avoid changes in SEA copyrights
+CommentPragmas: '^#'
+DerivePointerAlignment: false
+FixNamespaceComments: true
+IndentCaseLabels: false
+IndentPPDirectives: BeforeHash
+SpaceBeforeCpp11BracedList: true
+SpaceBeforeCtorInitializerColon: false
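Note: clang-format picks up the nearest .clang-format while walking up from each source file, so this single config covers everything under docs/. Targets that do not go through addIeTarget or ie_add_plugin can hook the check directly, as the CMakeLists.txt changes below do; a minimal sketch with a hypothetical target:

    add_library(docs_example STATIC example.cpp)  # hypothetical docs target
    add_clang_format_target(docs_example_clang FOR_TARGETS docs_example)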
1 change: 1 addition & 0 deletions docs/onnx_custom_op/CMakeLists.txt
@@ -12,4 +12,5 @@ find_package(ngraph REQUIRED COMPONENTS onnx_importer)
 add_library(${TARGET_NAME} STATIC onnx_custom_op.cpp)
 
 target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} ${ONNX_IMPORTER_LIBRARIES})
+add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})
 # [cmake:onnx_custom_op]
12 changes: 6 additions & 6 deletions docs/snippets/Bfloat16Inference0.cpp
@@ -1,10 +1,10 @@
 #include <inference_engine.hpp>
 
 int main() {
-using namespace InferenceEngine;
-//! [part0]
-InferenceEngine::Core core;
-auto cpuOptimizationCapabilities = core.GetMetric("CPU", METRIC_KEY(OPTIMIZATION_CAPABILITIES)).as<std::vector<std::string>>();
-//! [part0]
-return 0;
+    using namespace InferenceEngine;
+    //! [part0]
+    InferenceEngine::Core core;
+    auto cpuOptimizationCapabilities = core.GetMetric("CPU", METRIC_KEY(OPTIMIZATION_CAPABILITIES)).as<std::vector<std::string>>();
+    //! [part0]
+    return 0;
 }
16 changes: 8 additions & 8 deletions docs/snippets/Bfloat16Inference1.cpp
@@ -1,13 +1,13 @@
 #include <inference_engine.hpp>
 
 int main() {
-using namespace InferenceEngine;
-//! [part1]
-InferenceEngine::Core core;
-auto network = core.ReadNetwork("sample.xml");
-auto exeNetwork = core.LoadNetwork(network, "CPU");
-auto enforceBF16 = exeNetwork.GetConfig(PluginConfigParams::KEY_ENFORCE_BF16).as<std::string>();
-//! [part1]
+    using namespace InferenceEngine;
+    //! [part1]
+    InferenceEngine::Core core;
+    auto network = core.ReadNetwork("sample.xml");
+    auto exeNetwork = core.LoadNetwork(network, "CPU");
+    auto enforceBF16 = exeNetwork.GetConfig(PluginConfigParams::KEY_ENFORCE_BF16).as<std::string>();
+    //! [part1]
 
-return 0;
+    return 0;
 }
12 changes: 6 additions & 6 deletions docs/snippets/Bfloat16Inference2.cpp
@@ -1,11 +1,11 @@
 #include <inference_engine.hpp>
 
 int main() {
-using namespace InferenceEngine;
-//! [part2]
-InferenceEngine::Core core;
-core.SetConfig({ { CONFIG_KEY(ENFORCE_BF16), CONFIG_VALUE(NO) } }, "CPU");
-//! [part2]
+    using namespace InferenceEngine;
+    //! [part2]
+    InferenceEngine::Core core;
+    core.SetConfig({{CONFIG_KEY(ENFORCE_BF16), CONFIG_VALUE(NO)}}, "CPU");
+    //! [part2]
 
-return 0;
+    return 0;
 }
1 change: 1 addition & 0 deletions docs/snippets/CMakeLists.txt
@@ -68,3 +68,4 @@ endif()
 
 target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api
         ngraph inference_engine_transformations)
+add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})
18 changes: 9 additions & 9 deletions docs/snippets/CPU_Kernel.cpp
@@ -1,14 +1,14 @@
 #include <inference_engine.hpp>
 
 int main() {
-using namespace InferenceEngine;
-//! [part0]
-InferenceEngine::Core core;
-// Load CPU extension as a shared library
-auto extension_ptr = std::make_shared<InferenceEngine::Extension>(std::string{"<shared lib path>"});
-// Add extension to the CPU device
-core.AddExtension(extension_ptr, "CPU");
-//! [part0]
+    using namespace InferenceEngine;
+    //! [part0]
+    InferenceEngine::Core core;
+    // Load CPU extension as a shared library
+    auto extension_ptr = std::make_shared<InferenceEngine::Extension>(std::string {"<shared lib path>"});
+    // Add extension to the CPU device
+    core.AddExtension(extension_ptr, "CPU");
+    //! [part0]
 
-return 0;
+    return 0;
 }
73 changes: 36 additions & 37 deletions docs/snippets/DynamicBatching.cpp
@@ -2,41 +2,40 @@
 #include <vector>
 
 int main() {
-int FLAGS_bl = 1;
-auto imagesData = std::vector<std::string>(2);
-auto imagesData2 = std::vector<std::string>(4);
-//! [part0]
-int dynBatchLimit = FLAGS_bl; //take dynamic batch limit from command line option
-
-// Read network model
-InferenceEngine::Core core;
-InferenceEngine::CNNNetwork network = core.ReadNetwork("sample.xml");
-
-
-// enable dynamic batching and prepare for setting max batch limit
-const std::map<std::string, std::string> dyn_config =
-{ { InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES } };
-network.setBatchSize(dynBatchLimit);
-
-// create executable network and infer request
-auto executable_network = core.LoadNetwork(network, "CPU", dyn_config);
-auto infer_request = executable_network.CreateInferRequest();
-
-// ...
-
-// process a set of images
-// dynamically set batch size for subsequent Infer() calls of this request
-size_t batchSize = imagesData.size();
-infer_request.SetBatch(batchSize);
-infer_request.Infer();
-
-// ...
-
-// process another set of images
-batchSize = imagesData2.size();
-infer_request.SetBatch(batchSize);
-infer_request.Infer();
-//! [part0]
-
-return 0;
+    int FLAGS_bl = 1;
+    auto imagesData = std::vector<std::string>(2);
+    auto imagesData2 = std::vector<std::string>(4);
+    //! [part0]
+    int dynBatchLimit = FLAGS_bl; // take dynamic batch limit from command line option
+
+    // Read network model
+    InferenceEngine::Core core;
+    InferenceEngine::CNNNetwork network = core.ReadNetwork("sample.xml");
+
+    // enable dynamic batching and prepare for setting max batch limit
+    const std::map<std::string, std::string> dyn_config = {
+        {InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}};
+    network.setBatchSize(dynBatchLimit);
+
+    // create executable network and infer request
+    auto executable_network = core.LoadNetwork(network, "CPU", dyn_config);
+    auto infer_request = executable_network.CreateInferRequest();
+
+    // ...
+
+    // process a set of images
+    // dynamically set batch size for subsequent Infer() calls of this request
+    size_t batchSize = imagesData.size();
+    infer_request.SetBatch(batchSize);
+    infer_request.Infer();
+
+    // ...
+
+    // process another set of images
+    batchSize = imagesData2.size();
+    infer_request.SetBatch(batchSize);
+    infer_request.Infer();
+    //! [part0]
+
+    return 0;
 }
20 changes: 10 additions & 10 deletions docs/snippets/GPU_Kernel.cpp
@@ -1,16 +1,16 @@
 #include <inference_engine.hpp>
 
 int main() {
-using namespace InferenceEngine;
-//! [part0]
-InferenceEngine::Core core;
-// Load GPU Extensions
-core.SetConfig({ { InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "<path_to_the_xml_file>" } }, "GPU");
-//! [part0]
+    using namespace InferenceEngine;
+    //! [part0]
+    InferenceEngine::Core core;
+    // Load GPU Extensions
+    core.SetConfig({{InferenceEngine::PluginConfigParams::KEY_CONFIG_FILE, "<path_to_the_xml_file>"}}, "GPU");
+    //! [part0]
 
-//! [part1]
-core.SetConfig({ { PluginConfigParams::KEY_DUMP_KERNELS, PluginConfigParams::YES } }, "GPU");
-//! [part1]
+    //! [part1]
+    core.SetConfig({{PluginConfigParams::KEY_DUMP_KERNELS, PluginConfigParams::YES}}, "GPU");
+    //! [part1]
 
-return 0;
+    return 0;
 }
16 changes: 8 additions & 8 deletions docs/snippets/GPU_Kernels_Tuning.cpp
@@ -1,13 +1,13 @@
 #include <inference_engine.hpp>
 
 int main() {
-using namespace InferenceEngine;
-//! [part0]
-Core ie;
-ie.SetConfig({{ CONFIG_KEY(TUNING_MODE), CONFIG_VALUE(TUNING_CREATE) }}, "GPU");
-ie.SetConfig({{ CONFIG_KEY(TUNING_FILE), "/path/to/tuning/file.json" }}, "GPU");
-// Further LoadNetwork calls will use the specified tuning parameters
-//! [part0]
+    using namespace InferenceEngine;
+    //! [part0]
+    Core ie;
+    ie.SetConfig({{CONFIG_KEY(TUNING_MODE), CONFIG_VALUE(TUNING_CREATE)}}, "GPU");
+    ie.SetConfig({{CONFIG_KEY(TUNING_FILE), "/path/to/tuning/file.json"}}, "GPU");
+    // Further LoadNetwork calls will use the specified tuning parameters
+    //! [part0]
 
-return 0;
+    return 0;
 }
15 changes: 7 additions & 8 deletions docs/snippets/Graph_debug_capabilities0.cpp
@@ -1,13 +1,12 @@
 #include <inference_engine.hpp>
 #include <ngraph/pass/visualize_tree.hpp>
 
-
 int main() {
-using namespace InferenceEngine;
-//! [part0]
-std::shared_ptr<ngraph::Function> nGraph;
-// ...
-ngraph::pass::VisualizeTree("after.png").run_on_function(nGraph); // Visualize the nGraph function to an image
-//! [part0]
-return 0;
+    using namespace InferenceEngine;
+    //! [part0]
+    std::shared_ptr<ngraph::Function> nGraph;
+    // ...
+    ngraph::pass::VisualizeTree("after.png").run_on_function(nGraph); // Visualize the nGraph function to an image
+    //! [part0]
+    return 0;
 }
17 changes: 8 additions & 9 deletions docs/snippets/Graph_debug_capabilities1.cpp
@@ -1,14 +1,13 @@
 #include <inference_engine.hpp>
 #include <ngraph/pass/visualize_tree.hpp>
 
-
 int main() {
-using namespace InferenceEngine;
-//! [part1]
-std::shared_ptr<ngraph::Function> nGraph;
-// ...
-CNNNetwork network(nGraph);
-network.serialize("test_ir.xml", "test_ir.bin");
-//! [part1]
-return 0;
+    using namespace InferenceEngine;
+    //! [part1]
+    std::shared_ptr<ngraph::Function> nGraph;
+    // ...
+    CNNNetwork network(nGraph);
+    network.serialize("test_ir.xml", "test_ir.bin");
+    //! [part1]
+    return 0;
 }
17 changes: 9 additions & 8 deletions docs/snippets/HETERO0.cpp
@@ -1,14 +1,15 @@
 #include <inference_engine.hpp>
 #include <ngraph/ngraph.hpp>
+
 #include "hetero/hetero_plugin_config.hpp"
 
 int main() {
-InferenceEngine::Core core;
-auto network = core.ReadNetwork("sample.xml");
-auto function = network.getFunction();
-//! [part0]
-for (auto && op : function->get_ops())
-op->get_rt_info()["affinity"] = std::make_shared<ngraph::VariantWrapper<std::string>>("CPU");
-//! [part0]
-return 0;
+    InferenceEngine::Core core;
+    auto network = core.ReadNetwork("sample.xml");
+    auto function = network.getFunction();
+    //! [part0]
+    for (auto&& op : function->get_ops())
+        op->get_rt_info()["affinity"] = std::make_shared<ngraph::VariantWrapper<std::string>>("CPU");
+    //! [part0]
+    return 0;
 }