diff --git a/samples/cpp/benchmark_app/benchmark_app.hpp b/samples/cpp/benchmark_app/benchmark_app.hpp
index bd73b88122d56d..7650c2cd99db4d 100644
--- a/samples/cpp/benchmark_app/benchmark_app.hpp
+++ b/samples/cpp/benchmark_app/benchmark_app.hpp
@@ -14,15 +14,6 @@
#include "gflags/gflags.h"
-// gflags supports uint32 starting from v2.2 only
-#ifndef DEFINE_uint32
-# ifdef GFLAGS_NAMESPACE
-# define DEFINE_uint32(name, val, txt) DEFINE_VARIABLE(GFLAGS_NAMESPACE::uint32, U, name, val, txt)
-# else
-# define DEFINE_uint32(name, val, txt) DEFINE_VARIABLE(gflags::uint32, U, name, val, txt)
-# endif
-#endif
-
/// @brief message for help argument
static const char help_message[] = "Print the usage message";
@@ -300,27 +291,27 @@ DEFINE_string(c, "", custom_cldnn_message);
/// @brief Iterations count (default 0)
/// Sync mode: iterations count
/// Async mode: StartAsync counts
-DEFINE_uint32(niter, 0, iterations_count_message);
+DEFINE_uint64(niter, 0, iterations_count_message);
/// @brief Time to execute topology in seconds
-DEFINE_uint32(t, 0, execution_time_message);
+DEFINE_uint64(t, 0, execution_time_message);
/// @brief Number of infer requests in parallel
-DEFINE_uint32(nireq, 0, infer_requests_count_message);
+DEFINE_uint64(nireq, 0, infer_requests_count_message);
/// @brief Number of threads to use for inference on the CPU in throughput mode (also affects Hetero
/// cases)
-DEFINE_uint32(nthreads, 0, infer_num_threads_message);
+DEFINE_uint64(nthreads, 0, infer_num_threads_message);
/// @brief Number of streams to use for inference on the CPU (also affects Hetero cases)
DEFINE_string(nstreams, "", infer_num_streams_message);
/// @brief The percentile which will be reported in latency metric
-DEFINE_uint32(latency_percentile, 50, infer_latency_percentile_message);
+DEFINE_uint64(latency_percentile, 50, infer_latency_percentile_message);
/// @brief Define parameter for batch size
/// Default is 0 (that means don't specify)
-DEFINE_uint32(b, 0, batch_size_message);
+DEFINE_uint64(b, 0, batch_size_message);
// @brief Enable plugin messages
DEFINE_string(pin, "", infer_threads_pinning_message);
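
Side note on the compatibility shim removed above: gflags only gained DEFINE_uint32 in v2.2 (which is why the macro existed), while DEFINE_uint64 was already available in pre-2.2 releases, so moving these flags to 64-bit is presumably what makes the shim unnecessary. A minimal standalone sketch of defining and reading a 64-bit flag; "example_niter" is an illustrative name, not a flag from this patch:

// Minimal sketch of a 64-bit gflags flag; "example_niter" is illustrative only.
#include <cstdint>
#include <iostream>

#include <gflags/gflags.h>

DEFINE_uint64(example_niter, 0, "Example iteration count (0 means no limit)");

int main(int argc, char* argv[]) {
    gflags::ParseCommandLineFlags(&argc, &argv, true);
    uint64_t niter = FLAGS_example_niter;  // FLAGS_* for a uint64 flag holds an unsigned 64-bit value
    std::cout << "niter = " << niter << std::endl;
    return 0;
}
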
diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp
index 2655997996b593..c849cc7eba539a 100644
--- a/samples/cpp/benchmark_app/main.cpp
+++ b/samples/cpp/benchmark_app/main.cpp
@@ -379,7 +379,7 @@ int main(int argc, char* argv[]) {
auto ov_perf_hint = get_performance_hint(device, core);
device_config.emplace(ov::hint::performance_mode(ov_perf_hint));
if (FLAGS_nireq != 0)
- device_config.emplace(ov::hint::num_requests(FLAGS_nireq));
+ device_config.emplace(ov::hint::num_requests(unsigned(FLAGS_nireq)));
// Set performance counter
if (isFlagSetInCommandLine("pc")) {
@@ -553,7 +553,7 @@ int main(int argc, char* argv[]) {
auto set_nthreads_pin = [&](const std::string& str) {
auto property_name = str == "nthreads" ? ov::inference_num_threads.name() : ov::affinity.name();
- auto property = str == "nthreads" ? ov::inference_num_threads(FLAGS_nthreads)
+ auto property = str == "nthreads" ? ov::inference_num_threads(int(FLAGS_nthreads))
: ov::affinity(fix_pin_option(FLAGS_pin));
if (supported(property_name) || device_name == "AUTO") {
// create nthreads/pin primary property for HW device or AUTO if -d is AUTO directly.
@@ -921,7 +921,7 @@ int main(int argc, char* argv[]) {
}
// Number of requests
- uint32_t nireq = FLAGS_nireq;
+ uint64_t nireq = FLAGS_nireq;
if (nireq == 0) {
if (FLAGS_api == "sync") {
nireq = 1;
@@ -938,7 +938,7 @@ int main(int argc, char* argv[]) {
}
// Iteration limit
- uint32_t niter = FLAGS_niter;
+ uint64_t niter = FLAGS_niter;
size_t shape_groups_num = app_inputs_info.size();
if ((niter > 0) && (FLAGS_api == "async")) {
if (shape_groups_num > nireq) {
@@ -958,7 +958,7 @@ int main(int argc, char* argv[]) {
}
// Time limit
- uint32_t duration_seconds = 0;
+ uint64_t duration_seconds = 0;
if (FLAGS_t != 0) {
// time limit
duration_seconds = FLAGS_t;
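
The two explicit casts above, unsigned(FLAGS_nireq) and int(FLAGS_nthreads), narrow the now 64-bit flag values back to the 32-bit types the ov::hint::num_requests and ov::inference_num_threads properties take; a bare cast silently truncates anything above the target range. A hedged alternative sketch, using a hypothetical checked_narrow helper that is not part of this patch:

// Hypothetical checked-narrowing helper (not part of this patch): verifies the
// 64-bit flag value fits into the narrower property type before casting.
#include <cstdint>
#include <limits>
#include <stdexcept>
#include <string>

template <typename Narrow>
Narrow checked_narrow(uint64_t value, const std::string& flag_name) {
    if (value > static_cast<uint64_t>(std::numeric_limits<Narrow>::max())) {
        throw std::invalid_argument("-" + flag_name + " value " + std::to_string(value) +
                                    " does not fit into the target property type");
    }
    return static_cast<Narrow>(value);
}

// Usage sketch: device_config.emplace(ov::hint::num_requests(checked_narrow<unsigned>(FLAGS_nireq, "nireq")));
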
diff --git a/samples/cpp/benchmark_app/utils.hpp b/samples/cpp/benchmark_app/utils.hpp
index 6bb1eeabf61519..6db64df35a5aeb 100644
--- a/samples/cpp/benchmark_app/utils.hpp
+++ b/samples/cpp/benchmark_app/utils.hpp
@@ -15,11 +15,11 @@
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::nanoseconds ns;
-inline uint64_t get_duration_in_milliseconds(uint32_t duration) {
+inline uint64_t get_duration_in_milliseconds(uint64_t duration) {
return duration * 1000LL;
}
-inline uint64_t get_duration_in_nanoseconds(uint32_t duration) {
+inline uint64_t get_duration_in_nanoseconds(uint64_t duration) {
return duration * 1000000000LL;
}
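
For reference, the widened duration helpers above could also be written with std::chrono so the unit factors come from the library rather than hand-written literals. A sketch, not a change proposed by this patch; the _chrono suffix only distinguishes these from the real functions:

// Sketch (not part of the patch): the same conversions via std::chrono.
#include <chrono>
#include <cstdint>

inline uint64_t get_duration_in_milliseconds_chrono(uint64_t duration) {
    return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(duration)).count();
}

inline uint64_t get_duration_in_nanoseconds_chrono(uint64_t duration) {
    return std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::seconds(duration)).count();
}
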
diff --git a/tools/legacy/benchmark_app/benchmark_app.hpp b/tools/legacy/benchmark_app/benchmark_app.hpp
index b92f211f118e3a..52b8a65d3a48d1 100644
--- a/tools/legacy/benchmark_app/benchmark_app.hpp
+++ b/tools/legacy/benchmark_app/benchmark_app.hpp
@@ -6,15 +6,6 @@
#include <gflags/gflags.h>
-// gflags supports uint32 starting from v2.2 only
-#ifndef DEFINE_uint32
-# ifdef GFLAGS_NAMESPACE
-# define DEFINE_uint32(name, val, txt) DEFINE_VARIABLE(GFLAGS_NAMESPACE::uint32, U, name, val, txt)
-# else
-# define DEFINE_uint32(name, val, txt) DEFINE_VARIABLE(gflags::uint32, U, name, val, txt)
-# endif
-#endif
-
#include
#include
#include
@@ -183,17 +174,17 @@ DEFINE_string(c, "", custom_cldnn_message);
/// @brief Iterations count (default 0)
/// Sync mode: iterations count
/// Async mode: StartAsync counts
-DEFINE_uint32(niter, 0, iterations_count_message);
+DEFINE_uint64(niter, 0, iterations_count_message);
/// @brief Time to execute topology in seconds
-DEFINE_uint32(t, 0, execution_time_message);
+DEFINE_uint64(t, 0, execution_time_message);
/// @brief Number of infer requests in parallel
-DEFINE_uint32(nireq, 0, infer_requests_count_message);
+DEFINE_uint64(nireq, 0, infer_requests_count_message);
/// @brief Number of threads to use for inference on the CPU in throughput mode (also affects Hetero
/// cases)
-DEFINE_uint32(nthreads, 0, infer_num_threads_message);
+DEFINE_uint64(nthreads, 0, infer_num_threads_message);
/// @brief Number of streams to use for inference on the CPU (also affects Hetero cases)
DEFINE_string(nstreams, "", infer_num_streams_message);
@@ -203,7 +194,7 @@ DEFINE_bool(enforcebf16, false, enforce_bf16_message);
/// @brief Define parameter for batch size
/// Default is 0 (that means don't specify)
-DEFINE_uint32(b, 0, batch_size_message);
+DEFINE_uint64(b, 0, batch_size_message);
// @brief Enable plugin messages
DEFINE_string(pin, "", infer_threads_pinning_message);
diff --git a/tools/legacy/benchmark_app/main.cpp b/tools/legacy/benchmark_app/main.cpp
index 9445a8af13bfd1..f0928d2b77fc6f 100644
--- a/tools/legacy/benchmark_app/main.cpp
+++ b/tools/legacy/benchmark_app/main.cpp
@@ -28,11 +28,11 @@ using namespace InferenceEngine;
static const size_t progressBarDefaultTotalCount = 1000;
-uint64_t getDurationInMilliseconds(uint32_t duration) {
+uint64_t getDurationInMilliseconds(uint64_t duration) {
return duration * 1000LL;
}
-uint64_t getDurationInNanoseconds(uint32_t duration) {
+uint64_t getDurationInNanoseconds(uint64_t duration) {
return duration * 1000000000LL;
}
@@ -454,7 +454,7 @@ int main(int argc, char* argv[]) {
}
// Number of requests
- uint32_t nireq = FLAGS_nireq;
+ uint64_t nireq = FLAGS_nireq;
if (nireq == 0) {
if (FLAGS_api == "sync") {
nireq = 1;
@@ -472,7 +472,7 @@ int main(int argc, char* argv[]) {
}
// Iteration limit
- uint32_t niter = FLAGS_niter;
+ uint64_t niter = FLAGS_niter;
if ((niter > 0) && (FLAGS_api == "async")) {
niter = ((niter + nireq - 1) / nireq) * nireq;
if (FLAGS_niter != niter) {
@@ -482,7 +482,7 @@ int main(int argc, char* argv[]) {
}
// Time limit
- uint32_t duration_seconds = 0;
+ uint64_t duration_seconds = 0;
if (FLAGS_t != 0) {
// time limit
duration_seconds = FLAGS_t;
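
A closing note on the unchanged rounding logic touched by the niter hunks: in async mode the benchmark rounds the requested iteration count up to the next multiple of the number of infer requests via niter = ((niter + nireq - 1) / nireq) * nireq. A standalone worked sketch with illustrative values:

// Worked example of the async-mode round-up (standalone sketch, not part of the patch).
#include <cstdint>
#include <iostream>

int main() {
    uint64_t niter = 100;  // as if requested with -niter 100
    uint64_t nireq = 8;    // as if requested with -nireq 8
    // Round niter up to the next multiple of nireq, as the benchmark does for async mode.
    uint64_t rounded = ((niter + nireq - 1) / nireq) * nireq;
    std::cout << rounded << std::endl;  // prints 104
    return 0;
}
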