Skip to content

Commit

Permalink
Added MockNPU/CPU, FakeNPU/CPU and functional tests
Browse files Browse the repository at this point in the history
  • Loading branch information
AsyaPronina committed Jul 15, 2024
1 parent ced1970 commit 934ed93
Show file tree
Hide file tree
Showing 10 changed files with 1,520 additions and 0 deletions.
6 changes: 6 additions & 0 deletions src/plugins/intel_npu/tests/functional/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@ ov_add_test_target(
"${CMAKE_CURRENT_SOURCE_DIR}/shared_tests_instances"
"${CMAKE_CURRENT_SOURCE_DIR}/behavior"
"${CMAKE_CURRENT_SOURCE_DIR}/internal"
${OpenVINO_SOURCE_DIR}/src/plugins/intel_npu/src/plugin/npuw
${OpenVINO_SOURCE_DIR}/src/plugins/intel_npu/src/utils/include
${OpenVINO_SOURCE_DIR}/src/plugins/intel_npu/src/plugin/include
${OpenVINO_SOURCE_DIR}/src/plugins/intel_npu/src/al/include
LINK_LIBRARIES
${OPTIONAL_FUNC_TESTS_LIBS}
openvino::func_test_utils
Expand All @@ -56,6 +60,8 @@ if(MSVC)
target_compile_options(${TARGET_NAME} PRIVATE /Zc:preprocessor)
endif()

target_compile_definitions(${TARGET_NAME} PRIVATE CI_BUILD_NUMBER=\"mock_version\")

install(
TARGETS ${TARGET_NAME}
RUNTIME DESTINATION tests
Expand Down
150 changes: 150 additions & 0 deletions src/plugins/intel_npu/tests/functional/behavior/npuw/memory.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include <functional>
#include <chrono>
#include <fstream>
#include <algorithm>
#include "openvino/runtime/tensor.hpp"

namespace {

// Runs the given callable once and returns its wall-clock duration in
// milliseconds (steady_clock, so immune to system clock adjustments).
float ms_to_run(std::function<void()> &&body) {
    namespace khr = std::chrono;
    const auto s = khr::steady_clock::now();
    body();
    const auto f = khr::steady_clock::now();
    // Cast straight to a float-based milliseconds duration; the original
    // truncated to whole microseconds first, losing sub-microsecond precision.
    return khr::duration_cast<khr::duration<float, std::milli>>(f - s).count();
}

// Unit tag consumed by metric<>'s printer: labels values as milliseconds.
struct MSec {
    static constexpr const char *name = "ms";
};

// Accumulates a series of samples of type T and tracks min/avg/median/max.
// U is a unit tag type exposing a static `name` string (e.g. MSec) that is
// appended to every value when the metric is streamed out.
template<typename T, typename U>
class metric {
    std::vector<T> records;
    T vmin = std::numeric_limits<T>::max();
    // NB: lowest(), not min() -- for floating-point T, min() is the smallest
    // positive normal value, so a max tracker seeded with it could never
    // record a non-positive maximum. lowest() == min() for integral T.
    T vmax = std::numeric_limits<T>::lowest();

public:
    // Record one sample, updating the running min/max.
    void operator += (T &&t) {
        vmin = std::min(vmin, t);
        vmax = std::max(vmax, t);
        records.push_back(std::move(t));
    }
    // Arithmetic mean of all samples; 0 when nothing was recorded.
    // Each term is divided individually to avoid accumulating a huge sum.
    float avg() const {
        float acc = 0.f;
        for (auto &&t : records) {
            acc += static_cast<float>(t) / records.size();
        }
        return acc;
    }
    // Median sample (upper median for an even count).
    // Returns a value-initialized T when nothing was recorded (the original
    // indexed into an empty vector, which is undefined behavior).
    T med() const {
        if (records.empty()) {
            return T{};
        }
        std::vector<T> cpy(records);
        std::nth_element(cpy.begin(), cpy.begin() + cpy.size()/2, cpy.end());
        return cpy[cpy.size()/2];
    }
    // Prints "[ min = ..ms, avg = ..ms, med = ..ms, max = ..ms in N records ]".
    friend std::ostream& operator<< (std::ostream &os, const metric<T, U> &m) {
        const char *units = U::name;
        os << "[ min = " << m.vmin << units
           << ", avg = " << m.avg() << units
           << ", med = " << m.med() << units
           << ", max = " << m.vmax << units
           << " in " << m.records.size()
           << " records ]";
        return os;
    }
};

// Walks every byte of the tensor and returns the maximum absolute difference
// between adjacent bytes. The result is discarded at every call site in this
// file -- the real purpose is to force a full read of the tensor's memory
// after a copy/load so that timing reflects actually-resident data.
uint8_t tmax(const ov::Tensor &t) {
    auto *ptr = static_cast<uint8_t*>(t.data());

    // Keep the byte count in size_t: the original narrowed to int, which
    // would overflow for tensors >= 2 GiB.
    const std::size_t size = t.get_byte_size();
    if (size == 0) {
        return 0;
    }
    uint8_t prev = ptr[0], m = 0;
    for (std::size_t i = 1; i < size; i++) {
        const uint8_t diff = prev > ptr[i] ? prev - ptr[i] : ptr[i] - prev;
        m = std::max(m, diff);
        prev = ptr[i];  // the original never advanced `prev`, so every byte
                        // was compared against ptr[0] instead of its neighbor
    }
    return m;
}

} // namespace

// Benchmarks buffer-to-buffer tensor copies: repeatedly copies a single
// ~400 MiB f16 tensor (196*1024*1024 elements) into a second buffer and
// touches the destination after each copy. Disabled by default; run manually.
TEST(NPUWMemory, DISABLED_TestCopy400MiB_Buf2Buf) {

    const auto elem_type  = ov::element::f16;
    const auto elem_shape = ov::Shape{196,1024,1024};

    ov::Tensor src(elem_type, elem_shape);
    ov::Tensor dst(elem_type, elem_shape);

    constexpr int NUM_ITERS = 32;
    metric<float, MSec> copy_ms, proc_ms;
    for (int iter = 0; iter < NUM_ITERS; iter++) {
        copy_ms += ms_to_run([&](){ src.copy_to(dst); });  // time the copy
        proc_ms += ms_to_run([&](){ tmax(dst); });         // time a full read
    }
    std::cout << copy_ms << std::endl;
    std::cout << proc_ms << std::endl;
}

// Same copy benchmark, but sourcing from a ~13 GiB bank of 32 distinct
// ~400 MiB tensors so each copy reads cold memory rather than a cached
// buffer. Disabled by default; run manually.
TEST(NPUWMemory, DISABLED_TestCopy400MiB_Buf2Buf_13GiB) {

    const auto elem_type  = ov::element::f16;
    const auto elem_shape = ov::Shape{196,1024,1024};

    constexpr int BLOCKS = 32;
    std::vector<ov::Tensor> bank;
    bank.reserve(BLOCKS);
    for (int idx = 0; idx < BLOCKS; idx++) {
        bank.push_back(ov::Tensor(elem_type, elem_shape));
    }

    ov::Tensor dst(elem_type, elem_shape);
    metric<float, MSec> copy_ms, proc_ms;
    for (auto &&src : bank) {
        copy_ms += ms_to_run([&](){ src.copy_to(dst); });  // time the copy
        proc_ms += ms_to_run([&](){ tmax(dst); });         // time a full read
    }
    std::cout << copy_ms << std::endl;
    std::cout << proc_ms << std::endl;
}

// Benchmarks disk-to-buffer reads: streams 32 ~400 MiB tensor dumps from disk
// into a single buffer via C stdio, touching the buffer after each read.
// Set NPUW_TEST_DUMP to generate the dump files first; without them the test
// skips. Disabled by default; run manually.
TEST(NPUWMemory, DISABLED_TestCopy400MiB_Disk2Buf_13GiB) {
    const auto type = ov::element::f16;
    const auto shape = ov::Shape{196,1024,1024};
    const int BLOCKS = 32;

    ov::Tensor t(type, shape);
    const std::size_t SIZE = t.get_byte_size();

    // Prepare data first
    std::vector<std::string> flist;
    for (int i = 0; i < BLOCKS; i++) {
        flist.push_back("tmp_tensor_" + std::to_string(i) + ".bin");
    }
    if (std::getenv("NPUW_TEST_DUMP")) {
        std::cout << "Generating tensors..." << std::endl;
        for (auto &&f : flist) {
            std::ofstream ofs(f, std::ios_base::out | std::ios_base::binary);
            ofs.write(static_cast<const char*>(t.data()),
                      static_cast<std::streamsize>(t.get_byte_size()));
        }
    }

    // Now test; read with good ol' stdio
    metric<float, MSec> read_ms, proc_ms;
    for (auto &&f : flist) {
        // "rb": the dumps are binary, so open in binary mode. Plain "r"
        // (as originally written) applies CRLF/^Z text-mode translation on
        // Windows and silently corrupts/truncates the read.
        FILE *fp = fopen(f.c_str(), "rb");
        if (!fp) {
            GTEST_SKIP();  // dumps not generated -- see NPUW_TEST_DUMP above
        }
        std::size_t bytes_read = 0;
        read_ms += ms_to_run([&](){ bytes_read = fread(t.data(), 1, SIZE, fp); });
        fclose(fp);
        // A short read would make the timing meaningless; fail loudly
        // instead of silently discarding fread's result.
        ASSERT_EQ(SIZE, bytes_read);
        proc_ms += ms_to_run([&](){ tmax(t); });  // time a full read of the buffer
    }
    std::cout << read_ms << std::endl;
    std::cout << proc_ms << std::endl;
}
Loading

0 comments on commit 934ed93

Please sign in to comment.