Merge remote-tracking branch 'upstream/master' into ci/gha/pypi-cache-problems
akashchi committed Dec 12, 2024
2 parents 46cea19 + 702ce05 commit c560048
Showing 666 changed files with 30,628 additions and 23,480 deletions.
.github/workflows/send_workflows_to_opentelemetry.yml
@@ -1,38 +1,38 @@
-name: Export workflow metrics (BETA)
+name: Export workflow metrics

on:
  workflow_run:
    workflows:
-      - Android ARM64 with vcpkg
-      - Android x64
-      - Cleanup caches
-      - Coverity (Ubuntu 20.04, Python 3.11)
-      - Debian 10 ARM
-      - Fedora 29 (RHEL 8.4), Python 3.9
-      - Linux (Ubuntu 20.04, Python 3.9)
-      - Linux (Ubuntu 22.04, Python 3.11)
-      - Linux (Ubuntu 24.04, Python 3.12)
-      - Linux ARM64 (Ubuntu 20.04, Python 3.11)
-      - Linux Static CC (Ubuntu 22.04, Python 3.11, Clang)
-      - Linux RISC-V with Conan (Ubuntu 22.04, Python 3.10)
-      - Linux (Ubuntu 22.04, Python 3.11, Intel DPC++ Compiler)
-      - Linux CPU Plugin Snippets with LIBXSMM (Ubuntu 20.04)
-      - Linux Sanitizers (Ubuntu 20.04, Python 3.9)
-      - macOS (Python 3.11)
-      - macOS ARM64 (Python 3.11)
-      - Manylinux 2014
-      - Webassembly
-      - Windows (VS 2019, Python 3.11, Release)
-      - Windows (VS 2019, Python 3.11, Debug)
-      - Windows Conditional Compilation (VS 2022, Python 3.11)
-      - Rerun Workflow with Known Errors
+      - "Android ARM64 with vcpkg"
+      - "Android x64"
+      - "Cleanup caches"
+      - "Coverity (Ubuntu 20.04, Python 3.11)"
+      - "Debian 10 ARM"
+      - "Fedora 29 (RHEL 8.4), Python 3.9"
+      - "Linux (Ubuntu 20.04, Python 3.9)"
+      - "Linux (Ubuntu 22.04, Python 3.11)"
+      - "Linux (Ubuntu 24.04, Python 3.12)"
+      - "Linux ARM64 (Ubuntu 20.04, Python 3.11)"
+      - "Linux Static CC (Ubuntu 22.04, Python 3.11, Clang)"
+      - "Linux RISC-V with Conan (Ubuntu 22.04, Python 3.10)"
+      - "Linux (Ubuntu 22.04, Python 3.11, Intel DPC\\+\\+ Compiler)"
+      - "Linux CPU Plugin Snippets with LIBXSMM (Ubuntu 20.04)"
+      - "Linux Sanitizers (Ubuntu 20.04, Python 3.9)"
+      - "macOS (Python 3.11)"
+      - "macOS ARM64 (Python 3.11)"
+      - "Manylinux 2014"
+      - "Webassembly"
+      - "Windows (VS 2019, Python 3.11, Release)"
+      - "Windows (VS 2019, Python 3.11, Debug)"
+      - "Windows Conditional Compilation (VS 2022, Python 3.11)"
+      - "Rerun Workflow with Known Errors"
    types:
      - completed

permissions: read-all

jobs:
-  otel-export-trace:
+  export-workflow-metrics:
    name: Export finished workflow metrics
    runs-on: aks-linux-2-cores-8gb
    if: ${{ github.repository_owner == 'openvinotoolkit' }}
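Note that the renamed list also quotes every workflow name and escapes the "+" characters in the DPC++ entry, which suggests (the consumer itself is not shown in this diff) that the exporter matches these entries against workflow names as regular-expression patterns. A minimal Python sketch of why that escaping matters, under that assumption:

    import re

    # "DPC++"-style names are invalid as regexes: "+" is a repeat operator,
    # so the unescaped pattern does not even compile.
    try:
        re.compile("Intel DPC++ Compiler")
    except re.error as err:
        print("invalid pattern:", err)  # multiple repeat

    # Escaped as in the YAML above ("\\+\\+" in YAML source parses to "\+\+"),
    # the pattern compiles and matches the literal workflow name.
    pattern = re.compile("Intel DPC\\+\\+ Compiler")
    assert pattern.search("Linux (Ubuntu 22.04, Python 3.11, Intel DPC++ Compiler)")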
75 changes: 75 additions & 0 deletions .github/workflows/workflows_to_track.txt
@@ -0,0 +1,75 @@
==> ./stale_prs_and_issues.yml <==
name: 'Close stale issues and PRs'
==> ./build_doc.yml <==
name: Documentation
==> ./ovc.yml <==
name: OVC
==> ./ubuntu_22.yml <==
name: Linux (Ubuntu 22.04, Python 3.11)
==> ./windows_conditional_compilation.yml <==
name: Windows Conditional Compilation (VS 2022, Python 3.11)
==> ./send_workflows_to_opentelemetry.yml <==
name: Export workflow metrics (BETA)
==> ./ubuntu_22_dpcpp.yml <==
name: Linux (Ubuntu 22.04, Python 3.11, Intel DPC++ Compiler)
==> ./coverage.yml <==
name: Code coverage
==> ./linux_conditional_compilation.yml <==
name: Linux Static CC (Ubuntu 22.04, Python 3.11, Clang)
==> ./workflows_scans.yml <==
name: GitHub Actions Workflows Scans
==> ./check_pr_commits.yml <==
name: PR Commits
==> ./windows_vs2019_debug.yml <==
name: Windows (VS 2019, Python 3.11, Debug)
==> ./files_size.yml <==
name: Files Size
==> ./cleanup_caches.yml <==
name: Cleanup caches
==> ./mac.yml <==
name: macOS (Python 3.11)
==> ./merge_queue_stub.yml <==
==> ./debian_10_arm.yml <==
name: Debian 10 ARM
==> ./android_arm64.yml <==
name: Android ARM64 with vcpkg
==> ./code_style.yml <==
name: Code Style
==> ./manylinux_2014.yml <==
name: Manylinux 2014
==> ./linux_arm64.yml <==
name: Linux ARM64 (Ubuntu 20.04, Python 3.11)
==> ./dev_cpu_linux_snippets_libxsmm.yml <==
name: Linux CPU Plugin Snippets with LIBXSMM (Ubuntu 20.04)
==> ./labeler.yml <==
name: "Pull Request Labeler"
==> ./mac_arm64.yml <==
name: macOS ARM64 (Python 3.11)
==> ./dependency_review.yml <==
name: 'Dependency Review'
==> ./fedora_29.yml <==
name: Fedora 29 (RHEL 8.4), Python 3.9
==> ./code_snippets.yml <==
name: Code snippets
==> ./ubuntu_20.yml <==
name: Linux (Ubuntu 20.04, Python 3.9)
==> ./linux_riscv.yml <==
name: Linux RISC-V with Conan (Ubuntu 22.04, Python 3.10)
==> ./android_x64.yml <==
name: Android x64
==> ./workflow_rerunner.yml <==
name: Rerun Workflow with Known Errors
==> ./linux_sanitizers.yml <==
name: Linux Sanitizers (Ubuntu 20.04, Python 3.9)
==> ./py_checks.yml <==
name: Python API Checks
==> ./webassembly.yml <==
name: Webassembly
==> ./ubuntu_24.yml <==
name: Linux (Ubuntu 24.04, Python 3.12)
==> ./assign_issue.yml <==
name: Take Issue
==> ./windows_vs2019_release.yml <==
name: Windows (VS 2019, Python 3.11, Release)
==> ./coverity.yml <==
name: Coverity (Ubuntu 20.04, Python 3.11)
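The new workflows_to_track.txt appears to be captured `head -1`-style output: each `==> ./file.yml <==` marker names a workflow file and is followed by that file's `name:` line (merge_queue_stub.yml has none). A small Python sketch of parsing this layout; `parse_tracked_workflows` is a hypothetical helper for illustration, not part of the commit:

    def parse_tracked_workflows(path: str) -> dict:
        """Map workflow file path -> workflow display name (or None)."""
        mapping, current = {}, None
        with open(path) as f:
            for raw in f:
                line = raw.strip()
                if line.startswith("==>") and line.endswith("<=="):
                    current = line[3:-3].strip()
                    mapping[current] = None  # e.g. merge_queue_stub.yml has no name
                elif line.startswith("name:") and current:
                    mapping[current] = line.split(":", 1)[1].strip().strip("'\"")
        return mapping

    tracked = parse_tracked_workflows(".github/workflows/workflows_to_track.txt")
    print(tracked.get("./ubuntu_22.yml"))  # Linux (Ubuntu 22.04, Python 3.11)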
@@ -3,7 +3,7 @@ Most Efficient Large Language Models for AI PC

This page is regularly updated to help you identify the best-performing LLMs on the
Intel® Core™ Ultra processor family and AI PCs.
-The current data is as of OpenVINO 2024.4, 24 Oct. 2024
+The current data is as of OpenVINO 2024.4, 20 Nov. 2024.

The tables below list the key performance indicators for inference on built-in GPUs.

@@ -44,6 +44,7 @@ Below are the instructions on how to install the OpenCL packages on supported Li
.. code-block:: sh

   apt-get install -y ocl-icd-libopencl1 intel-opencl-icd intel-level-zero-gpu level-zero
+   sudo usermod -a -G render $LOGNAME
.. tab-item:: Ubuntu 20.04 LTS
:sync: ubuntu-20
@@ -57,6 +58,7 @@ Below are the instructions on how to install the OpenCL packages on supported Li
   echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu focal-legacy main' | tee /etc/apt/sources.list.d/intel.gpu.focal.list && \
   apt-get update
   apt-get update && apt-get install -y --no-install-recommends intel-opencl-icd intel-level-zero-gpu level-zero
+   sudo usermod -a -G render $LOGNAME
Alternatively, download older `deb` version from `here <https://github.com/intel/compute-runtime/releases>`__. Note that older driver version might not include some of the bug fixes and might be not supported on some latest platforms. Check the supported hardware for the versions you are installing.
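Once the driver packages are installed and the user is in the render group (membership takes effect after re-login), GPU visibility can be sanity-checked from Python with the standard OpenVINO API. A quick check, not part of the documented install steps:

    import openvino as ov

    core = ov.Core()
    # "GPU" shows up here once the OpenCL / Level Zero driver is usable by
    # the current user; otherwise only "CPU" (and other plugins) are listed.
    print(core.available_devices)  # e.g. ['CPU', 'GPU']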

@@ -135,6 +137,6 @@ Additional Resources
* `Docker CI framework for Intel® Distribution of OpenVINO™ toolkit <https://github.com/openvinotoolkit/docker_ci/blob/master/README.md>`__
* `Get Started with DockerHub CI for Intel® Distribution of OpenVINO™ toolkit <https://github.com/openvinotoolkit/docker_ci/blob/master/get-started.md>`__
* `Dockerfiles with Intel® Distribution of OpenVINO™ toolkit <https://github.com/openvinotoolkit/docker_ci/blob/master/dockerfiles/README.md>`__
-
+* `GPU Driver issue troubleshoot <https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/docs/gpu_plugin_driver_troubleshooting.md>`
Binary file modified docs/sphinx_setup/_static/download/GenAI_Quick_Start_Guide.pdf
2 changes: 1 addition & 1 deletion samples/python/benchmark/bert_benchmark/bert_benchmark.py
@@ -9,8 +9,8 @@
import tempfile
from time import perf_counter

-import datasets
import openvino as ov
+import datasets
from openvino.runtime import get_version
from transformers import AutoTokenizer
from transformers.onnx import export
6 changes: 6 additions & 0 deletions src/bindings/js/node/src/tensor.cpp
@@ -66,6 +66,12 @@ Napi::Object TensorWrap::wrap(Napi::Env env, ov::Tensor tensor) {
}

Napi::Value TensorWrap::get_data(const Napi::CallbackInfo& info) {
+    Napi::Env env = info.Env();
+    if (info.Length() > 0) {
+        reportError(env, "getData() does not accept any arguments.");
+        return env.Undefined();
+    }
+
    auto type = _tensor.get_element_type();

    switch (type) {
6 changes: 6 additions & 0 deletions src/bindings/js/node/tests/unit/tensor.test.js
@@ -121,6 +121,12 @@ describe('ov.Tensor tests', () => {
    assert.deepStrictEqual(tensor.getData(), data);
  });

+  it('getData should throw an error if arguments are provided', () => {
+    const tensor = new ov.Tensor(ov.element.f32, shape, data);
+    assert.throws(() => tensor.getData(1), {
+      message: 'getData() does not accept any arguments.',
+    });
+  });

  it('test tensor.data setter - different element type throws', () => {
    const float64Data = Float64Array.from([1, 2, 3]);
    const tensor = new ov.Tensor(ov.element.f32, [1, 3]);
13 changes: 10 additions & 3 deletions src/frontends/pytorch/src/op/stft.cpp
@@ -10,6 +10,7 @@
#include "openvino/op/convert_like.hpp"
#include "openvino/op/divide.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/op/sqrt.hpp"
#include "openvino/op/unsqueeze.hpp"
#include "utils.hpp"

@@ -66,8 +67,6 @@ OutputVector translate_stft(const NodeContext& context) {
    if (!context.input_is_none(5)) {
        normalized = context.const_input<bool>(5);
    }
-    PYTORCH_OP_CONVERSION_CHECK(!normalized,
-                                "aten::stft conversion is currently supported with normalized=False only.");

    bool onesided = true;
    if (!context.input_is_none(6)) {
@@ -85,7 +84,15 @@
    // Perform STFT
    constexpr bool transpose_frames = true;
    auto stft = context.mark_node(std::make_shared<v15::STFT>(input, window, n_fft, hop_length, transpose_frames));
-    return {stft};
+
+    if (normalized) {
+        const auto nfft_convert = context.mark_node(std::make_shared<v1::ConvertLike>(n_fft, stft));
+        const auto divisor = context.mark_node(std::make_shared<v0::Sqrt>(nfft_convert));
+        const auto norm_stft = context.mark_node(std::make_shared<v1::Divide>(stft, divisor));
+        return {norm_stft};
+    } else {
+        return {stft};
+    }
};
} // namespace op
} // namespace pytorch
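Rather than rejecting normalized=True, the converter now divides the plain STFT by sqrt(n_fft). A quick PyTorch sketch of the identity being matched, illustrative only and assuming PyTorch's documented normalized semantics:

    import torch

    x = torch.randn(1024)
    n_fft = 256

    plain = torch.stft(x, n_fft=n_fft, return_complex=True)
    normed = torch.stft(x, n_fft=n_fft, normalized=True, return_complex=True)

    # normalized=True scales the result by 1/sqrt(n_fft), which is exactly
    # the Divide(stft, Sqrt(n_fft)) subgraph the converter now emits.
    assert torch.allclose(normed, plain / n_fft ** 0.5, atol=1e-6)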
3 changes: 2 additions & 1 deletion src/plugins/intel_cpu/CMakeLists.txt
@@ -242,7 +242,8 @@ ov_add_plugin(NAME ${TARGET_NAME}
DEVICE_NAME "CPU"
AS_EXTENSION
VERSION_DEFINES_FOR src/plugin.cpp
SOURCES ${SOURCES} ${HEADERS})
SOURCES ${SOURCES} ${HEADERS}
ADD_CLANG_FORMAT)

# give a different file name depending on target platform architecture
if(ARM OR AARCH64)
31 changes: 15 additions & 16 deletions src/plugins/intel_cpu/src/cache/cache_entry.h
@@ -4,36 +4,34 @@

#pragma once

-#include <memory>
#include <functional>
+#include <memory>

#include "lru_cache.h"

namespace ov {
namespace intel_cpu {

class CacheEntryBase {
public:
-    enum class LookUpStatus : int8_t {
-        Hit,
-        Miss
-    };
+    enum class LookUpStatus : int8_t { Hit, Miss };

public:
    virtual ~CacheEntryBase() = default;
};

/**
 * @brief Class represents a templated record in multi cache
- * @tparam KeyType is a key type that must define hash() const method with return type convertible to size_t and define comparison operator.
+ * @tparam KeyType is a key type that must define hash() const method with return type convertible to size_t and define
+ * comparison operator.
 * @tparam ValType is a type that must meet all the requirements to the std::unordered_map mapped type
- * @tparam ImplType is a type for the internal storage. It must provide put(KeyType, ValueType) and ValueType get(const KeyType&)
- * interface and must have constructor of type ImplType(size_t).
+ * @tparam ImplType is a type for the internal storage. It must provide put(KeyType, ValueType) and ValueType get(const
+ * KeyType&) interface and must have constructor of type ImplType(size_t).
 *
 * @note In this implementation default constructed value objects are treated as empty objects.
 */

-template<typename KeyType,
-         typename ValType,
-         typename ImplType = LruCache<KeyType, ValType>>
+template <typename KeyType, typename ValType, typename ImplType = LruCache<KeyType, ValType>>
class CacheEntry : public CacheEntryBase {
public:
    using ResultType = std::pair<ValType, LookUpStatus>;
@@ -42,11 +40,12 @@ class CacheEntry : public CacheEntryBase {
    explicit CacheEntry(size_t capacity) : _impl(capacity) {}

    /**
-     * @brief Searches the key in the underlying storage and returns value if it exists, or creates a value using the builder functor and adds it to
-     * the underlying storage.
+     * @brief Searches the key in the underlying storage and returns value if it exists, or creates a value using the
+     * builder functor and adds it to the underlying storage.
     * @param key is the search key
     * @param builder is a callable object that creates the ValType object from the KeyType lval reference
-     * @return result of the operation which is a pair of the requested object of ValType and the status of whether the cache hit or miss occurred
+     * @return result of the operation which is a pair of the requested object of ValType and the status of whether the
+     * cache hit or miss occurred
     */

    ResultType getOrCreate(const KeyType& key, std::function<ValType(const KeyType&)> builder) {
@@ -70,5 +69,5 @@ class CacheEntry : public CacheEntryBase {
    ImplType _impl;
};

-} // namespace intel_cpu
-} // namespace ov
+}  // namespace intel_cpu
+}  // namespace ov
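For readers outside the C++ code, the contract described in the comments above is a compute-if-absent lookup over an LRU store. A rough Python equivalent of getOrCreate, assuming None stands in for the "default constructed, treated as empty" value:

    from typing import Callable, Dict, Tuple, TypeVar

    K = TypeVar("K")
    V = TypeVar("V")

    HIT, MISS = "Hit", "Miss"

    def get_or_create(cache: Dict[K, V], key: K, builder: Callable[[K], V]) -> Tuple[V, str]:
        """Return (value, status): the cached value on a hit; otherwise
        build, insert, and return it, mirroring CacheEntry::getOrCreate."""
        val = cache.get(key)
        if val is not None:
            return val, HIT
        val = builder(key)
        cache[key] = val
        return val, MISS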
21 changes: 11 additions & 10 deletions src/plugins/intel_cpu/src/cache/lru_cache.h
@@ -10,7 +10,8 @@

/**
 * @brief This is yet another implementation of a preemptive cache with LRU eviction policy.
- * @tparam Key is a key type that must define hash() const method with return type convertible to size_t and define comparison operator.
+ * @tparam Key is a key type that must define hash() const method with return type convertible to size_t and define
+ * comparison operator.
 * @tparam Value is a type that must meet all the requirements to the std::unordered_map mapped type
 *
 * @attention This cache implementation IS NOT THREAD SAFE!
@@ -19,7 +20,7 @@
namespace ov {
namespace intel_cpu {

-template<typename Key, typename Value>
+template <typename Key, typename Value>
class LruCache {
public:
using value_type = std::pair<Key, Value>;
@@ -33,7 +34,7 @@ class LruCache {
* @param value
*/

-    void put(const Key &key, const Value &val) {
+    void put(const Key& key, const Value& val) {
if (0 == _capacity) {
return;
}
@@ -56,7 +57,7 @@
* @return Value associated with the key or default constructed instance of the Value type.
*/

-    Value get(const Key &key) {
+    Value get(const Key& key) {
auto itr = _cacheMapper.find(key);
if (itr == _cacheMapper.end()) {
return Value();
@@ -82,13 +83,13 @@
* @brief Returns the current capacity value
* @return the current capacity value
*/
-   size_t getCapacity() const noexcept {
-       return _capacity;
-   }
+    size_t getCapacity() const noexcept {
+        return _capacity;
+    }

private:
struct key_hasher {
-        std::size_t operator()(const Key &k) const {
+        std::size_t operator()(const Key& k) const {
return k.hash();
}
};
@@ -105,5 +106,5 @@
size_t _capacity;
};

-} // namespace intel_cpu
-} // namespace ov
+}  // namespace intel_cpu
+}  // namespace ov
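The header itself is a textbook LRU map (hash map plus recency list). A compact Python sketch of the same put/get/evict behavior, using OrderedDict in place of the list-plus-map layout; illustrative only:

    from collections import OrderedDict

    class LruCache:
        def __init__(self, capacity: int):
            self._capacity = capacity
            self._items: OrderedDict = OrderedDict()

        def put(self, key, val) -> None:
            if self._capacity == 0:
                return
            if key in self._items:
                self._items.move_to_end(key)      # refresh recency
            self._items[key] = val
            if len(self._items) > self._capacity:
                self._items.popitem(last=False)   # evict least recently used

        def get(self, key):
            if key not in self._items:
                return None                       # stands in for Value()
            self._items.move_to_end(key)
            return self._items[key]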
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/cache/multi_cache.cpp
@@ -9,5 +9,5 @@ namespace intel_cpu {

std::atomic_size_t MultiCache::_typeIdCounter{0};

-} // namespace intel_cpu
-} // namespace ov
+}  // namespace intel_cpu
+}  // namespace ov