Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
…into col2im_python
  • Loading branch information
p-wysocki committed May 22, 2024
2 parents 2dd2c30 + e7c60bb commit 72ad8b1
Show file tree
Hide file tree
Showing 33 changed files with 572 additions and 80 deletions.
6 changes: 3 additions & 3 deletions src/bindings/js/docs/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@
-DENABLE_WHEEL=OFF \
-DENABLE_PYTHON=OFF \
-DENABLE_INTEL_GPU=OFF \
-DCMAKE_INSTALL_PREFIX=../src/bindings/js/node/bin \
-DCMAKE_INSTALL_PREFIX="../src/bindings/js/node/bin" \
..
```
- Build the bindings:
Expand All @@ -58,11 +58,11 @@
- Run tests to make sure that **openvino-node** has been built successfully:
```bash
npm run test
```
```

## Usage

- Add the **openvino-node** package to your project by specifying it in **package.json**:
- Add the **openvino-node** package to your project by specifying it in **package.json**:
```json
"openvino-node": "file:*path-to-current-directory*"
```
Expand Down
9 changes: 5 additions & 4 deletions src/bindings/js/node/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,15 @@ if(WIN32)

set(CMAKE_JS_NODELIB_DEF ${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/node-lib.def)
set(CMAKE_JS_NODELIB_TARGET ${CMAKE_JS_LIB})
set(DELAYIMP_LIB delayimp.lib)
endif()

cmake_minimum_required(VERSION 3.14)

project(ov_node_addon)

set(CMAKE_CXX_STANDARD 17)
add_definitions(-DNAPI_VERSION=6)
add_definitions(-DNAPI_VERSION=8)

include(FetchContent)

Expand Down Expand Up @@ -76,11 +77,11 @@ target_include_directories(${PROJECT_NAME} PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/.."
)

target_link_libraries(${PROJECT_NAME} PRIVATE openvino::runtime ${CMAKE_JS_LIB})
target_link_libraries(${PROJECT_NAME} PRIVATE openvino::runtime ${DELAYIMP_LIB} ${CMAKE_JS_LIB})

if(MSVC AND CMAKE_JS_NODELIB_DEF AND CMAKE_JS_NODELIB_TARGET)
# Generate node.lib
execute_process(COMMAND ${CMAKE_AR} /def:${CMAKE_JS_NODELIB_DEF} /out:${CMAKE_JS_NODELIB_TARGET} ${CMAKE_STATIC_LINKER_FLAGS})
# Generate node.lib
execute_process(COMMAND ${CMAKE_AR} /def:${CMAKE_JS_NODELIB_DEF} /out:${CMAKE_JS_NODELIB_TARGET} ${CMAKE_STATIC_LINKER_FLAGS})
endif()

if(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,7 @@ def __init__(self, options):
"torch.ops.aten.avg_pool2d.default": None,
"torch.ops.aten.avg_pool3d.default": None,
"torch.ops.aten.baddbmm.default": None,
"torch.ops.aten.bitwise_and.Scalar": None,
"torch.ops.aten.bitwise_and.Tensor": None,
"torch.ops.aten.bitwise_not.default": None,
"torch.ops.aten.bitwise_or.Tensor": None,
Expand Down Expand Up @@ -253,12 +254,17 @@ def __init__(self, options):
"torch.ops.quantized_decomposed.dequantize_per_channel.default": None

}

self.enabled_op_names = []

for op in _get_disabled_ops(options):
del support_dict[op]

super().__init__(support_dict)

def enable_by_name(self, node: Node):
    """Force-enable support for a specific graph node.

    Records the node's name so that is_node_supported() will report it as
    supported even when its target is absent from the support dictionary
    (used for nodes matched by the GPTQ pattern capture).
    """
    self.enabled_op_names += [node.name]

def is_node_supported(self, submodules: t.Mapping[str, Module], node: Node) -> bool:
# OpenVINO FX subgraph should be purely functional
if node.op not in CALLABLE_NODE_OPS:
Expand All @@ -272,4 +278,7 @@ def is_node_supported(self, submodules: t.Mapping[str, Module], node: Node) -> b
if target in self._support_dict:
return True

if node.name in self.enabled_op_names:
return True

return super().is_node_supported(submodules, node)
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,59 @@ def check_fully_supported(self, graph_module: GraphModule) -> bool:
return True
return False

def capture_gptq_patterns(self, graph_module: GraphModule) -> None:
    """Find GPTQ weight-decompression subgraphs and force-enable their nodes.

    Scans the FX graph for the chain
        get_attr -> unsqueeze -> expand \
                                         bitwise_right_shift -> _to_copy -> bitwise_and.Scalar
        get_attr -> unsqueeze ----------/
    and marks every node of a fully-matched chain via
    ``self.supported_ops.enable_by_name`` so the partitioner keeps the
    pattern inside one OpenVINO subgraph.

    Note: the original signature was annotated ``-> bool`` but never
    returned a value; the annotation is corrected to ``-> None``.
    """

    def _is_call(candidate, target: str) -> bool:
        # True iff `candidate` is a call_function node whose target prints as `target`.
        return str(candidate.op) == "call_function" and str(candidate.target) == target

    def _sole_input(candidate):
        # The node's single producer, or None when it does not have exactly one input.
        producers = candidate.all_input_nodes
        return producers[0] if len(producers) == 1 else None

    for node in graph_module.graph.nodes:
        if not _is_call(node, "aten.bitwise_and.Scalar"):
            continue
        to_copy_node = _sole_input(node)
        if to_copy_node is None or not _is_call(to_copy_node, "aten._to_copy.default"):
            continue
        bitwise_right_shift_node = _sole_input(to_copy_node)
        if bitwise_right_shift_node is None or not _is_call(bitwise_right_shift_node,
                                                            "aten.bitwise_right_shift.Tensor"):
            continue
        shift_inputs = bitwise_right_shift_node.all_input_nodes
        if len(shift_inputs) != 2:
            continue
        expand_node, unsqueeze_1_node = shift_inputs
        if not _is_call(expand_node, "aten.expand.default"):
            continue
        unsqueeze_0_node = _sole_input(expand_node)
        if unsqueeze_0_node is None or not _is_call(unsqueeze_0_node, "aten.unsqueeze.default"):
            continue
        const_0_node = _sole_input(unsqueeze_0_node)
        if const_0_node is None or str(const_0_node.op) != "get_attr":
            continue
        if not _is_call(unsqueeze_1_node, "aten.unsqueeze.default"):
            continue
        const_1_node = _sole_input(unsqueeze_1_node)
        if const_1_node is None or str(const_1_node.op) != "get_attr":
            continue

        # Full pattern matched: enable every interior node of the chain.
        for matched in (node, to_copy_node, bitwise_right_shift_node,
                        expand_node, unsqueeze_0_node, unsqueeze_1_node):
            self.supported_ops.enable_by_name(matched)

def make_partitions(self, graph_module: GraphModule, options) -> GraphModule:
allow_single_node_partition = _is_testing(options)
self.capture_gptq_patterns(graph_module)
partitioner = CapabilityBasedPartitioner(
graph_module, self.supported_ops, allows_single_node_partition=allow_single_node_partition)
partitions = partitioner.propose_partitions()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -83,15 +83,7 @@ class NodeContext : public frontend::NodeContext {

Any get_values_from_const_input(int index) const override;

// TODO: upstream to base class
OutputVector inputs() const {
OutputVector res;
for (auto input : m_decoder_inputs) {
FRONT_END_GENERAL_CHECK(m_tensor_map->count(input), "No tensor corresponding index: ", input, " exist.");
res.push_back(m_tensor_map->at(input));
}
return res;
}
OutputVector inputs() const;

Any get_input_type(size_t index) const {
return m_decoder->get_input_type(index);
Expand Down
7 changes: 7 additions & 0 deletions src/frontends/pytorch/src/frontend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
#include "transforms/rfftn_complex_replacer.hpp"
#include "transforms/softmax_reshape_elimination.hpp"
#include "transforms/string_equality_replacer.hpp"
#include "transforms/torchfx_gptq_pattern_replacer.hpp"
#include "transforms/tuple_unpack_replacer.hpp"
#include "transforms/u4_block_repack.hpp"
#include "translate_session.hpp"
Expand Down Expand Up @@ -172,6 +173,12 @@ std::shared_ptr<Model> FrontEnd::decode(const InputModel::Ptr& model) const {
void FrontEnd::normalize(const std::shared_ptr<ov::Model>& model) const {
ov::pass::Manager manager;

// GPTQ transformations need to be executed before other passes
// Once the GPTQ patterns are modified by other transformations,
// they cannot be captured anymore
manager.register_pass<ov::frontend::pytorch::pass::GPTQDecompressionReplacer>();
manager.register_pass<ov::frontend::pytorch::pass::GPTQMultPatternReplacer>();

// the following 2 transformations are needed for keypoint detectron2 models to work.
// AtenIndexToSelect will be called twice
manager.register_pass<ov::pass::ConvertConvertLike>();
Expand Down
18 changes: 18 additions & 0 deletions src/frontends/pytorch/src/node_context.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,24 @@ std::shared_ptr<ov::Model> NodeContext::convert_subgraph(size_t index) const {
return model;
}

OutputVector NodeContext::inputs() const {
    // Resolve every decoder input to an Output<Node>.
    // Input id 0 marks a potentially inlined input (possible only for the fx
    // decoder): such inputs are produced by the decoder itself and must NOT be
    // looked up in the tensor map.
    OutputVector res;
    for (size_t i = 0; i < m_decoder_inputs.size(); i++) {
        auto input = m_decoder_inputs.at(i);
        if (input == 0 && m_decoder->is_input_inlined(i)) {
            // Case when input can be inlined (possible only for fx decoder)
            auto inlined_input = m_decoder->inlined_input(i);
            FRONT_END_GENERAL_CHECK(inlined_input.size() == 1, "Incorrect inlined input with index:", i);
            res.push_back(inlined_input[0]);
            // Bug fix: skip the tensor-map lookup for an inlined input. The
            // original fell through and pushed a second output for id 0 (or
            // failed the check below when id 0 was absent from the map).
            continue;
        }
        FRONT_END_GENERAL_CHECK(m_tensor_map->count(input), "No tensor corresponding input: ", input, " exist.");
        res.push_back(m_tensor_map->at(input));
    }
    return res;
}

bool NodeContext::input_is_none(size_t index) const {
bool res = index >= m_inputs_is_none.size() || m_inputs_is_none.at(index);
if (!res) {
Expand Down
1 change: 1 addition & 0 deletions src/frontends/pytorch/src/op_table.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -784,6 +784,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops_fx() {
{"aten.avg_pool2d.default", op::translate_avg_poolnd},
{"aten.avg_pool3d.default", op::translate_avg_poolnd},
{"aten.baddbmm.default", op::translate_addmm_fx},
{"aten.bitwise_and.Scalar", op::translate_bitwise_and},
{"aten.bitwise_and.Tensor", op::translate_bitwise_and},
{"aten.bitwise_not.default", op::translate_bitwise_not},
{"aten.bitwise_or.Tensor", op::translate_bitwise_or},
Expand Down
Loading

0 comments on commit 72ad8b1

Please sign in to comment.