Fixed SpaceToBatch and BatchToSpace for 3d case (#18033)
* Added functional tests for SpaceToBatch and BatchToSpace

* Added functional tests for template plugin
steve-y authored Jul 3, 2023
1 parent deb6231 commit 6be030b
Showing 7 changed files with 44 additions and 6 deletions.
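As orientation before the per-file diffs, here is a minimal sketch of the case this commit enables: building a rank-3 SpaceToBatch with the ngraph C++ API used in this repository. The shapes are illustrative only and are not taken from the diff.

#include <memory>
#include <ngraph/ngraph.hpp>

std::shared_ptr<ngraph::Node> make_space_to_batch_3d() {
    using namespace ngraph;
    // Rank-3 data {N=1, C=2, W=2}; previously only rank-4 and rank-5 data were evaluated.
    auto data = std::make_shared<op::Parameter>(element::f32, Shape{1, 2, 2});
    auto block = op::Constant::create(element::i64, Shape{3}, {1, 1, 2});
    auto pads_begin = op::Constant::create(element::i64, Shape{3}, {0, 0, 0});
    auto pads_end = op::Constant::create(element::i64, Shape{3}, {0, 0, 0});
    // Expected output shape: {1 * 1 * 1 * 2, 2 / 1, 2 / 2} = {2, 2, 1}
    return std::make_shared<op::v1::SpaceToBatch>(data, block, pads_begin, pads_end);
}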
2 changes: 1 addition & 1 deletion src/core/src/op/space_to_batch.cpp
@@ -102,7 +102,7 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto

auto data_shape = data->get_shape();

- if (!(data->get_shape().size() == 4 || data->get_shape().size() == 5)) {
+ if (!(data->get_shape().size() == 3 || data->get_shape().size() == 4 || data->get_shape().size() == 5)) {
return false;
}

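The check above only gates the evaluate path; the underlying shape arithmetic is rank-agnostic. Below is a hedged sketch of that arithmetic (not the code in this file), which now also covers rank-3 data:

#include <cstddef>
#include <vector>

// Output shape of SpaceToBatch: the batch grows by the product of block_shape,
// every other dimension is padded and then divided by its block value.
std::vector<size_t> space_to_batch_out_shape(const std::vector<size_t>& data_shape,
                                             const std::vector<size_t>& block,
                                             const std::vector<size_t>& pads_begin,
                                             const std::vector<size_t>& pads_end) {
    std::vector<size_t> out(data_shape.size());
    out[0] = data_shape[0];
    for (size_t b : block)
        out[0] *= b;  // block[0] is required to be 1, so this is N * prod(block[1..])
    for (size_t i = 1; i < data_shape.size(); ++i)
        out[i] = (data_shape[i] + pads_begin[i] + pads_end[i]) / block[i];
    return out;
}
// e.g. space_to_batch_out_shape({1, 2, 2}, {1, 1, 2}, {0, 0, 0}, {0, 0, 0}) == {2, 2, 1}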
2 changes: 1 addition & 1 deletion src/plugins/intel_gpu/src/plugin/ops/batch_to_space.cpp
@@ -31,7 +31,7 @@ static void CreateBatchToSpaceOp(Program& p, const std::shared_ptr<ngraph::op::v

std::vector<int32_t> sizes = inConst->cast_vector<int32_t>();
int32_t default_size = i == 1 ? 1 : 0;
- for (size_t s = sizes.size(); s < rank; s++) {
+ for (size_t s = sizes.size(); s < format.dimension(); s++) {
sizes.push_back(default_size);
}
tensor_inputs.emplace_back(format, sizes, default_size);
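The same one-line change appears in the SpaceToBatch plugin op below: the block/crops constants of a 3-D op hold only three values, while the cldnn tensor being built still has format.dimension() axes (at least four), so the vector is padded to the format size rather than to the op rank. An illustration with plain std::vector, no cldnn types; the format size of 4 is just an example (e.g. a bfyx-like 4-D layout):

#include <cstdint>
#include <vector>

std::vector<int32_t> pad_to_format(std::vector<int32_t> sizes,
                                   size_t format_dims,      // e.g. 4 for a 4-D layout such as bfyx
                                   int32_t default_size) {  // 1 for block_shape, 0 for crops/pads
    for (size_t s = sizes.size(); s < format_dims; ++s)
        sizes.push_back(default_size);
    return sizes;
}
// e.g. block_shape {1, 1, 2} from a 3-D BatchToSpace becomes {1, 1, 2, 1};
// padding only up to the op rank (3) would leave the cldnn tensor under-specified.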
6 changes: 3 additions & 3 deletions src/plugins/intel_gpu/src/plugin/ops/space_to_batch.cpp
@@ -31,7 +31,7 @@ static void CreateSpaceToBatchOp(Program& p, const std::shared_ptr<ngraph::op::v

std::vector<int32_t> sizes = inConst->cast_vector<int32_t>();
int32_t default_size = i == 1 ? 1 : 0;
- for (size_t s = sizes.size(); s < rank; s++) {
+ for (size_t s = sizes.size(); s < format.dimension(); s++) {
sizes.push_back(default_size);
}
tensor_inputs.emplace_back(format, sizes, default_size);
@@ -41,14 +41,14 @@ static void CreateSpaceToBatchOp(Program& p, const std::shared_ptr<ngraph::op::v
// To be removed once we enable internal shape infer for all operations
auto out_size = output_pshape.is_static() ? tensor_from_dims(output_pshape.to_shape()) : cldnn::tensor();

- auto batchToSpacePrim = cldnn::space_to_batch(layerName,
+ auto spaceToBatchPrim = cldnn::space_to_batch(layerName,
inputs[0], // input
tensor_inputs[0], // block_shape
tensor_inputs[1], // crops_begin
tensor_inputs[2], // crops_end
out_size);

- p.add_primitive(*op, batchToSpacePrim);
+ p.add_primitive(*op, spaceToBatchPrim);
}

REGISTER_FACTORY_IMPL(v1, SpaceToBatch);
@@ -11,7 +11,17 @@ using namespace LayerTestsDefinitions;

namespace {
auto bts_only_test_cases = []() {
- return std::vector<batchToSpaceParamsTuple>{batchToSpaceParamsTuple({1, 1, 2, 2},
+ return std::vector<batchToSpaceParamsTuple>{batchToSpaceParamsTuple({1, 2, 2},
+ {0, 0, 0},
+ {0, 0, 0},
+ {4, 1, 1},
+ InferenceEngine::Precision::FP32,
+ InferenceEngine::Precision::UNSPECIFIED,
+ InferenceEngine::Precision::UNSPECIFIED,
+ InferenceEngine::Layout::ANY,
+ InferenceEngine::Layout::ANY,
+ CommonTestUtils::DEVICE_GPU),
+ batchToSpaceParamsTuple({1, 1, 2, 2},
{0, 0, 0, 0},
{0, 0, 0, 0},
{4, 1, 1, 1},
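Reading the new 3-D tuple above with the usual shared-test field order (block_shape, crops_begin, crops_end, input_shape, ...), which is an assumption since the typedef is not part of this diff, the expected shape arithmetic is the inverse of SpaceToBatch, sketched here:

#include <cstddef>
#include <vector>

// Sketch only; the field order of batchToSpaceParamsTuple is assumed, not shown in this diff.
std::vector<size_t> batch_to_space_out_shape(const std::vector<size_t>& data_shape,
                                             const std::vector<size_t>& block,
                                             const std::vector<size_t>& crops_begin,
                                             const std::vector<size_t>& crops_end) {
    size_t block_prod = 1;
    for (size_t b : block)
        block_prod *= b;
    std::vector<size_t> out(data_shape.size());
    out[0] = data_shape[0] / block_prod;  // batch shrinks by the block product
    for (size_t i = 1; i < data_shape.size(); ++i)
        out[i] = data_shape[i] * block[i] - crops_begin[i] - crops_end[i];
    return out;
}
// e.g. input {4, 1, 1} with block {1, 2, 2} and zero crops -> {1, 2, 2}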
@@ -13,6 +13,16 @@ namespace {

auto stb_only_test_cases = []() {
return std::vector<spaceToBatchParamsTuple>{
+ spaceToBatchParamsTuple({1, 2, 2},
+ {0, 0, 0},
+ {0, 0, 0},
+ {1, 2, 2},
+ InferenceEngine::Precision::FP32,
+ InferenceEngine::Precision::UNSPECIFIED,
+ InferenceEngine::Precision::UNSPECIFIED,
+ InferenceEngine::Layout::ANY,
+ InferenceEngine::Layout::ANY,
+ CommonTestUtils::DEVICE_GPU),
spaceToBatchParamsTuple({1, 1, 2, 2},
{0, 0, 0, 0},
{0, 0, 0, 0},
@@ -90,6 +90,15 @@ std::vector<BatchToSpaceParams> generateBatchToSpaceParams() {
reference_tests::Tensor({2, 6}, IN_ET, std::vector<T>{1, 7, 2, 8, 3, 9, 4, 10, 5, 11, 6, 12}),
"input_with_shape_4x3"),

+ // input_with_shape_4x1x3
+ BatchToSpaceParams(
+ reference_tests::Tensor({4, 1, 3}, IN_ET, std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}),
+ reference_tests::Tensor({3}, element::i64, std::vector<int64_t>{1, 1, 2}),
+ reference_tests::Tensor({3}, element::i64, std::vector<int64_t>{0, 0, 0}),
+ reference_tests::Tensor({3}, element::i64, std::vector<int64_t>{0, 0, 0}),
+ reference_tests::Tensor({2, 1, 6}, IN_ET, std::vector<T>{1, 7, 2, 8, 3, 9, 4, 10, 5, 11, 6, 12}),
+ "input_with_shape_4x1x3"),
+
// input_with_shape_4x1x1x3
BatchToSpaceParams(
reference_tests::Tensor({4, 1, 1, 3}, IN_ET, std::vector<T>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}),
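The expected tensor of the new input_with_shape_4x1x3 case above can be reproduced by hand: with block_shape {1, 1, 2}, output batch b interleaves input batches b and b + 2 along the last axis. A small self-contained check, illustrative only and not part of the test suite:

#include <cstdio>
#include <vector>

int main() {
    const std::vector<int> in{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};  // shape {4, 1, 3}
    const int out_batch = 2, width = 3, block_w = 2;
    std::vector<int> out;  // shape {2, 1, 6}
    for (int b = 0; b < out_batch; ++b)
        for (int j = 0; j < width; ++j)
            for (int k = 0; k < block_w; ++k)
                out.push_back(in[(b + k * out_batch) * width + j]);
    for (int v : out)
        std::printf("%d ", v);  // prints: 1 7 2 8 3 9 4 10 5 11 6 12
    return 0;
}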
@@ -82,6 +82,15 @@ template <element::Type_t IN_ET>
std::vector<SpaceToBatchParams> generateParams() {
using T = typename element_type_traits<IN_ET>::value_type;
std::vector<SpaceToBatchParams> batchToSpaceParams {
+ // space_to_batch_3D
+ SpaceToBatchParams(
+ reference_tests::Tensor({1, 2, 2}, IN_ET, std::vector<T>{1, 1, 1, 1}),
+ reference_tests::Tensor({3}, element::i64, std::vector<int64_t>{1, 1, 1}),
+ reference_tests::Tensor({3}, element::i64, std::vector<int64_t>{0, 0, 0}),
+ reference_tests::Tensor({3}, element::i64, std::vector<int64_t>{0, 0, 0}),
+ reference_tests::Tensor({1, 2, 2}, IN_ET, std::vector<T>{1, 1, 1, 1}),
+ "space_to_batch_3D"),
+
// space_to_batch_4D
SpaceToBatchParams(
reference_tests::Tensor({1, 1, 2, 2}, IN_ET, std::vector<T>{1, 1, 1, 1}),