diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp
index 65acb0beb66ba0..de7f51b071ae53 100644
--- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp
+++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp
@@ -846,27 +846,6 @@ void prepare_buffer_fusing::run(program& p) {
                 if (user_info.first) {
                     node.get_users().front()->set_output_layout(user_info.second);
                 }
-
-                // In case that the rank of weight node of gemm is less than 4 and,
-                // it transforms to extend to 4 dims by adding 1 to begin().
-                // Therefore, the padding of crop_layout should be shifted properly.
-                const size_t TDIM = 4;
-                auto user = node.get_users().front();
-                bool allow_new_shape_infer = node.get_program().is_new_shape_infer();
-                if (!allow_new_shape_infer && user->is_type<gemm>() && user->get_dependency(1).id().compare(node.id()) == 0) {
-                    auto input_rank = user->get_kernel_impl_params()->typed_desc<gemm>()->weight_rank;
-                    if (input_rank < TDIM) {
-                        std::vector<int32_t> l_pad = {0, 0, 0, 0};
-                        std::vector<int32_t> u_pad = {0, 0, 0, 0};
-
-                        //shift right
-                        size_t shift_right = TDIM - input_rank;
-                        std::copy_n(crop_layout.data_padding._lower_size.begin(), l_pad.size() - shift_right, l_pad.begin() + shift_right);
-                        std::copy_n(crop_layout.data_padding._upper_size.begin(), u_pad.size() - shift_right, u_pad.begin() + shift_right);
-
-                        crop_layout.data_padding = padding(l_pad, u_pad);
-                    }
-                }
             }
             node.set_output_layout(crop_layout);
             node.can_be_optimized(true);
diff --git a/src/plugins/intel_gpu/src/graph/strided_slice.cpp b/src/plugins/intel_gpu/src/graph/strided_slice.cpp
index 685ab413153361..e8f93191ded819 100644
--- a/src/plugins/intel_gpu/src/graph/strided_slice.cpp
+++ b/src/plugins/intel_gpu/src/graph/strided_slice.cpp
@@ -177,7 +177,7 @@ void strided_slice_inst::update_output_memory() {
     if (!can_be_optimized())
         return;
 
-    if (node->get_program().is_new_shape_infer() && input_memory_ptr() == nullptr)
+    if (input_memory_ptr() == nullptr)
         return;
 
     if (static_cast<bool>(_outputs[0]) && _network.get_engine().is_the_same_buffer(output_memory(), input_memory()))
diff --git a/src/plugins/intel_gpu/tests/unit/dynamic_execution/skip_gather_at_runtime.cpp b/src/plugins/intel_gpu/tests/unit/dynamic_execution/skip_gather_at_runtime.cpp
index 6ca8073c7658c3..5beb6b0f83f9d9 100644
--- a/src/plugins/intel_gpu/tests/unit/dynamic_execution/skip_gather_at_runtime.cpp
+++ b/src/plugins/intel_gpu/tests/unit/dynamic_execution/skip_gather_at_runtime.cpp
@@ -62,7 +62,6 @@ TEST_P(skip_gather_at_runtime_test, runtime_skip) {
                      reorder("reorder", input_info("gather"), format::get_default_format(input1_rank), data_types::f32));
 
     ExecutionConfig config = get_test_default_config(engine);
-    config.set_property(ov::intel_gpu::allow_new_shape_infer(true));
     config.set_property(ov::intel_gpu::optimize_data(true));
     network network(engine, topology, config);
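
For readers unfamiliar with the workaround removed in the first hunk: under legacy (non new-shape-infer) mode, a gemm weight input of rank < 4 was implicitly extended to 4 dims by prepending 1s, so the crop padding had to be shifted right by the same number of positions. Below is a minimal standalone sketch of that shift, not part of the patch; it assumes plain std::vector<int32_t> pads in place of cldnn::padding, and the names shift_pads_right, pads, and input_rank are illustrative rather than taken from the OpenVINO sources.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative sketch: extend per-dim pads of a rank-N (N < 4) tensor to rank 4.
// Prepending (4 - N) leading 1-dims moves every original axis to the right, so
// its padding has to move with it.
static std::vector<int32_t> shift_pads_right(const std::vector<int32_t>& pads, size_t input_rank) {
    const size_t TDIM = 4;
    std::vector<int32_t> result(TDIM, 0);
    if (input_rank >= TDIM || pads.size() < input_rank)
        return result;  // nothing to shift in this simplified sketch
    const size_t shift_right = TDIM - input_rank;
    std::copy_n(pads.begin(), input_rank, result.begin() + shift_right);
    return result;
}

int main() {
    // A rank-2 weight with pads {2, 3} becomes [1, 1, d0, d1], so the pads land
    // on the last two axes: prints "0 0 2 3".
    for (int32_t p : shift_pads_right({2, 3}, 2))
        std::cout << p << ' ';
    std::cout << '\n';
    return 0;
}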