This repository has been archived by the owner on Jan 10, 2023. It is now read-only.

Drop 12.1
tponieck committed Jan 3, 2019
1 parent 8c4ccde commit f91d7d8
Showing 144 changed files with 7,970 additions and 1,054 deletions.
4 changes: 4 additions & 0 deletions README.md
@@ -45,6 +45,10 @@ You can find more information [here](https://software.intel.com/en-us/openvino-t

## Changelog

### Drop 12.1
- gtests code refactor
- buildbreak fix

### Drop 12.0
New features:
- pyramidRoiAlign primitive
5 changes: 4 additions & 1 deletion api/C/cldnn.h
@@ -159,7 +159,8 @@ typedef struct
/*cldnn_priority_mode_type*/ int16_t priority_mode; ///< Priority mode (support of OpenCL priority hints in command queue).
/*cldnn_throttle_mode_type*/ int16_t throttle_mode; ///< Throttle mode (support of throttle hints in command queue).
uint32_t enable_memory_pool; ///< Enables memory usage optimization. Memory objects will be reused when possible.
//const char* tuning_cache_path; ///< Enables defining other than default path to tuning cache json file
void* context;
const char* tuning_cache_path; ///< Enables defining a path to the tuning cache JSON file other than the default
} cldnn_engine_configuration;

/// @brief Information about the engine returned by cldnn_get_engine_info().
@@ -277,6 +278,8 @@ typedef enum /*:int32_t*/
cldnn_format_fyxb, ///< format not used inside clDNN, but supported in reorder as extension for user provided formats.
cldnn_format_os_iyx_osv16, ///< format used only for convolution weights: os - output feature maps slice, i - input feature maps, yx - spatials, sv16 - 16 values of single slice.
///< \n \image html os_iyx_osv16.jpg
cldnn_format_os_iyx_osv32, ///< format used only for convolution weights: os - output feature maps slice, i - input feature maps, yx - spatials, sv32 - 32 values of single slice.
cldnn_format_os_iyx_osv64, ///< format used only for convolution weights: os - output feature maps slice, i - input feature maps, yx - spatials, sv64 - 64 values of single slice.
cldnn_format_bs_xs_xsv8_bsv8, ///< format used only for fully connected weights: bs - batch slice, xs - x slice, bsv8 - 8 values of single slice.
///< \n \image html bs_xs_xsv8_bsv8.jpg
cldnn_format_bs_xs_xsv8_bsv16,///< format used only for fully connected weights: bs - batch slice, xs - x slice, bsv16 - 16 values of single slice.
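
The C API now carries the user context pointer and the tuning-cache path directly in `cldnn_engine_configuration`, and exposes the two new weight formats. A minimal sketch of filling the extended struct (the include path and the zero-initialization of the fields not shown in this hunk are assumptions, not part of the commit):

```cpp
#include <cstring>
#include <api/C/cldnn.h>   // assumed include path relative to the clDNN root

int main() {
    cldnn_engine_configuration cfg;
    std::memset(&cfg, 0, sizeof(cfg));    // leave the fields not shown above at zero
    cfg.enable_memory_pool = 1;           // reuse memory objects when possible
    cfg.context = nullptr;                // new field: optional user-provided context
    cfg.tuning_cache_path = "cache.json"; // new field: overrides the default tuning cache location

    // The new weight formats are plain enum values and can be used wherever a
    // /*cldnn_format_type*/ int32_t is expected.
    int32_t weights_fmt = cldnn_format_os_iyx_osv32;
    (void)cfg; (void)weights_fmt;
    return 0;
}
```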
6 changes: 3 additions & 3 deletions api/C/pyramidROIAlign.h → api/C/pyramid_roi_align.h
@@ -20,11 +20,11 @@
extern "C" {
#endif

CLDNN_BEGIN_PRIMITIVE_DESC(PyramidROIAlign)
CLDNN_BEGIN_PRIMITIVE_DESC(pyramid_roi_align)

CLDNN_END_PRIMITIVE_DESC(PyramidROIAlign)
CLDNN_END_PRIMITIVE_DESC(pyramid_roi_align)

CLDNN_DECLARE_PRIMITIVE_TYPE_ID(PyramidROIAlign);
CLDNN_DECLARE_PRIMITIVE_TYPE_ID(pyramid_roi_align);


#ifdef __cplusplus
20 changes: 12 additions & 8 deletions api/CPP/engine.hpp
@@ -65,9 +65,9 @@ struct engine_configuration
const priority_mode_types priority_mode; ///< Priority mode (support of priority hints in command queue). If cl_khr_priority_hints extension is not supported by current OpenCL implementation, the value must be set to cldnn_priority_disabled.

const throttle_mode_types throttle_mode; ///< Throttle mode (support of throttle hints in command queue). If cl_khr_throttle_hints extension is not supported by current OpenCL implementation, the value must be set to cldnn_throttle_disabled.

bool enable_memory_pool; ///< Enables memory usage optimization. Memory objects will be reused when possible (switched off for drivers older than NEO).
//const std::string tuning_cache_path; ///< Path to tuning kernel cache
void* context; ///< Pointer to user context
const std::string tuning_cache_path; ///< Path to tuning kernel cache

/// @brief Constructs engine configuration with specified options.
/// @param profiling Enable per-primitive profiling.
@@ -86,8 +86,9 @@ struct engine_configuration
const std::string& sources_dumps_dir = std::string(),
priority_mode_types priority_mode = priority_mode_types::disabled,
throttle_mode_types throttle_mode = throttle_mode_types::disabled,
bool memory_pool = true)
//const std::string& tuning_cache_path = "cache.json")
bool memory_pool = true,
void* context = nullptr,
const std::string& tuning_cache_path = "cache.json")
: enable_profiling(profiling)
, meaningful_kernels_names(decorate_kernel_names)
, dump_custom_program(dump_custom_program)
@@ -99,7 +100,8 @@ struct engine_configuration
, priority_mode(priority_mode)
, throttle_mode(throttle_mode)
, enable_memory_pool(memory_pool)
//, tuning_cache_path(tuning_cache_path)
, context(context)
, tuning_cache_path(tuning_cache_path)
{}

engine_configuration(const cldnn_engine_configuration& c_conf)
@@ -114,7 +116,8 @@ struct engine_configuration
, priority_mode(static_cast<priority_mode_types>(c_conf.priority_mode))
, throttle_mode(static_cast<throttle_mode_types>(c_conf.throttle_mode))
, enable_memory_pool(c_conf.enable_memory_pool != 0)
//, tuning_cache_path(c_conf.tuning_cache_path)
, context(c_conf.context)
, tuning_cache_path(c_conf.tuning_cache_path)
{}

/// @brief Implicit conversion to C API @ref ::cldnn_engine_configuration
@@ -131,8 +134,9 @@ struct engine_configuration
sources_dumps_dir.c_str(),
static_cast<int16_t>(priority_mode),
static_cast<int16_t>(throttle_mode),
enable_memory_pool
//tuning_cache_path.c_str()
enable_memory_pool,
context,
tuning_cache_path.c_str()
};
}
};
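
On the C++ side the new `context` and `tuning_cache_path` members are appended as the last two constructor parameters, both defaulted, so existing call sites keep compiling. A minimal sketch of what a caller might do with them (the include path is an assumption, and constructing the engine needs an OpenCL-capable device at runtime):

```cpp
#include <iostream>
#include <api/CPP/engine.hpp>   // assumed include path relative to the clDNN root

int main() {
    cldnn::engine_configuration cfg;   // defaults: context = nullptr, tuning_cache_path = "cache.json"
    std::cout << "tuning cache: " << cfg.tuning_cache_path << std::endl;

    // The extra fields travel to the C layer through the implicit conversion
    // to cldnn_engine_configuration shown in the hunk above.
    cldnn::engine eng(cfg);
    std::cout << "engine created" << std::endl;
    return 0;
}
```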
8 changes: 8 additions & 0 deletions api/CPP/layout.hpp
@@ -359,6 +359,14 @@ struct layout
{
sizes[0] = align_to(sizes[0], 16);
}
else if (this->format == cldnn::format::os_iyx_osv32 && !is_aligned_to(sizes[0], 32))
{
sizes[0] = align_to(sizes[0], 32);
}
else if (this->format == cldnn::format::os_iyx_osv64 && !is_aligned_to(sizes[0], 64))
{
sizes[0] = align_to(sizes[0], 64);
}
else if (this->format == cldnn::format::bs_xs_xsv8_bsv8 && !(is_aligned_to(sizes[0], 8) && is_aligned_to(sizes[2], 8)))
{
sizes[0] = align_to(sizes[0], 8);
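
The effect of the two extra branches is that size calculations round the output-feature-slice dimension up to a multiple of 32 or 64, exactly as osv16 already does for 16. A hedged sketch, assuming (as in earlier drops) that this branch sits in the size helpers of cldnn::layout such as get_linear_size(), and that the include path below is correct:

```cpp
#include <iostream>
#include <api/CPP/layout.hpp>   // assumed include path relative to the clDNN root

int main() {
    // Convolution weights: 40 output feature maps, 16 input feature maps, 3x3 kernel.
    cldnn::layout weights(cldnn::data_types::f16,
                          cldnn::format::os_iyx_osv32,
                          cldnn::tensor(40, 16, 3, 3));   // b = output features, f = input features, x, y

    // With osv32 the 40 output feature maps are padded up to 64 slices, so the
    // element count should be 64 * 16 * 3 * 3 = 9216 if the alignment above applies here.
    std::cout << weights.get_linear_size() << std::endl;
    return 0;
}
```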
12 changes: 6 additions & 6 deletions api/CPP/pyramidROIAlign.hpp → api/CPP/pyramid_roi_align.hpp
@@ -14,26 +14,26 @@

#pragma once

#include "../C/pyramidROIAlign.h"
#include "../C/pyramid_roi_align.h"
#include "primitive.hpp"

using namespace std;

namespace cldnn {

struct PyramidROIAlign : public primitive_base<PyramidROIAlign, CLDNN_PRIMITIVE_DESC(PyramidROIAlign)>
struct pyramid_roi_align : public primitive_base<pyramid_roi_align, CLDNN_PRIMITIVE_DESC(pyramid_roi_align)>
{
CLDNN_DECLARE_PRIMITIVE(PyramidROIAlign)
CLDNN_DECLARE_PRIMITIVE(pyramid_roi_align)

PyramidROIAlign(
pyramid_roi_align(
const primitive_id& id,
const primitive_id& input,
const padding& output_padding = padding()
)
: primitive_base(id, { input }, output_padding)
{}

PyramidROIAlign(
pyramid_roi_align(
const primitive_id &id_c,
const primitive_id &base_str,
const primitive_id &meta_str,
@@ -51,7 +51,7 @@ namespace cldnn {
{}

/// @brief Constructs a copy from C API @CLDNN_PRIMITIVE_DESC{pyramid_roi_align}
PyramidROIAlign(const dto* dto)
pyramid_roi_align(const dto* dto)
: primitive_base(dto)

{}
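
The rename is purely cosmetic: the primitive keeps its constructors, only the identifier changes from PyramidROIAlign to pyramid_roi_align (and the headers move accordingly). A usage sketch with the simple two-argument constructor shown above; the topology include path and the "feature_maps" input id are placeholders, not part of this commit:

```cpp
#include <api/CPP/topology.hpp>            // assumed include path
#include <api/CPP/pyramid_roi_align.hpp>

int main() {
    cldnn::topology topo;
    // "feature_maps" stands in for some earlier primitive already added to the topology.
    topo.add(cldnn::pyramid_roi_align("pyramid_pool", "feature_maps"));
    return 0;
}
```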
14 changes: 14 additions & 0 deletions api/CPP/tensor.hpp
@@ -88,6 +88,8 @@ struct format
fyxb = cldnn_format_fyxb, ///< format not used inside clDNN, but supported in reorder as extension for user provided formats.
os_iyx_osv16 = cldnn_format_os_iyx_osv16, ///< format used only for convolution weights: os - output feature maps slice, i - input feature maps, yx - spatials, sv16 - 16 values of single slice.
///< \n \image html os_iyx_osv16.jpg
os_iyx_osv32 = cldnn_format_os_iyx_osv32, ///< format used only for convolution weights: os - output feature maps slice, i - input feature maps, yx - spatials, sv32 - 32 values of single slice.
os_iyx_osv64 = cldnn_format_os_iyx_osv64, ///< format used only for convolution weights: os - output feature maps slice, i - input feature maps, yx - spatials, sv64 - 64 values of single slice.
bs_xs_xsv8_bsv8 = cldnn_format_bs_xs_xsv8_bsv8, ///< format used only for fully connected weights: bs - batch slice, xs - x slice, bsv8 - 8 values of single slice.
///< \n \image html bs_xs_xsv8_bsv8.jpg
bs_xs_xsv8_bsv16 = cldnn_format_bs_xs_xsv8_bsv16,///< format used only for fully connected weights: bs - batch slice, xs - x slice, bsv16 - 16 values of single slice.
@@ -129,6 +131,8 @@ struct format
{ bfyx,{ 1, 1, 2, 0, "bfyx", "bfxy" } },
{ fyxb,{ 1, 1, 2, 0, "fyxb", "bfxy" } },
{ os_iyx_osv16, { 1, 1, 2, 0, "bfyx", "bfxy" } },
{ os_iyx_osv32,{ 1, 1, 2, 0, "bfyx", "bfxy" } },
{ os_iyx_osv64,{ 1, 1, 2, 0, "bfyx", "bfxy" } },
{ bs_xs_xsv8_bsv8, { 1, 1, 1, 0, "bx", "b?x?" } },
{ bs_xs_xsv8_bsv16,{ 1, 1, 1, 0, "bx", "b?x?" } },
{ bs_x_bsv16, { 1, 1, 1, 0, "bx", "b?x?" } },
@@ -736,6 +740,16 @@ struct tensor
my_sizes[0] = align_to(my_sizes[0], 16);
adjusted_coords[0] = align_to(adjusted_coords[0], 16);
}
else if (fmt == cldnn::format::os_iyx_osv32 && !is_aligned_to(my_sizes[0], 32))
{
my_sizes[0] = align_to(my_sizes[0], 32);
adjusted_coords[0] = align_to(adjusted_coords[0], 32);
}
else if (fmt == cldnn::format::os_iyx_osv64 && !is_aligned_to(my_sizes[0], 64))
{
my_sizes[0] = align_to(my_sizes[0], 64);
adjusted_coords[0] = align_to(adjusted_coords[0], 64);
}
else if (fmt == cldnn::format::bs_xs_xsv8_bsv8 && !(is_aligned_to(my_sizes[0], 8) && is_aligned_to(my_sizes[1], 8)))
{
my_sizes[0] = align_to(my_sizes[0], 8);
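
As with the existing osv16 entries, the new C++ enum values simply wrap the C constants and reuse the bfyx/bfxy trait rows, so the only behavioral difference is the wider slice alignment handled above. A small sanity sketch (include path assumed; tensor.hpp is expected to pull in the C constants):

```cpp
#include <api/CPP/tensor.hpp>   // assumed include path relative to the clDNN root

int main() {
    // The C++ enum values stay numerically in sync with the C API additions.
    static_assert(cldnn::format::os_iyx_osv32 == cldnn_format_os_iyx_osv32,
                  "C++ and C weight-format ids must match");
    static_assert(cldnn::format::os_iyx_osv64 == cldnn_format_os_iyx_osv64,
                  "C++ and C weight-format ids must match");

    cldnn::format fmt = cldnn::format::os_iyx_osv64;
    (void)fmt;
    return 0;
}
```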
101 changes: 101 additions & 0 deletions api_extension/C/fused_conv_eltwise.h
@@ -0,0 +1,101 @@
/*
// Copyright (c) 2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

///////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef FUSED_CONV_ELTWISE_H
#define FUSED_CONV_ELTWISE_H

#include "api/C/cldnn.h"
/// @addtogroup c_api C API
/// @{
/// @addtogroup c_topology Network Topology
/// @{
/// @addtogroup c_primitives Primitives
/// @{

#ifdef __cplusplus
extern "C" {
#endif

/// @brief Performs forward spatial convolution with weight sharing fused with eltwise.
/// Also supports built-in Relu @CLDNN_PRIMITIVE_DESC{activation}, configured separately for the convolution and the eltwise parts via the arguments.
CLDNN_BEGIN_PRIMITIVE_DESC(fused_conv_eltwise)

struct conv_data
{
/// @brief Defines a shift, relative to (0,0) position of the input buffer, where (0,0) point of the convolution window should start calculations.
cldnn_tensor input_offset;
/// @brief Defines shift in input buffer between adjacent calculations of output values.
cldnn_tensor stride;
/// @brief Defines gaps in the input - dilation rate k=1 is normal convolution, k=2 means skipping one pixel per input, k=4 means skipping 3 pixels.
/// As an example in one dimension, a filter w of size 3 would compute over input x the following: w[0]*x[0] + w[1]*x[1] + w[2]*x[2] for dilation of 1.
/// For dilation 2 the filter would instead compute w[0]*x[0] + w[1]*x[2] + w[2]*x[4].
cldnn_tensor dilation;
/// @brief Enable Relu activation.
uint32_t with_activation;
/// @brief Relu activation slope.
float activation_negative_slope;
/// @brief Defines how many cards the computation is split across.
uint32_t split;
/// @brief Indicates that the primitive has user-defined output size (non-zero value).
uint32_t with_output_size;
/// @brief User-defined output data size of the primitive (w/o padding).
cldnn_tensor output_size;
/// @brief Array of primitive ids containing weights data. Size of array should be equivalent to @p split.
cldnn_primitive_id_arr weights;
/// @brief Array of primitive ids containing bias data. Size of array should be equivalent to @p split.
cldnn_primitive_id_arr bias;
/// @brief List of primitive ids containing weights quantization factors per output feature map.
cldnn_primitive_id_arr weights_quantization_factors;
/// @brief List of primitive ids containing output calibration factors per output feature map.
cldnn_primitive_id_arr output_calibration_factors;
/// @brief Input quantization factor
float input_quantization_factor;
/// @brief Output quantization factor
float output_quantization_factor;
} conv;

struct eltw_data
{
/// @brief Primitive id containing output quantization factors per output feature map.
cldnn_primitive_id output_calibration_factors;
/// @brief Output quantization factor
float output_quantization_factor;
/// @brief Eltwise mode. See #cldnn_eltwise_mode.
int32_t mode; /*cldnn_eltwise_mode*/
/// @brief Blob-wise coefficient for SUM operation
cldnn_float_arr coefficients;
/// @brief Enables Relu activation.
uint32_t with_activation;
/// @brief Relu activation slope.
float activation_negative_slope;
/// @brief Defines shift in input buffers between adjacent calculations of output values.
cldnn_tensor_arr stride;
} eltw;

CLDNN_END_PRIMITIVE_DESC(fused_conv_eltwise)

CLDNN_DECLARE_PRIMITIVE_TYPE_ID(fused_conv_eltwise);

#ifdef __cplusplus
}
#endif

/// @}
/// @}
/// @}
#endif /* FUSED_CONV_ELTWISE_H */
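
The descriptor above bundles a convolution (with its own optional ReLU, quantization and calibration factors) with an eltwise stage applied to the convolution output. To make the data flow concrete, here is a tiny self-contained reference loop, not the clDNN API, showing what the fused primitive computes for a 1-D, single-feature, stride-1, dilation-1 case with eltwise mode SUM and ReLU enabled on the eltwise stage; all names are illustrative:

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

// Reference of the fused computation: y[i] = relu( conv(x, w)[i] + residual[i] ).
std::vector<float> fused_conv_eltwise_ref(const std::vector<float>& x,
                                          const std::vector<float>& w,
                                          const std::vector<float>& residual,
                                          bool eltw_relu)
{
    const size_t out_size = x.size() - w.size() + 1;   // "valid" convolution, no padding
    std::vector<float> y(out_size, 0.0f);
    for (size_t i = 0; i < out_size; ++i) {
        float acc = 0.0f;
        for (size_t k = 0; k < w.size(); ++k)
            acc += w[k] * x[i + k];                    // convolution part (conv_data)
        acc += residual[i];                            // eltwise SUM part (eltw_data)
        y[i] = eltw_relu ? std::max(acc, 0.0f) : acc;  // eltw_data.with_activation
    }
    return y;
}

int main() {
    std::vector<float> x = {1, 2, 3, 4, 5};
    std::vector<float> w = {0.5f, -1.0f, 0.5f};
    std::vector<float> residual = {0.0f, 1.0f, -2.0f};
    for (float v : fused_conv_eltwise_ref(x, w, residual, /*eltw_relu=*/true))
        std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}
```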

(Diffs for the remaining changed files are not shown.)
