Commit f07cee8

fixes after rebase and review fixes

antonvor committed May 12, 2021
1 parent a2b0c40 commit f07cee8
Showing 8 changed files with 146 additions and 213 deletions.
@@ -138,7 +138,8 @@ const std::vector<fusingSpecificParams> fusingParamsSet{
         fusingFakeQuantizePerChannelRelu,
         fusingSumEluFQ,
         fusingSum,
-        fusingPRelu1D
+        fusingPRelu1D,
+        fusingAddPerChannel
 };

 const std::vector<fusingSpecificParams> fusingParamsSetBF16{
@@ -153,7 +154,8 @@ const std::vector<fusingSpecificParams> fusingParamsSetBF16{
         // other patterns
         fusingReluAdd,
         fusingReluScaleShift,
-        fusingSum
+        fusingSum,
+        fusingAddPerChannel
 };

 const std::vector<fusingSpecificParams> fusingParamsSetI8{
@@ -163,18 +165,20 @@ const std::vector<fusingSpecificParams> fusingParamsSetI8{
         fusingElu,
         fusingSigmoid,
         fusingClamp,
-        fusingPRelu,
+        fusingPReluPerChannel,
         // todo: [antonvor] not supported yet
         // fusingSwish,
         fusingHSwish,
         fusingMish,
         // other patterns
-        fusingReluScaleShift,
+        // todo: [antonvor] not supported yet
+        // fusingReluScaleShift,
         fusingFakeQuantizePerTensorRelu,
         fusingFakeQuantizePerChannelRelu,
         // todo: [antonvor] not supported yet
         // fusingSumEluFQ,
-        // fusingSum
+        // fusingSum,
+        // fusingAddPerChannel
 };

 const std::map<std::string, std::string> cpuEmptyPluginConfig;
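The fusingAddPerChannel entry added above is defined in the shared fusing test utilities, whose diff is not rendered here. As a rough, hypothetical sketch of the graph pattern it presumably exercises (function name, values, and layout are illustrative, not the actual test code): the post-op appends an Add whose constant has one value per output channel, which the CPU plugin can fuse into the convolution.

    // Hypothetical sketch of the pattern behind fusingAddPerChannel:
    // Convolution output + {1, C, 1, 1} constant, NCHW layout assumed.
    #include <ngraph/ngraph.hpp>
    #include <ngraph/opsets/opset1.hpp>

    std::shared_ptr<ngraph::Node> appendPerChannelAdd(const std::shared_ptr<ngraph::Node>& parent) {
        const auto channels = parent->get_shape()[1];       // C in NCHW
        const ngraph::Shape constShape{1, channels, 1, 1};  // broadcasts over N, H, W
        const std::vector<float> values(channels, 1.0f);    // placeholder per-channel data
        auto addConst = ngraph::opset1::Constant::create(ngraph::element::f32, constShape, values);
        return std::make_shared<ngraph::opset1::Add>(parent, addConst);
    }

The {1, C, 1, 1} shape carries one value per output channel, i.e. a bias-like per-channel post-op rather than a single per-tensor scalar.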

Large diffs are not rendered by default.

@@ -138,7 +138,8 @@ std::vector<fusingSpecificParams> fusingParamsSet {
         fusingFakeQuantizePerChannelRelu,
         fusingSumEluFQ,
         fusingSum,
-        fusingPRelu1D
+        fusingPRelu1D,
+        fusingAddPerChannel
 };

Large diffs are not rendered by default.

@@ -100,9 +100,7 @@ std::shared_ptr<ngraph::Node> makeConvolutionRelaxed(const ngraph::Output<Node> &in,
         const std::vector<size_t> &dilations,
         const op::PadType &autoPad,
         size_t numOutChannels,
-        bool addBiases = false,
-        const std::vector<float> &filterWeights = {},
-        const std::vector<float> &biasesWeights = {});
+        const std::vector<float> &filterWeights = {});

 std::shared_ptr<ngraph::Node> makeGroupConvolution(const ngraph::Output<Node> &in,
         const element::Type &type,
@@ -163,9 +161,7 @@ std::shared_ptr<ngraph::Node> makeConvolutionBackpropDataRelaxed(const ngraph::Output<Node> &in,
         const std::vector<size_t> &dilations,
         const op::PadType &autoPad,
         size_t numOutChannels,
-        bool addBiases = false,
-        const std::vector<float> &filterWeights = {},
-        const std::vector<float> &biasesWeights = {});
+        const std::vector<float> &filterWeights = {});

 std::shared_ptr<ngraph::Node> makeCTCGreedyDecoder(
         const ngraph::Output<Node>& inputData,
@@ -232,9 +228,7 @@ std::shared_ptr<Node> makeGroupConvolutionBackpropDataRelaxed(const ngraph::Output<Node> &in,
         const op::PadType &autoPad,
         size_t numOutChannels,
         size_t numGroups,
-        bool addBiases = false,
-        const std::vector<float> &filterWeights = {},
-        const std::vector<float> &biasesWeights = {});
+        const std::vector<float> &filterWeights = {});

 std::shared_ptr<ngraph::Node> makeBinaryConvolution(const ngraph::Output<Node> &in,
         const std::vector<size_t> &filterSize,
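With the bias parameters gone, the Relaxed builders keep only the weight-related trailing parameters. A hedged call-site sketch for the trimmed makeConvolutionRelaxed signature; the leading parameters are assumed to mirror makeConvolution (input, element type, filter size, strides, pads, dilations), and all values here are made up for illustration:

    // Assumed usage under the trimmed signature; inputParam and the
    // kernel/stride/pad values are illustrative, not from this commit.
    auto conv = ngraph::builder::makeConvolutionRelaxed(
            inputParam,                    // ngraph::Output<Node>
            ngraph::element::f32,          // element type
            {3, 3},                        // filter size
            {1, 1},                        // strides
            {1, 1}, {1, 1},                // pads begin / end
            {1, 1},                        // dilations
            ngraph::op::PadType::EXPLICIT,
            16);                           // numOutChannels; filterWeights defaults to {} (random)

Callers that previously passed addBiases = true would now compose the bias Add themselves; see the sketch after the next diff.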
@@ -48,9 +48,7 @@ std::shared_ptr<Node> makeConvolutionRelaxed(const ngraph::Output<Node> &in,
         const std::vector<size_t> &dilations,
         const op::PadType &autoPad,
         size_t numOutChannels,
-        bool addBiases,
-        const std::vector<float> &filterWeights,
-        const std::vector<float> &biasesWeights) {
+        const std::vector<float> &filterWeights) {
     auto inputParamsFP32 = ngraph::builder::makeParams(ngraph::element::f32, { in.get_shape() });
     auto paramOutsFP32 = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(inputParamsFP32));

@@ -68,14 +66,7 @@ std::shared_ptr<Node> makeConvolutionRelaxed(const ngraph::Output<Node> &in,
     auto newConvolution = convolutionNodeRelaxed->copy_with_new_inputs(
             {in, filterWeightsNode});

-    if (addBiases) {
-        bool randomBiases = biasesWeights.empty();
-        auto biasesWeightsNode = makeConstant(element::f32, {1, numOutChannels , 1, 1}, biasesWeights, randomBiases);
-        auto add = std::make_shared<ngraph::opset1::Add>(newConvolution, biasesWeightsNode);
-        return add;
-    } else {
-        return newConvolution;
-    }
+    return newConvolution;
 }

 } // namespace builder
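Since the builder no longer creates the bias, a test that still needs one presumably appends the Add at the call site. A minimal sketch mirroring the removed branch (an assumption about intended usage, not code from this commit):

    // Mirrors the deleted addBiases branch, composed outside the builder.
    std::shared_ptr<ngraph::Node> withRandomBias(const std::shared_ptr<ngraph::Node>& conv,
                                                 size_t numOutChannels) {
        // Same {1, C, 1, 1} random constant the removed code built.
        auto biases = ngraph::builder::makeConstant(ngraph::element::f32,
                {1, numOutChannels, 1, 1}, std::vector<float>{}, /*random=*/true);
        return std::make_shared<ngraph::opset1::Add>(conv, biases);
    }

Note that the two deconvolution variants below built a scalar-shaped constant (makeConstant(outType, {}, ...)) rather than a per-channel one.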
@@ -63,9 +63,7 @@ std::shared_ptr<Node> makeConvolutionBackpropDataRelaxed(const ngraph::Output<Node> &in,
         const std::vector<size_t> &dilations,
         const op::PadType &autoPad,
         size_t numOutChannels,
-        bool addBiases,
-        const std::vector<float> &filterWeights,
-        const std::vector<float> &biasesWeights) {
+        const std::vector<float> &filterWeights) {
     auto inputParamsFP32 = ngraph::builder::makeParams(ngraph::element::f32, { in.get_shape() });
     auto paramOutsFP32 = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(inputParamsFP32));

@@ -83,14 +81,7 @@ std::shared_ptr<Node> makeConvolutionBackpropDataRelaxed(const ngraph::Output<Node> &in,
     auto newDeconvolution = deconvolutionNodeRelaxed->copy_with_new_inputs(
             {in, filterWeightsNode});

-    if (addBiases) {
-        bool randomBiases = biasesWeights.empty();
-        auto biasesWeightsNode = makeConstant(outType, {}, biasesWeights, randomBiases);
-        auto add = std::make_shared<ngraph::opset1::Add>(newDeconvolution, biasesWeightsNode);
-        return add;
-    } else {
-        return newDeconvolution;
-    }
+    return newDeconvolution;
 }

 } // namespace builder
@@ -69,9 +69,7 @@ std::shared_ptr<Node> makeGroupConvolutionBackpropDataRelaxed(const ngraph::Output<Node> &in,
         const op::PadType &autoPad,
         size_t numOutChannels,
         size_t numGroups,
-        bool addBiases,
-        const std::vector<float> &filterWeights,
-        const std::vector<float> &biasesWeights) {
+        const std::vector<float> &filterWeights) {
     auto inputParamsFP32 = ngraph::builder::makeParams(ngraph::element::f32, { in.get_shape() });
     auto paramOutsFP32 = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(inputParamsFP32));

@@ -92,14 +90,7 @@ std::shared_ptr<Node> makeGroupConvolutionBackpropDataRelaxed(const ngraph::Output<Node> &in,
     auto newGroupDeconvolution = groupDeconvolutionNodeRelaxed->copy_with_new_inputs(
             {in, filterWeightsNode});

-    if (addBiases) {
-        bool randomBiases = biasesWeights.empty();
-        auto biasesWeightsNode = makeConstant(outType, {}, biasesWeights, randomBiases);
-        auto add = std::make_shared<ngraph::opset1::Add>(newGroupDeconvolution, biasesWeightsNode);
-        return add;
-    } else {
-        return newGroupDeconvolution;
-    }
+    return newGroupDeconvolution;
 }

 } // namespace builder
