Add ScaleShiftLayer #3560
@@ -0,0 +1,106 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "Layer.h"

namespace paddle {

/**
 * A layer that scales and shifts the input element-wise by applying a
 * trainable slope and intercept:
 *
 * \f[
 *    y = wx + b
 * \f]
 *
 * Here, w is the scale and b is the offset; both are trainable scalars.
 *
 */

class ScaleShiftLayer : public Layer {
protected:
  std::unique_ptr<Weight> scale_;
  std::unique_ptr<Weight> offset_;

public:
  explicit ScaleShiftLayer(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  void forward(PassType passType) override;
  void backward(const UpdateCallback& callback = nullptr) override;
};

REGISTER_LAYER(scale_shift, ScaleShiftLayer);

bool ScaleShiftLayer::init(const LayerMap& layerMap,
                           const ParameterMap& parameterMap) {
  Layer::init(layerMap, parameterMap);
  CHECK_EQ(inputLayers_.size(), 1U);
  scale_.reset(new Weight(1, 1, parameters_[0]));
  if (biasParameter_.get() != NULL) {
    offset_ = std::unique_ptr<Weight>(new Weight(1, 1, biasParameter_));
  }
  return true;
}

void ScaleShiftLayer::forward(PassType passType) {
  Layer::forward(passType);

  MatrixPtr inV = getInputValue(0);
  resetOutput(inV->getHeight(), inV->getWidth());
  MatrixPtr outV = getOutputValue();
  real scaleValue = scale_->getW()->getElement(0, 0);
  outV->mulScalar(*inV, scaleValue);
  if (offset_) {
    real offsetValue = offset_->getW()->getElement(0, 0);
    outV->add(offsetValue);
  }
}

void ScaleShiftLayer::backward(const UpdateCallback& callback) {
  MatrixPtr inV = getInputValue(0);
  MatrixPtr inG = getInputGrad(0);
  MatrixPtr outV = getOutputValue();
  MatrixPtr outG = getOutputGrad();

  /* Calculate the parameter gradient for the current layer */
  if (scale_->getWGrad()) {
    MatrixPtr rowSumMtx;
    Matrix::resizeOrCreate(rowSumMtx, outG->getHeight(), 1, false, useGpu_);
    // this_i = scaleDest * this_i + scaleSum * \sum_j b_{ij} * c_{ij}
    rowSumMtx->sumOfProducts(
        /* b= */ *inV, /* c= */ *outG, /* scaleSum= */ 1, /* scaleDest= */ 0.);
    // this_i = scaleDest * this_i + scaleSum * \sum_j b_{ji}
    scale_->getWGrad()->sumCols(
        /* b= */ *rowSumMtx, /* scaleSum= */ 1., /* scaleDest= */ 1.);
    scale_->getParameterPtr()->incUpdate(callback);
  }
  if (offset_ && offset_->getWGrad()) {
    MatrixPtr rowSumMtx;
    Matrix::resizeOrCreate(rowSumMtx, outG->getHeight(), 1, false, useGpu_);
    rowSumMtx->sumRows(*outG, 1., 0.);
    offset_->getWGrad()->sumCols(*rowSumMtx, 1., 1.);
    offset_->getParameterPtr()->incUpdate(callback);
  }

  /* Calculate the input layer's error */
  if (inG) {
    real scaleValue = scale_->getW()->getElement(0, 0);
    inG->add(*outG, scaleValue);
  }
}

}  // namespace paddle
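For readers checking the math: the backward pass above computes dL/dw = Σ x·(dL/dy), dL/db = Σ dL/dy, and dL/dx = w·(dL/dy). Below is a minimal numpy sketch of the same computation — illustrative only, not PaddlePaddle code; the names and shapes are assumptions.

```python
import numpy as np

# Minimal sketch of ScaleShiftLayer's math (illustrative, not Paddle code).
# x: (batch, size) input; w, b: trainable scalars.
def forward(x, w, b):
    return w * x + b  # y = wx + b, element-wise

def backward(x, w, grad_out):
    grad_w = np.sum(x * grad_out)   # mirrors sumOfProducts + sumCols
    grad_b = np.sum(grad_out)       # mirrors sumRows + sumCols
    grad_x = w * grad_out           # mirrors inG->add(*outG, scaleValue)
    return grad_w, grad_b, grad_x

x = np.random.randn(4, 8)
y = forward(x, 0.5, 0.1)
gw, gb, gx = backward(x, 0.5, np.ones_like(y))  # upstream gradient of ones
```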
@@ -2007,6 +2007,21 @@ TEST(Layer, RowL2NormLayer) {
  }
}
TEST(Layer, ScaleShiftLayer) {
  const size_t batchSize = 128;
  const size_t size = 512;

Review comment: Reduce these two sizes; this layer is not very sensitive to size, so there is no need to test such a large layer.
Reply: Done.
  TestConfig config;
  config.layerConfig.set_type("scale_shift");
  config.layerConfig.set_size(size);
  config.biasSize = 1;
  config.inputDefs.push_back(
      {INPUT_DATA, "input", /* dim= */ size, /* paraSize= */ 1});
  config.layerConfig.add_inputs();
  for (auto useGpu : {false, true}) {
    testLayerGrad(config, "scale_shift", batchSize, false, useGpu, false);
  }
}
|
||
int main(int argc, char** argv) { | ||
testing::InitGoogleTest(&argc, argv); | ||
initMain(argc, argv); | ||
|
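testLayerGrad checks the analytic gradients against finite differences; the same idea in a hedged numpy sketch — a central-difference check on the scalar w, under an assumed squared-error loss:

```python
import numpy as np

# Central-difference check on the scale w, in the spirit of testLayerGrad
# (illustrative sketch; the loss function here is an assumption).
def loss(x, w, b):
    return np.sum((w * x + b) ** 2)

x = np.random.randn(4, 8)
w, b, eps = 0.5, 0.1, 1e-5
analytic = np.sum(2 * (w * x + b) * x)  # dL/dw by the chain rule
numeric = (loss(x, w + eps, b) - loss(x, w - eps, b)) / (2 * eps)
assert np.isclose(analytic, numeric, rtol=1e-4)
```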
@@ -133,6 +133,7 @@
    'clip_layer',
    'slice_projection',
    'kmax_sequence_score_layer',
    'scale_shift_layer',
]
@@ -230,6 +231,7 @@ class LayerType(object):
    CLIP_LAYER = 'clip'

    KMAX_SEQ_SCORE = 'kmax_seq_score'
    SCALE_SHIFT_LAYER = 'scale_shift'

    @staticmethod
    def is_layer_type(type_name):
@@ -6210,3 +6212,38 @@ def kmax_sequence_score_layer(input, name=None, beam_size=1):

    return LayerOutput(
        name, LayerType.KMAX_SEQ_SCORE, parents=[input], size=input.size)


@wrap_name_default("scale_shift")
@wrap_param_attr_default()
@wrap_bias_attr_default()
def scale_shift_layer(input, name=None, param_attr=None, bias_attr=None):
    """
    A layer that scales and shifts the input element-wise by applying a
    trainable slope and intercept.

    .. math::

        y = w * x + b

    .. code-block:: python

        scale_shift = scale_shift_layer(input=input_layer, bias_attr=False)

    :param name: The layer name.
    :type name: basestring
    :param input: The input layer.
    :type input: LayerOutput
    :param param_attr: The parameter attribute of scaling.
    :type param_attr: ParameterAttribute
    :param bias_attr: The parameter attribute of shifting.
    :type bias_attr: ParameterAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    Layer(
        name=name,
        type=LayerType.SCALE_SHIFT_LAYER,
        inputs=Input(input.name, **param_attr.attr),
        bias=ParamAttr.to_bias(bias_attr))
Review comment: One thing to confirm: if the user does not specify the initialization mean, std, and strategy for the parameter w, how will w be initialized? Could it unexpectedly end up set to 0, in which case the downstream network would effectively be dead? Has the default-initialization logic been checked?
Reply: Done. Checked; with the current settings the parameter is initialized with mean=0 and std=1.0 by default.
    return LayerOutput(
        name, LayerType.SCALE_SHIFT_LAYER, parents=[input], size=input.size)
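As the review exchange above notes, an unspecified param_attr falls back to the default initialization (mean=0.0, std=1.0; see the generated parameters block below). A hedged sketch of pinning the initialization explicitly, assuming the standard ParameterAttribute arguments; input_layer is a placeholder name:

```python
# Pin the initialization explicitly instead of relying on the defaults
# (illustrative sketch; input_layer is a placeholder).
scale_shift = scale_shift_layer(
    input=input_layer,
    param_attr=ParameterAttribute(initial_mean=1.0, initial_std=0.0),
    bias_attr=ParameterAttribute(initial_mean=0.0, initial_std=0.0))
```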
@@ -0,0 +1,72 @@
type: "nn" | ||
layers { | ||
name: "data" | ||
type: "data" | ||
size: 100 | ||
active_type: "" | ||
} | ||
layers { | ||
name: "__scale_shift_0__" | ||
type: "scale_shift" | ||
size: 100 | ||
active_type: "" | ||
inputs { | ||
input_layer_name: "data" | ||
input_parameter_name: "___scale_shift_0__.w0" | ||
} | ||
bias_parameter_name: "___scale_shift_0__.wbias" | ||
} | ||
layers { | ||
name: "__scale_shift_1__" | ||
type: "scale_shift" | ||
size: 100 | ||
active_type: "" | ||
inputs { | ||
input_layer_name: "data" | ||
input_parameter_name: "___scale_shift_1__.w0" | ||
} | ||
} | ||
parameters { | ||
name: "___scale_shift_0__.w0" | ||
size: 1 | ||
initial_mean: 0.0 | ||
initial_std: 1.0 | ||
dims: 1 | ||
dims: 1 | ||
initial_strategy: 0 | ||
initial_smart: true | ||
} | ||
parameters { | ||
name: "___scale_shift_0__.wbias" | ||
size: 1 | ||
initial_mean: 0.0 | ||
initial_std: 0.0 | ||
dims: 1 | ||
dims: 1 | ||
initial_strategy: 0 | ||
initial_smart: false | ||
} | ||
parameters { | ||
name: "___scale_shift_1__.w0" | ||
size: 1 | ||
initial_mean: 0.0 | ||
initial_std: 1.0 | ||
dims: 1 | ||
dims: 1 | ||
initial_strategy: 0 | ||
initial_smart: true | ||
} | ||
input_layer_names: "data" | ||
output_layer_names: "__scale_shift_0__" | ||
output_layer_names: "__scale_shift_1__" | ||
sub_models { | ||
name: "root" | ||
layer_names: "data" | ||
layer_names: "__scale_shift_0__" | ||
layer_names: "__scale_shift_1__" | ||
input_layer_names: "data" | ||
output_layer_names: "__scale_shift_0__" | ||
output_layer_names: "__scale_shift_1__" | ||
is_recurrent_layer_group: false | ||
} | ||
@@ -0,0 +1,11 @@
from paddle.trainer_config_helpers import *

settings(batch_size=1000, learning_rate=1e-5)

Review comment: This line can be removed.
Reply: Done.

data = data_layer(name='data', size=100)

scale = scale_shift_layer(input=data)

scale_shift = scale_shift_layer(input=data, bias_attr=False)

outputs(scale, scale_shift)
Review comment: w is the scale and b is the bias. Both w and b are trainable scalars.
Reply: Done. Thanks.