Skip to content

Commit

Permalink
Adding uniform, normal, constant initializers
Browse files Browse the repository at this point in the history
  • Loading branch information
pavanky committed Jul 24, 2017
1 parent 5945d79 commit 5f571e0
Show file tree
Hide file tree
Showing 9 changed files with 240 additions and 73 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ target_sources(afml
src/nn/Modules/Container.cpp
src/nn/Modules/Linear.cpp
src/nn/Modules/Module.cpp
src/nn/Types.cpp
src/nn/Init.cpp
)

target_include_directories(afml
Expand Down
2 changes: 1 addition & 1 deletion include/af/nn.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,4 +10,4 @@
#pragma once

#include <af/nn/Modules.hpp>
#include <af/nn/Types.hpp>
#include <af/nn/Init.hpp>
74 changes: 74 additions & 0 deletions include/af/nn/Init.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
/*******************************************************
* Copyright (c) 2017, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#pragma once

#include <af/autograd/Variable.hpp>

namespace af {
namespace nn {

/// Wraps an array as a Variable that does NOT require gradients (network input).
autograd::Variable input(const af::array &arr);

/// Wraps an array as a trainable Variable that DOES require gradients.
autograd::Variable parameter(const af::array &arr);

// NOTE: parameter order below is (output_size, input_size) to match the
// definitions in src/nn/Init.cpp, which build af::dim4(output_size, input_size)
// (and the call sites, e.g. Linear). The previous declarations named the
// parameters in the opposite order, which was misleading; types are unchanged,
// so callers are unaffected.

/// Values drawn uniformly from [min, max).
autograd::Variable uniform(int output_size, int input_size,
                           double min = 0, double max = 1,
                           af::dtype type = f32, bool calc_grad=true);

autograd::Variable uniform(af::dim4 dims,
                           double min = 0, double max = 1,
                           af::dtype type = f32, bool calc_grad=true);

/// Values drawn from N(mean, stdv^2). Note: stdv precedes mean.
autograd::Variable normal(int output_size, int input_size,
                          double stdv = 1, double mean = 0,
                          af::dtype type = f32, bool calc_grad=true);

autograd::Variable normal(af::dim4 dims,
                          double stdv = 1, double mean = 0,
                          af::dtype type = f32, bool calc_grad=true);

/// LeCun uniform: U[-limit, limit] with limit = sqrt(3 / fan_in).
autograd::Variable lecunUniform(int output_size, int input_size,
                                af::dtype type = f32, bool calc_grad=true);

autograd::Variable lecunUniform(af::dim4 dims,
                                af::dtype type = f32, bool calc_grad=true);

/// LeCun normal: N(0, 1 / fan_in).
autograd::Variable lecunNormal(int output_size, int input_size,
                               af::dtype type = f32, bool calc_grad=true);

autograd::Variable lecunNormal(af::dim4 dims,
                               af::dtype type = f32, bool calc_grad=true);

/// Glorot/Xavier uniform: U[-limit, limit], limit = sqrt(6 / (fan_in + fan_out)).
autograd::Variable glorotUniform(int output_size, int input_size,
                                 af::dtype type = f32, bool calc_grad=true);

autograd::Variable glorotUniform(af::dim4 dims,
                                 af::dtype type = f32, bool calc_grad=true);

/// Glorot/Xavier normal: N(0, 2 / (fan_in + fan_out)).
autograd::Variable glorotNormal(int output_size, int input_size,
                                af::dtype type = f32, bool calc_grad=true);

autograd::Variable glorotNormal(af::dim4 dims,
                                af::dtype type = f32, bool calc_grad=true);

/// Array filled with the constant `val`.
autograd::Variable constant(double val, int output_size, int input_size,
                            af::dtype type = f32, bool calc_grad=true);

autograd::Variable constant(double val, af::dim4 dims,
                            af::dtype type = f32, bool calc_grad=true);

/// Identity matrix: ones on the main diagonal, zeros elsewhere.
autograd::Variable identity(int output_size, int input_size,
                            af::dtype type = f32, bool calc_grad=true);

autograd::Variable identity(af::dim4 dims,
                            af::dtype type = f32, bool calc_grad=true);

}
}
14 changes: 7 additions & 7 deletions include/af/nn/Modules/Activations.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,26 +35,26 @@ namespace af
{
public:
ReLU();

autograd::Variable forward(const autograd::Variable &input);
};

class LeakyReLU : public Module
{
private:
double m_slope;
public:
LeakyReLU(double slope = 0.0);

autograd::Variable forward(const autograd::Variable &input);
};

class PReLU : public Module
{
public:
PReLU(int size, double spread = 1.0);
PReLU(int size, double value = 1.0);
PReLU(const autograd::Variable &w);

autograd::Variable forward(const autograd::Variable &input);
};

Expand All @@ -74,11 +74,11 @@ namespace af
double m_threshold;
public:
ThresholdReLU(double threshold = 1.0);

autograd::Variable forward(const autograd::Variable &input);
};



}
}
22 changes: 0 additions & 22 deletions include/af/nn/Types.hpp

This file was deleted.

151 changes: 151 additions & 0 deletions src/nn/Init.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
/*******************************************************
* Copyright (c) 2017, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/

#include <cmath>

#include <af/nn/Init.hpp>

namespace af {
namespace nn {

using autograd::Variable;

Variable input(const af::array &arr)
{
    // Network inputs are constants with respect to differentiation.
    const bool requires_grad = false;
    return Variable(arr, requires_grad);
}

Variable parameter(const af::array &arr)
{
    // Parameters are trainable: gradient tracking is enabled.
    const bool requires_grad = true;
    return Variable(arr, requires_grad);
}

autograd::Variable uniform(int output_size, int input_size,
                           double min, double max,
                           af::dtype type, bool calc_grad)
{
    // Convenience overload: build a (output_size, input_size) shape and
    // forward to the dim4 version.
    const af::dim4 shape(output_size, input_size);
    return nn::uniform(shape, min, max, type, calc_grad);
}

autograd::Variable uniform(af::dim4 dims, double min, double max,
                           af::dtype type, bool calc_grad)
{
    // Draw from U[0,1), then rescale affinely to [min, max) unless the
    // requested range is already the unit interval.
    af::array vals = af::randu(dims, type);
    const bool unit_range = (min == 0 && max == 1);
    if (!unit_range) {
        vals = min + (max - min) * vals;
    }
    return Variable(vals, calc_grad);
}

autograd::Variable normal(int output_size, int input_size,
                          double stdv, double mean,
                          af::dtype type, bool calc_grad)
{
    // Convenience overload: forward to the dim4 version. Note the
    // (stdv, mean) argument order of the underlying API.
    const af::dim4 shape(output_size, input_size);
    return nn::normal(shape, stdv, mean, type, calc_grad);
}

autograd::Variable normal(af::dim4 dims, double stdv, double mean,
                          af::dtype type, bool calc_grad)
{
    // Draw from N(0,1), then shift/scale to N(mean, stdv^2) unless the
    // standard parameters were requested.
    af::array vals = af::randn(dims, type);
    const bool standard = (mean == 0 && stdv == 1);
    if (!standard) {
        vals = mean + stdv * vals;
    }
    return Variable(vals, calc_grad);
}

autograd::Variable lecunUniform(int output_size, int input_size,
                                af::dtype type, bool calc_grad)
{
    // Convenience overload: forward a (output_size, input_size) shape.
    const af::dim4 shape(output_size, input_size);
    return nn::lecunUniform(shape, type, calc_grad);
}

// LeCun uniform initializer: U[-limit, limit] with limit = sqrt(3) * stdv and
// stdv = sqrt(1 / fan_in), giving element variance 1 / fan_in.
autograd::Variable lecunUniform(af::dim4 dims,
af::dtype type, bool calc_grad)
{
dim_t elements = dims.elements();
// NOTE(review): elements / dims[1] equals dims[0] for a 2-D weight. Since the
// int overload builds dim4(output_size, input_size), this computes the
// *output* count, while fan_in is conventionally the input count
// (elements / dims[0]). TODO confirm the intended weight layout.
dim_t fan_in = elements / dims[1];
double stdv = ::sqrt(1.0/(double)fan_in);
double limit = ::sqrt(3.0) * stdv;
return nn::uniform(dims, -limit, limit, type, calc_grad);
}

autograd::Variable lecunNormal(int output_size, int input_size,
                               af::dtype type, bool calc_grad)
{
    // Convenience overload: forward a (output_size, input_size) shape.
    const af::dim4 shape(output_size, input_size);
    return nn::lecunNormal(shape, type, calc_grad);
}

// LeCun normal initializer: N(0, stdv^2) with stdv = sqrt(1 / fan_in).
autograd::Variable lecunNormal(af::dim4 dims,
                               af::dtype type, bool calc_grad)
{
    dim_t elements = dims.elements();
    // NOTE(review): for a 2-D (output, input) weight this evaluates to the
    // *output* count; fan_in is conventionally the input count — TODO confirm
    // the intended weight layout (same question applies to lecunUniform).
    dim_t fan_in = elements / dims[1];
    double stdv = ::sqrt(1.0/(double)fan_in);
    // BUG FIX: normal(dims, stdv, mean, ...) takes stdv *before* mean. The
    // previous call passed (0, stdv), i.e. stdv = 0 and mean = stdv, which
    // produced a constant array of value stdv instead of N(0, stdv^2).
    return nn::normal(dims, stdv, 0, type, calc_grad);
}

autograd::Variable glorotUniform(int output_size, int input_size,
                                 af::dtype type, bool calc_grad)
{
    // Convenience overload: forward a (output_size, input_size) shape.
    const af::dim4 shape(output_size, input_size);
    return nn::glorotUniform(shape, type, calc_grad);
}

// Glorot/Xavier uniform initializer: U[-limit, limit] with
// limit = sqrt(3) * stdv and stdv = sqrt(2 / (fan_in + fan_out)),
// giving element variance 2 / (fan_in + fan_out).
autograd::Variable glorotUniform(af::dim4 dims,
af::dtype type, bool calc_grad)
{
dim_t elements = dims.elements();
// NOTE(review): for a 2-D (output, input) weight these two labels appear
// swapped (elements / dims[1] == dims[0] == output count). Harmless here,
// since only the sum fan_in + fan_out is used.
dim_t fan_in = elements / dims[1];
dim_t fan_out = elements / dims[0];
double stdv = ::sqrt(2.0/(double)(fan_in + fan_out));
double limit = ::sqrt(3.0) * stdv;
return nn::uniform(dims, -limit, limit, type, calc_grad);
}

autograd::Variable glorotNormal(int output_size, int input_size,
                                af::dtype type, bool calc_grad)
{
    // Convenience overload: forward a (output_size, input_size) shape.
    const af::dim4 shape(output_size, input_size);
    return nn::glorotNormal(shape, type, calc_grad);
}

// Glorot/Xavier normal initializer: N(0, stdv^2) with
// stdv = sqrt(2 / (fan_in + fan_out)).
autograd::Variable glorotNormal(af::dim4 dims,
                                af::dtype type, bool calc_grad)
{
    dim_t elements = dims.elements();
    // NOTE(review): for 2-D dims the fan labels appear swapped, but only
    // their sum is used, so the result is unaffected.
    dim_t fan_in = elements / dims[1];
    dim_t fan_out = elements / dims[0];
    double stdv = ::sqrt(2.0/(double)(fan_in + fan_out));
    // BUG FIX: normal(dims, stdv, mean, ...) takes stdv *before* mean. The
    // previous call passed (0, stdv), i.e. stdv = 0 and mean = stdv, which
    // produced a constant array of value stdv instead of N(0, stdv^2).
    return nn::normal(dims, stdv, 0, type, calc_grad);
}

autograd::Variable constant(double val, int output_size, int input_size,
                            af::dtype type, bool calc_grad)
{
    // Convenience overload: forward a (output_size, input_size) shape.
    const af::dim4 shape(output_size, input_size);
    return nn::constant(val, shape, type, calc_grad);
}

autograd::Variable constant(double val, af::dim4 dims,
                            af::dtype type, bool calc_grad)
{
    // Fill an array of the requested shape and type with `val`.
    af::array filled = af::constant(val, dims, type);
    return Variable(filled, calc_grad);
}

autograd::Variable identity(int output_size, int input_size,
                            af::dtype type, bool calc_grad)
{
    // Convenience overload: forward a (output_size, input_size) shape.
    const af::dim4 shape(output_size, input_size);
    return nn::identity(shape, type, calc_grad);
}

autograd::Variable identity(af::dim4 dims,
                            af::dtype type, bool calc_grad)
{
    // Identity matrix of the requested shape/type (ones on the diagonal).
    af::array eye = af::identity(dims, type);
    return Variable(eye, calc_grad);
}
}
}
6 changes: 3 additions & 3 deletions src/nn/Modules/Activations.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@

#include <af/autograd/Functions.hpp>
#include <af/nn/Modules/Activations.hpp>
#include <af/nn/Types.hpp>
#include <af/nn/Init.hpp>
namespace af
{
namespace nn
Expand Down Expand Up @@ -47,9 +47,9 @@ namespace af
return max(input, m_slope * input);
}

PReLU::PReLU(int size, double spread)
PReLU::PReLU(int size, double value)
{
auto w = nn::weight(size, 1, spread);
auto w = nn::constant(value, size, 1);
setParams({w});
}

Expand Down
6 changes: 3 additions & 3 deletions src/nn/Modules/Linear.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
********************************************************/
#include <af/autograd/Functions.hpp>

#include <af/nn/Types.hpp>
#include <af/nn/Init.hpp>
#include <af/nn/Modules/Linear.hpp>

namespace af
Expand All @@ -20,9 +20,9 @@ namespace af
Linear::Linear(int input_size, int output_size, bool bias, float spread) :
m_bias(bias)
{
auto w = nn::weight(input_size, output_size, spread);
auto w = nn::lecunNormal(output_size, input_size);
if (bias) {
auto b = nn::weight(1, output_size, spread);
auto b = nn::lecunNormal(output_size, 1);
setParams({w, b});
} else {
setParams({w});
Expand Down
36 changes: 0 additions & 36 deletions src/nn/Types.cpp

This file was deleted.

0 comments on commit 5f571e0

Please sign in to comment.