[bugfix] Fix issues that occurred during Tensor class refactoring
This commit fixes several issues that arose from the refactoring of the Tensor class.

**Changes proposed in this PR:**
- Implement a user-defined copy constructor and copy assignment operator to prevent the incorrect behavior of the compiler-generated defaults
- Reimplement Tensor::add_i() to fix the previously incorrect implementation
- Add a chain() function that returns a LazyTensor (a usage sketch of the new interface follows this list)
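
A minimal usage sketch of the revised interface (a sketch only, not code from this commit; `t` and `m` are assumed to be allocated FP32 Tensors of equal shape):

  nntrainer::Tensor copy(t);              // new copy constructor: clones the underlying FloatTensor
  int status = t.add_i(m, 1.0f);          // in-place add; returns ML_ERROR_NONE on success
  nntrainer::LazyTensor lazy = t.chain(); // anchors deferred evaluation, finished with run()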

**Self-evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test:   [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Donghyeon Jeong <[email protected]>
djeong20 committed Mar 15, 2024
1 parent c129345 commit e4232d2
Showing 7 changed files with 124 additions and 12 deletions.
15 changes: 15 additions & 0 deletions nntrainer/tensor/float_tensor.cpp
@@ -507,6 +507,21 @@ Tensor &FloatTensor::add_strided(Tensor const &input, Tensor &output,
return output;
}

int FloatTensor::add_i(Tensor const &m, Tensor &output, float const alpha) {
auto f = [&](const BroadcastInfo &e, const float *buf, const float *m_buf,
float *out_buf) {
saxpy(e.buffer_size, alpha, m_buf, e.strides[3], out_buf, strides[3]);
};

try {
apply_broadcast(m, f, output);
} catch (std::exception &err) {
ml_loge("%s %s", typeid(err).name(), err.what());
return ML_ERROR_INVALID_PARAMETER;
}
return ML_ERROR_NONE;
}
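
Each broadcast segment above is handed to saxpy, which computes y = alpha * x + y over strided buffers; a scalar sketch of what one call does per segment (illustrative only, not part of the diff):

  for (unsigned int i = 0; i < e.buffer_size; ++i)
    out_buf[i * strides[3]] += alpha * m_buf[i * e.strides[3]];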

Tensor &FloatTensor::add(float const &value, Tensor &output) const {
auto f = std::bind(std::plus<float>(), std::placeholders::_1, value);
apply(f, output);
11 changes: 11 additions & 0 deletions nntrainer/tensor/float_tensor.h
@@ -64,6 +64,12 @@ class FloatTensor : public TensorBase {
std::vector<std::vector<std::vector<std::vector<float>>>> const &d,
Tformat fm);

/**
* @brief Construct a new FloatTensor object
* @param rhs TensorBase object to copy
*/
FloatTensor(TensorBase &rhs) : TensorBase(rhs) {}

/**
* @brief Basic Destructor
*/
@@ -256,6 +262,11 @@ class FloatTensor : public TensorBase {
Tensor &add_strided(Tensor const &input, Tensor &output,
const float beta) const override;

/**
* @copydoc Tensor::add_i(Tensor const &m, float const alpha)
*/
int add_i(Tensor const &m, Tensor &output, float const alpha) override;

/**
* @copydoc Tensor::add(float const &value, Tensor &output)
*/
16 changes: 16 additions & 0 deletions nntrainer/tensor/half_tensor.cpp
@@ -479,6 +479,22 @@ Tensor &HalfTensor::add_strided(Tensor const &input, Tensor &output,
return output;
}

int HalfTensor::add_i(Tensor const &m, Tensor &output, float const alpha) {
auto f = [&](const BroadcastInfo &e, const _FP16 *buf, const _FP16 *m_buf,
_FP16 *out_buf) {
saxpy(e.buffer_size, alpha, m_buf, e.strides[3], out_buf, strides[3]);
/// @todo: saxpy is not valid for _FP16
};

try {
apply_broadcast(m, f, output);
} catch (std::exception &err) {
ml_loge("%s %s", typeid(err).name(), err.what());
return ML_ERROR_INVALID_PARAMETER;
}
return ML_ERROR_NONE;
}
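
As the @todo above notes, saxpy is not valid for _FP16 buffers; a plain strided loop is one possible interim fallback (a sketch, not the committed code):

  for (unsigned int i = 0; i < e.buffer_size; ++i)
    out_buf[i * strides[3]] += static_cast<_FP16>(alpha) * m_buf[i * e.strides[3]];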

Tensor &HalfTensor::add(float const &value, Tensor &output) const {
auto f = std::bind(std::plus<_FP16>(), std::placeholders::_1,
static_cast<_FP16>(value));
12 changes: 12 additions & 0 deletions nntrainer/tensor/half_tensor.h
@@ -63,6 +63,13 @@ class HalfTensor : public TensorBase {
HalfTensor(std::vector<std::vector<std::vector<std::vector<_FP16>>>> const &d,
Tformat fm);

/**
* @brief Construct a new HalfTensor object
*
* @param rhs TensorBase object to copy
*/
HalfTensor(TensorBase &rhs) : TensorBase(rhs) {}

/**
* @brief Basic Destructor
*/
@@ -255,6 +262,11 @@ class HalfTensor : public TensorBase {
Tensor &add_strided(Tensor const &input, Tensor &output,
const float beta) const override;

/**
* @copydoc Tensor::add_i(Tensor const &m, float const alpha)
*/
int add_i(Tensor const &m, Tensor &output, float const alpha) override;

/**
* @copydoc Tensor::add(float const &value, Tensor &output)
*/
44 changes: 35 additions & 9 deletions nntrainer/tensor/tensor.cpp
@@ -10,6 +10,7 @@
*/

#include <float_tensor.h>
#include <lazy_tensor.h>
#include <tensor.h>

#ifdef ENABLE_FP16
@@ -100,6 +101,35 @@ Tensor::Tensor(
}
#endif

Tensor::Tensor(const Tensor &rhs) {
if (rhs.getDataType() == Tdatatype::FP32) {
itensor = std::shared_ptr<FloatTensor>(new FloatTensor(*rhs.itensor),
std::default_delete<FloatTensor>());
} else if (rhs.getDataType() == Tdatatype::FP16) {
#ifdef ENABLE_FP16
itensor = std::shared_ptr<HalfTensor>(new HalfTensor(*rhs.itensor),
std::default_delete<HalfTensor>());
#else
throw std::invalid_argument("Error: enable-fp16 is not enabled");
#endif
}
}

Tensor &Tensor::operator=(const Tensor &rhs) {
if (rhs.getDataType() == Tdatatype::FP32) {
itensor = std::shared_ptr<FloatTensor>(new FloatTensor(*rhs.itensor),
std::default_delete<FloatTensor>());
} else if (rhs.getDataType() == Tdatatype::FP16) {
#ifdef ENABLE_FP16
itensor = std::shared_ptr<HalfTensor>(new HalfTensor(*rhs.itensor),
std::default_delete<HalfTensor>());
#else
throw std::invalid_argument("Error: enable-fp16 is not enabled");
#endif
}
return *this;
}
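
The practical effect of these user-defined members: the previously defaulted copy operations copied the itensor shared_ptr, so two Tensor objects aliased one TensorBase, whereas now the concrete FloatTensor/HalfTensor descriptor is cloned. Note that TensorBase's own copy constructor (tensor_base.h below) still copies the data shared_ptr, so the underlying buffer remains shared. A behavior sketch, assuming t is an FP32 Tensor:

  Tensor u(t); // copy construction: u owns a fresh FloatTensor descriptor
  u = t;       // copy assignment: likewise replaces u's descriptor with a clone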

bool Tensor::operator==(const Tensor &rhs) const {
/// compares tensor information
if (*itensor == *rhs.itensor) {
@@ -176,7 +206,7 @@ int Tensor::multiply_i_strided(Tensor const &m, const float beta) {
}

Tensor Tensor::multiply_strided(Tensor const &m, const float beta) const {
Tensor t;
Tensor t("", getFormat(), getDataType());
return this->multiply_strided(m, t, beta);
}

@@ -194,7 +224,7 @@ int Tensor::multiply_i(float const &value) {
}

Tensor Tensor::multiply(float const &value) const {
Tensor t;
Tensor t("", getFormat(), getDataType());
return multiply(value, t);
}
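
The temporaries above were previously default-constructed, which dropped the source tensor's format and data type (presumably falling back to the FP32 default); passing getFormat() and getDataType() explicitly keeps, for example, an FP16 input FP16 through these helpers.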

@@ -319,13 +349,7 @@ Tensor &Tensor::add(float const &value, Tensor &output) const {
}

int Tensor::add_i(Tensor const &m, float const alpha) {
try {
this->add(m, *this, alpha);
} catch (std::exception &err) {
ml_loge("%s %s", typeid(err).name(), err.what());
return ML_ERROR_INVALID_PARAMETER;
}
return ML_ERROR_NONE;
return itensor->add_i(m, *this, alpha);
}

Tensor Tensor::add(Tensor const &m, float const alpha) const {
@@ -536,6 +560,8 @@ void Tensor::cos(Tensor &out, float alpha) {
itensor->cos(out, alpha);
}

LazyTensor Tensor::chain() const { return LazyTensor(*this); }
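
chain() anchors deferred evaluation: per the header documentation, the returned LazyTensor is later materialized with run(). A hedged sketch (only chain() and run() are confirmed by this diff):

  Tensor result = t.chain().run(); // assuming run() evaluates and returns the Tensor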

float Tensor::l2norm() const { return itensor->l2norm(); }

void Tensor::normalization_i() {
18 changes: 15 additions & 3 deletions nntrainer/tensor/tensor.h
@@ -13,6 +13,8 @@
#define __TENSOR_H__
#ifdef __cplusplus

#define MAKE_SHARED_TENSOR(...) std::make_shared<nntrainer::Tensor>(__VA_ARGS__)
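
The new convenience macro simply forwards its arguments to std::make_shared; for instance (dim is a placeholder TensorDim, not from this diff):

  auto t_ptr = MAKE_SHARED_TENSOR(dim); // std::make_shared<nntrainer::Tensor>(dim)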

#define CREATE_IF_EMPTY_DIMS(tensor, ...) \
do { \
if (tensor.empty()) \
@@ -26,6 +28,8 @@

namespace nntrainer {

class LazyTensor;

/**
* @class Tensor Class
* @brief Tensor Class
@@ -213,7 +217,7 @@ class Tensor {
* @brief Copy constructor of Tensor.
* @param[in] Tensor &
*/
Tensor(const Tensor &rhs) = default;
Tensor(const Tensor &rhs);

/**
* @brief Move constructor of Tensor.
@@ -225,7 +229,7 @@
* @brief Copy assignment operator.
* @param[in] rhs Tensor to be copied.
*/
Tensor &operator=(const Tensor &rhs) = default;
Tensor &operator=(const Tensor &rhs);

/**
* @brief Move assignment operator.
@@ -269,7 +273,7 @@ class Tensor {
"Creating shared tensor of size bigger than tensor memory.");
}

Tensor output;
Tensor output("", d.getFormat(), d.getDataType());
output.setTensorVar(d, buf, offset);
return output;
};
@@ -941,6 +945,12 @@ class Tensor {
*/
void cos(Tensor &out, float alpha = 1.0);

/**
* @brief Anchor a starting point to defer following evaluation
* @retval LazyTensor class that can be used with run();
*/
LazyTensor chain() const;

/**
* @brief l2norm the Tensor elements
* @retval Calculated l2norm
@@ -1439,6 +1449,8 @@ class Tensor {
std::swap(lhs.itensor, rhs.itensor);
}

static constexpr float epsilon = 1e-5;

private:
std::shared_ptr<TensorBase> itensor;

20 changes: 20 additions & 0 deletions nntrainer/tensor/tensor_base.h
@@ -114,6 +114,21 @@ class TensorBase {
TensorBase(const TensorDim &d, const void *buf = nullptr) :
TensorBase(d, true) {}

/**
* @brief Copy constructor of TensorBase.
* @param[in] rhs TensorBase object to copy
*/
TensorBase(const TensorBase &rhs) {
dim = rhs.dim;
strides = rhs.strides;
contiguous = rhs.contiguous;
initializer = rhs.initializer;
name = rhs.name;
data = rhs.data;
offset = rhs.offset;
src_tensor = rhs.src_tensor;
}

/**
* @brief Comparison operator overload
* @param[in] rhs Tensor to be compared with
@@ -263,6 +278,11 @@ class TensorBase {
virtual Tensor &add_strided(Tensor const &input, Tensor &output,
const float beta) const = 0;

/**
* @copydoc Tensor::add_i(Tensor const &m, float const alpha)
*/
virtual int add_i(Tensor const &m, Tensor &output, float const alpha) = 0;

/**
* @copydoc Tensor::add(float const &value, Tensor &output)
*/
