[ GTEST ] Add gtest to run the unit tests on an Android device
Add GTest code for building and running the nntrainer unit tests on an Android device.

**Self evaluation:**
1. Build test:	 [X]Passed [ ]Failed [ ]Skipped
2. Run test:	 [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: jijoong.moon <[email protected]>
jijoongmoon committed Jul 17, 2023
1 parent 149e087 commit 4356071
Showing 157 changed files with 66,452 additions and 35 deletions.
75 changes: 40 additions & 35 deletions nntrainer/tensor/tensor.cpp
@@ -90,8 +90,7 @@ Tensor::Tensor(const TensorDim &d, bool alloc_now, Tensor::Initializer init,
}
}

Tensor::Tensor(const TensorDim &d, const void *buf) :
Tensor(d, true) {
Tensor::Tensor(const TensorDim &d, const void *buf) : Tensor(d, true) {
if (d.getDataLen() != 0) {
if (buf != nullptr)
copy(buf);
@@ -1133,7 +1132,8 @@ Tensor &Tensor::sum(unsigned int axis, Tensor &ret, float alpha,

switch (axis) {
case 0: {
CREATE_IF_EMPTY_DIMS(ret, 1, dim.channel(), dim.height(), dim.width(),getTensorType());
CREATE_IF_EMPTY_DIMS(ret, 1, dim.channel(), dim.height(), dim.width(),
getTensorType());
size_t feat_len = dim.getFeatureLen();
size_t batch = dim.batch();
Tensor ones(1, 1, 1, batch);
@@ -1142,7 +1142,8 @@ Tensor &Tensor::sum(unsigned int axis, Tensor &ret, float alpha,
ones.getData(), 1, beta, ret.getData(), 1);
} break;
case 1: {
CREATE_IF_EMPTY_DIMS(ret, dim.batch(), 1, dim.height(), dim.width(),getTensorType());
CREATE_IF_EMPTY_DIMS(ret, dim.batch(), 1, dim.height(), dim.width(),
getTensorType());
unsigned int feat_len = dim.height() * dim.width();
unsigned int channel = dim.channel();
Tensor ones(1, 1, 1, channel);
@@ -1155,7 +1156,8 @@ Tensor &Tensor::sum(unsigned int axis, Tensor &ret, float alpha,
}
} break;
case 2: {
CREATE_IF_EMPTY_DIMS(ret, dim.batch(), dim.channel(), 1, dim.width(),getTensorType());
CREATE_IF_EMPTY_DIMS(ret, dim.batch(), dim.channel(), 1, dim.width(),
getTensorType());
unsigned int width = dim.width();
unsigned int height = dim.height();
Tensor ones(1, 1, 1, height);
@@ -1172,7 +1174,8 @@ Tensor &Tensor::sum(unsigned int axis, Tensor &ret, float alpha,
}
} break;
case 3: {
CREATE_IF_EMPTY_DIMS(ret, dim.batch(), dim.channel(), dim.height(), 1, getTensorType());
CREATE_IF_EMPTY_DIMS(ret, dim.batch(), dim.channel(), dim.height(), 1,
getTensorType());
unsigned int m = ret.dim.getDataLen();
unsigned int n = dim.width();
Tensor ones(1, 1, 1, n);
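The sum() cases above all reduce one axis the same way: multiply the tensor by a vector of ones via sgemv/sgemm. A minimal standalone sketch of the axis-0 case, with plain loops standing in for the BLAS call (the function and its names are illustrative, not nntrainer's API):

#include <cstddef>

// ret must hold feat_len elements; it is scaled by beta and accumulated into,
// mirroring ret = alpha * (X^T * ones) + beta * ret from the hunk above.
void sum_axis0(const float *x, std::size_t batch, std::size_t feat_len,
               float alpha, float beta, float *ret) {
  for (std::size_t f = 0; f < feat_len; ++f) {
    float acc = 0.0f;
    for (std::size_t b = 0; b < batch; ++b)
      acc += x[b * feat_len + f]; // dotting one column of X with ones
    ret[f] = alpha * acc + beta * ret[f];
  }
}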
@@ -1356,7 +1359,8 @@ Tensor &Tensor::dot(Tensor const &m, Tensor &result, bool trans, bool trans_m,
K = mdim1; /** == dim2 */
N = mdim2;
M = dim1;
CREATE_IF_EMPTY_DIMS(result, batch(), channel(), height(), N, getTensorType());
CREATE_IF_EMPTY_DIMS(result, batch(), channel(), height(), N,
getTensorType());

// We do not set the result to zero for performance reasons.
// However, the result is not initialized properly. It might contain
@@ -1370,7 +1374,8 @@ Tensor &Tensor::dot(Tensor const &m, Tensor &result, bool trans, bool trans_m,
K = mdim2; /** == dim2 */
N = mdim1;
M = dim1;
CREATE_IF_EMPTY_DIMS(result, batch(), channel(), height(), N, getTensorType());
CREATE_IF_EMPTY_DIMS(result, batch(), channel(), height(), N,
getTensorType());
} else if (trans && !trans_m) {
if (dim1 != mdim1)
throw std::runtime_error(
@@ -1386,7 +1391,7 @@ Tensor &Tensor::dot(Tensor const &m, Tensor &result, bool trans, bool trans_m,
K = mdim2; /** == dim1 */
N = mdim1;
M = dim2;
CREATE_IF_EMPTY_DIMS(result, 1, 1, M, N,getTensorType());
CREATE_IF_EMPTY_DIMS(result, 1, 1, M, N, getTensorType());
}
lda = dim2;
ldb = mdim2;
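For orientation, the dot() hunks above choose GEMM shapes for result(M x N) = op(A)(M x K) * op(B)(K x N), where op() optionally transposes and dim1/dim2, mdim1/mdim2 are the row/column counts of A and B. A hedged sketch of just that dispatch (GemmShape and pick_shape are hypothetical helpers, not library code):

struct GemmShape {
  unsigned int M, N, K;
};

// Mirrors the branch structure visible in the diff: each case identifies the
// shared dimension and picks M, N, K for the underlying sgemm call.
GemmShape pick_shape(unsigned int dim1, unsigned int dim2, unsigned int mdim1,
                     unsigned int mdim2, bool trans, bool trans_m) {
  if (!trans && !trans_m)
    return {dim1, mdim2, mdim1}; // A * B,     K == dim2 == mdim1
  if (!trans && trans_m)
    return {dim1, mdim1, mdim2}; // A * B^T,   K == dim2 == mdim2
  if (trans && !trans_m)
    return {dim2, mdim2, mdim1}; // A^T * B,   K == dim1 == mdim1
  return {dim2, mdim1, mdim2};   // A^T * B^T, K == dim1 == mdim2
}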
@@ -1611,35 +1616,35 @@ Tensor &Tensor::apply(std::function<Tensor &(Tensor, Tensor &)> f,

void Tensor::print(std::ostream &out) const {
printInstance(out, this);
if (getDataType() == ml::train::TensorDim::DataType::FP32){
if (getDataType() == ml::train::TensorDim::DataType::FP32) {
const __fp16 *data = getData<__fp16>();
unsigned int len = size();
out << "data addr: " << data << '\n';
out << dim;
unsigned int len = size();
out << "data addr: " << data << '\n';
out << dim;

if (len > 100) {
out << '[' << data[0] << ' ' << data[1] << ' ' << data[2] << " ... "
<< data[len - 3] << ' ' << data[len - 2] << ' ' << data[len - 1] << ']'
<< std::endl;
return;
}
if (len > 100) {
out << '[' << data[0] << ' ' << data[1] << ' ' << data[2] << " ... "
<< data[len - 3] << ' ' << data[len - 2] << ' ' << data[len - 1]
<< ']' << std::endl;
return;
}

std::ios init(NULL);
init.copyfmt(out);
for (unsigned int k = 0; k < dim.batch(); k++) {
for (unsigned int l = 0; l < dim.channel(); l++) {
for (unsigned int i = 0; i < dim.height(); i++) {
for (unsigned int j = 0; j < dim.width(); j++) {
out << std::setw(10) << std::setprecision(10)
<< this->getValue<float>(k, l, i, j) << " ";
std::ios init(NULL);
init.copyfmt(out);
for (unsigned int k = 0; k < dim.batch(); k++) {
for (unsigned int l = 0; l < dim.channel(); l++) {
for (unsigned int i = 0; i < dim.height(); i++) {
for (unsigned int j = 0; j < dim.width(); j++) {
out << std::setw(10) << std::setprecision(10)
<< this->getValue<float>(k, l, i, j) << " ";
}
out << std::endl;
}
out << std::endl;
}
out << std::endl;
out << "-------" << std::endl;
}
out << "-------" << std::endl;
}
out.copyfmt(init);
out.copyfmt(init);
} else if (getDataType() == ml::train::TensorDim::DataType::FP16) {
const __fp16 *data = getData<__fp16>();
unsigned int len = size();
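One detail worth noting in the print() body above: it snapshots the stream's formatting state with std::ios::copyfmt before applying setw/setprecision and restores it afterwards, so the flags do not leak into later output. A self-contained illustration of that pattern (not nntrainer code):

#include <iomanip>
#include <iostream>

int main() {
  std::ios saved(nullptr); // state holder, like std::ios init(NULL) above
  saved.copyfmt(std::cout);                // snapshot current flags
  std::cout << std::setprecision(10) << 3.141592653589793 << '\n';
  std::cout.copyfmt(saved);                // restore the snapshot
  std::cout << 3.141592653589793 << '\n';  // back to default precision
  return 0;
}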
@@ -1688,10 +1693,10 @@ void Tensor::copy(const void *buf) {
// (getDataType() == ml::train::TensorDim::DataType::FP16) ? "FP16" : "NO";
// std::cout << type_ << std::endl;

if(getDataType() == ml::train::TensorDim::DataType::FP16){
scopy(size(), (__fp16*)buf, 1, getData<__fp16>(), 1);
}else if(getDataType() == ml::train::TensorDim::DataType::FP32){
scopy(size(), (float*)buf, 1, getData<float>(), 1);
if (getDataType() == ml::train::TensorDim::DataType::FP16) {
scopy(size(), (__fp16 *)buf, 1, getData<__fp16>(), 1);
} else if (getDataType() == ml::train::TensorDim::DataType::FP32) {
scopy(size(), (float *)buf, 1, getData<float>(), 1);
}
}

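The copy() hunk above dispatches an element copy by data type before handing the buffer to scopy. A minimal sketch of the same dispatch idea (copy_buffer and the enum are illustrative; std::copy_n stands in for the strided scopy call):

#include <algorithm>
#include <cstddef>
#include <cstdint>

enum class DataType { FP16, FP32 };

// Pick the element width from the data type, then copy count elements.
void copy_buffer(const void *src, void *dst, std::size_t count, DataType dt) {
  if (dt == DataType::FP16) {
    // 16-bit payload treated as opaque uint16_t; __fp16 is an ARM extension.
    std::copy_n(static_cast<const std::uint16_t *>(src), count,
                static_cast<std::uint16_t *>(dst));
  } else {
    std::copy_n(static_cast<const float *>(src), count,
                static_cast<float *>(dst));
  }
}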
132 changes: 132 additions & 0 deletions test/jni/Android.mk
@@ -0,0 +1,132 @@
LOCAL_PATH := $(call my-dir)

include $(CLEAR_VARS)

# ndk path
ifndef ANDROID_NDK
$(error ANDROID_NDK is not defined!)
endif

ifndef NNTRAINER_ROOT
NNTRAINER_ROOT := $(LOCAL_PATH)/../../..
endif

ML_API_COMMON_INCLUDES := ${NNTRAINER_ROOT}/ml_api_common/include
NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer \
$(NNTRAINER_ROOT)/nntrainer/dataset \
$(NNTRAINER_ROOT)/nntrainer/models \
$(NNTRAINER_ROOT)/nntrainer/layers \
$(NNTRAINER_ROOT)/nntrainer/compiler \
$(NNTRAINER_ROOT)/nntrainer/graph \
$(NNTRAINER_ROOT)/nntrainer/optimizers \
$(NNTRAINER_ROOT)/nntrainer/tensor \
$(NNTRAINER_ROOT)/nntrainer/utils \
$(NNTRAINER_ROOT)/api \
$(NNTRAINER_ROOT)/api/ccapi/include \
${ML_API_COMMON_INCLUDES}

LOCAL_MODULE := nntrainer
LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI)/libnntrainer.so

include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)

LOCAL_MODULE := ccapi-nntrainer
LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI)/libccapi-nntrainer.so

include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)
GTEST_PATH := googletest

LOCAL_MODULE := googletest_main
LOCAL_CFLAGS := -Igoogletest/include -Igoogletest/
LOCAL_CXXFLAGS += -std=c++17 -frtti -fexceptions

LOCAL_SRC_FILES := \
$(GTEST_PATH)/src/gtest-all.cc

include $(BUILD_STATIC_LIBRARY)

include $(CLEAR_VARS)

LOCAL_MODULE := test_util
LOCAL_CFLAGS := -Igoogletest/include -I../include -pthread -fexceptions -fopenmp -static-openmp -DMIN_CPP_VERSION=201703L -DNNTR_NUM_THREADS=1 -D__LOGGING__=1 -DENABLE_TEST=1 -DREDUCE_TOLERANCE=1 -march=armv8.2-a+fp16 -mfpu=neon-fp16 -mfloat-abi=softfp -O3 -frtti
LOCAL_CXXFLAGS += -std=c++17 -frtti -fexceptions
LOCAL_LDLIBS := -llog -landroid -fopenmp -static-openmp

LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES) ../include

LOCAL_SRC_FILES := ../nntrainer_test_util.cpp

include $(BUILD_STATIC_LIBRARY)


include $(CLEAR_VARS)

LOCAL_MODULE := unittest_nntrainer_tensor
LOCAL_CFLAGS := -Igoogletest/include -I../include -pthread -fexceptions -fopenmp -static-openmp -DMIN_CPP_VERSION=201703L -DNNTR_NUM_THREADS=1 -D__LOGGING__=1 -DENABLE_TEST=1 -DREDUCE_TOLERANCE=1 -march=armv8.2-a+fp16 -mfpu=neon-fp16 -mfloat-abi=softfp -O3 -frtti
LOCAL_CXXFLAGS += -std=c++17 -frtti -fexceptions
LOCAL_LDLIBS := -llog -landroid -fopenmp -static-openmp

LOCAL_SRC_FILES := \
../unittest/unittest_nntrainer_tensor.cpp

LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES)

LOCAL_SHARED_LIBRARIES := nntrainer ccapi-nntrainer
LOCAL_STATIC_LIBRARIES := googletest_main test_util
include $(BUILD_EXECUTABLE)

include $(CLEAR_VARS)

LOCAL_MODULE := unittest_nntrainer_tensor_fp16
LOCAL_CFLAGS := -Igoogletest/include -I../include -pthread -fexceptions -fopenmp -static-openmp -DMIN_CPP_VERSION=201703L -DNNTR_NUM_THREADS=1 -D__LOGGING__=1 -DENABLE_TEST=1 -DREDUCE_TOLERANCE=1 -march=armv8.2-a+fp16 -mfpu=neon-fp16 -mfloat-abi=softfp -O3 -frtti
LOCAL_CXXFLAGS += -std=c++17 -frtti -fexceptions
LOCAL_LDLIBS := -llog -landroid -fopenmp -static-openmp

LOCAL_SRC_FILES := \
../unittest/unittest_nntrainer_tensor_fp16.cpp

LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES)

LOCAL_SHARED_LIBRARIES := nntrainer ccapi-nntrainer
LOCAL_STATIC_LIBRARIES := googletest_main test_util
include $(BUILD_EXECUTABLE)


include $(CLEAR_VARS)

LOCAL_MODULE := unittest_ccapi
LOCAL_CFLAGS := -Igoogletest/include -I../include -pthread -fexceptions -fopenmp -static-openmp -DMIN_CPP_VERSION=201703L -DNNTR_NUM_THREADS=1 -D__LOGGING__=1 -DENABLE_TEST=1 -DREDUCE_TOLERANCE=1 -march=armv8.2-a+fp16 -mfpu=neon-fp16 -mfloat-abi=softfp -O3 -frtti
LOCAL_CXXFLAGS += -std=c++17 -frtti -fexceptions
LOCAL_LDLIBS := -llog -landroid -fopenmp -static-openmp

LOCAL_SRC_FILES := \
../ccapi/unittest_ccapi.cpp

LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES)

LOCAL_SHARED_LIBRARIES := nntrainer ccapi-nntrainer
LOCAL_STATIC_LIBRARIES := googletest_main test_util
include $(BUILD_EXECUTABLE)

include $(CLEAR_VARS)

LOCAL_MODULE := unittest_compiler
LOCAL_CFLAGS := -Igoogletest/include -I../include -I../unittest/compiler -pthread -fexceptions -fopenmp -static-openmp -DMIN_CPP_VERSION=201703L -DNNTR_NUM_THREADS=1 -D__LOGGING__=1 -DENABLE_TEST=1 -DREDUCE_TOLERANCE=1 -march=armv8.2-a+fp16 -mfpu=neon-fp16 -mfloat-abi=softfp -O3 -frtti -DNDK_BUILD=1
LOCAL_CXXFLAGS += -std=c++17 -frtti -fexceptions
LOCAL_LDLIBS := -llog -landroid -fopenmp -static-openmp

LOCAL_SRC_FILES := \
../unittest/compiler/compiler_test_util.cpp \
../unittest/compiler/unittest_compiler.cpp \
../unittest/compiler/unittest_realizer.cpp \

LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES)

LOCAL_SHARED_LIBRARIES := nntrainer ccapi-nntrainer
LOCAL_STATIC_LIBRARIES := googletest_main test_util
include $(BUILD_EXECUTABLE)
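With the modules above defined, a typical way to build these executables and run one on a device is ndk-build plus adb. The commands below are an assumed workflow (paths and the staging directory are illustrative, not taken from the commit):

cd test/jni
export ANDROID_NDK=/path/to/android-ndk
$ANDROID_NDK/ndk-build NDK_PROJECT_PATH=. APP_BUILD_SCRIPT=./Android.mk \
    NDK_APPLICATION_MK=./Application.mk

# ndk-build stages the binaries and prebuilt .so files under libs/arm64-v8a/
adb push libs/arm64-v8a/. /data/local/tmp/nntr_test
adb shell "cd /data/local/tmp/nntr_test && LD_LIBRARY_PATH=. ./unittest_nntrainer_tensor"

Note that the prebuilt libnntrainer.so and libccapi-nntrainer.so must already exist under $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI) for the PREBUILT_SHARED_LIBRARY steps to succeed.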

3 changes: 3 additions & 0 deletions test/jni/Application.mk
@@ -0,0 +1,3 @@
APP_ABI := arm64-v8a
APP_STL := c++_shared
APP_PLATFORM := android-29
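Application.mk pins the build to arm64-v8a with the shared libc++ at API level 29; since APP_STL is c++_shared, libc++_shared.so has to be pushed to the device alongside the test binaries. Because the googletest_main module above compiles gtest-all.cc but not gtest_main.cc, each test binary is expected to provide its own main(), along the lines of this minimal, hypothetical gtest file:

#include <gtest/gtest.h>

// Hypothetical smoke test; the real suites live under test/unittest and
// test/ccapi, as listed in LOCAL_SRC_FILES above.
TEST(TensorSmoke, AddsUp) { EXPECT_EQ(2 + 2, 4); }

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}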
