Skip to content

Commit

Permalink
[Tizen7.0] Tizen7.0 Backporting
Browse files Browse the repository at this point in the history
- This commit adds several updates for Tizen 7.0 backporting.
- A type mismatch bug is fixed.
- An unused variable is removed.
- Missing header files are added to the spec file.
- The spec file is updated.

Self evaluation:

Build test: [X]Passed [ ]Failed [ ]Skipped
Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Eunju Yang <[email protected]>
  • Loading branch information
EunjuYang authored and jijoongmoon committed Aug 29, 2024
1 parent 80c9855 commit 6d6e924
Show file tree
Hide file tree
Showing 10 changed files with 32 additions and 16 deletions.
3 changes: 3 additions & 0 deletions api/ccapi/include/layer.h
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,9 @@ class Layer {
*/
virtual const std::string getType() const = 0;

/**
* @brief Initialize layer
*/
virtual void initialize() = 0;

/**
Expand Down
1 change: 1 addition & 0 deletions debian/nntrainer-dev.install
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
/usr/include/nntrainer/layer_context.h
/usr/include/nntrainer/layer_devel.h
/usr/include/nntrainer/layer_impl.h
/usr/include/nntrainer/acti_func.h
# custom layer kits
/usr/include/nntrainer/app_context.h
# logger
Expand Down
1 change: 1 addition & 0 deletions meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@ if get_option('enable-fp16')
# compatible with armv8.0 machines.
if cxx.has_argument('-mfp16-format=ieee')
add_project_arguments('-mfp16-format=ieee', language: ['c', 'cpp'])
add_project_arguments('-march=armv8.2-a+fp16', language: ['c', 'cpp'])
else
message ('The compiler does not support -mfp16-format=ieee. However, according to https://gcc.gnu.org/onlinedocs/gcc-9.1.0/gcc/Half-Precision.html, gcc may use IEEE fp16 anyway. Thus, we will proceed without the option for FP16 support.')
endif
Expand Down
2 changes: 1 addition & 1 deletion nnstreamer/meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,5 @@ if get_option('enable-nnstreamer-tensor-filter').enabled()
subdir('tensor_filter')
endif
if get_option('enable-nnstreamer-tensor-trainer').enabled()
# subdir('tensor_trainer')
subdir('tensor_trainer')
endif
5 changes: 3 additions & 2 deletions nntrainer/layers/layer_context.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -541,9 +541,10 @@ bool RunLayerContext::validate(bool skip_input, bool skip_label) {
} else if (val->getVariableRef().getTensorType().data_type ==
TensorDim::DataType::FP16) {
#ifdef ENABLE_FP16
tensor_map[val->getName()] = val->getVariableRef().getData<_FP16>();
tensor_map[val->getName()] =
val->getVariableRef().template getData<_FP16>();
tensor_map[val->getGradientName()] =
val->getGradientRef().getData<_FP16>();
val->getGradientRef().template getData<_FP16>();
#else
throw std::invalid_argument("Error: enable-fp16 is not enabled");
#endif
Expand Down
3 changes: 0 additions & 3 deletions nntrainer/layers/layer_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -438,9 +438,6 @@ class RunLayerContext {
d.setDataType(o_t);
w = Tensor(d, true);
}
unsigned int o_ax = getWeightObject(idx).getOutputAxis();

// t_w.dequantize(w, o_ax);

return;
}
Expand Down
3 changes: 3 additions & 0 deletions nntrainer/layers/layer_devel.h
Original file line number Diff line number Diff line change
Expand Up @@ -160,6 +160,9 @@ class Layer {
*/
virtual void finalize(InitLayerContext &context) = 0;

/**
* @brief Initialize the layer
*/
virtual void initialize(RunLayerContext &context){};

/**
Expand Down
1 change: 1 addition & 0 deletions nntrainer/layers/meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ layer_headers = [
'layer_context.h',
'layer_devel.h',
'layer_impl.h',
'acti_func.h',
'common_properties.h',
]

Expand Down
8 changes: 4 additions & 4 deletions nntrainer/tensor/hgemm/hgemm_pack.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -367,10 +367,10 @@ void packing_B8(unsigned int K, unsigned int N, const __fp16 *src,
unsigned int ldb, const __fp16 *dst) {
assert(K != 0 && N != 0 && N % 8 == 0);

for (int i = 0; i < K; i++) {
for (unsigned int i = 0; i < K; i++) {
const __fp16 *a_off = src + i * ldb;
__fp16 *b_off = (__fp16 *)dst + i * 8;
for (int j = 0; j < N; j += 8) {
for (unsigned int j = 0; j < N; j += 8) {
float16x8_t v = vld1q_f16(a_off);
a_off += 8;

Expand All @@ -384,10 +384,10 @@ void packing_B16(unsigned int K, unsigned int N, const __fp16 *src,
unsigned int ldb, const __fp16 *dst) {
assert(K != 0 && N != 0 && N % 16 == 0);

for (int i = 0; i < K; i++) {
for (unsigned int i = 0; i < K; i++) {
const __fp16 *a_off = src + i * ldb;
__fp16 *b_off = (__fp16 *)dst + i * 16;
for (int j = 0; j < N; j += 16) {
for (unsigned int j = 0; j < N; j += 16) {
float16x8_t v0_7 = vld1q_f16(a_off);
float16x8_t v8_15 = vld1q_f16(a_off + 8);
a_off += 16;
Expand Down
21 changes: 15 additions & 6 deletions packaging/nntrainer.spec
Original file line number Diff line number Diff line change
Expand Up @@ -131,13 +131,13 @@ BuildRequires: tensorflow2-lite-devel
BuildRequires: tensorflow2-lite-devel
%endif # support_tflite_interpreter

%define enable_nnstreamer_tensor_filter -Denable-nnstreamer-tensor-filter=false
%define enable_nnstreamer_tensor_trainer -Denable-nnstreamer-tensor-trainer=false
%define enable_nnstreamer_tensor_filter -Denable-nnstreamer-tensor-filter=disabled
%define enable_nnstreamer_tensor_trainer -Denable-nnstreamer-tensor-trainer=disabled

%if 0%{?nnstreamer_filter}
Requires: nnstreamer-nntrainer = %{version}-%{release}
BuildRequires: nnstreamer-devel
%define enable_nnstreamer_tensor_filter -Denable-nnstreamer-tensor-filter=true
%define enable_nnstreamer_tensor_filter -Denable-nnstreamer-tensor-filter=enabled

%if 0%{?unit_test}
%if 0%{tizen_version_major}%{tizen_version_minor} > 60
Expand All @@ -151,7 +151,7 @@ BuildRequires: python
%if 0%{?nnstreamer_trainer}
Requires: nnstreamer-nntrainer = %{version}-%{release}
BuildRequires: nnstreamer-devel
%define enable_nnstreamer_tensor_trainer -Denable-nnstreamer-tensor-trainer=true
%define enable_nnstreamer_tensor_trainer -Denable-nnstreamer-tensor-trainer=enabled
%endif # nnstreamer_trainer
%endif # tizen

Expand Down Expand Up @@ -413,8 +413,8 @@ meson --buildtype=plain --prefix=%{_prefix} --sysconfdir=%{_sysconfdir} \
%{enable_profile} %{enable_nnstreamer_backbone} %{enable_tflite_backbone} \
%{enable_tflite_interpreter} %{capi_ml_pkg_dep_resolution} \
%{enable_reduce_tolerance} %{configure_subplugin_install_path} %{enable_debug} \
-Dml-api-support=enabled -Denable-nnstreamer-tensor-filter=enabled \
-Denable-nnstreamer-tensor-trainer=enabled -Denable-capi=enabled \
-Dml-api-support=enabled \
-Denable-capi=enabled \
%{fp16_support} %{neon_support} build

ninja -C build %{?_smp_mflags}
Expand Down Expand Up @@ -565,9 +565,18 @@ cp -r result %{buildroot}%{_datadir}/nntrainer/unittest/
%{_includedir}/nntrainer/util_func.h
%{_includedir}/nntrainer/fp16.h
%{_includedir}/nntrainer/util_simd.h
# In the current version, Neon SIMD is enabled only when FP16 is enabled with AArch64.
# This may be subject to change in future versions.
%ifarch aarch64
%if 0%{?enable_fp16}
%{_includedir}/nntrainer/util_simd_neon.h
%{_includedir}/nntrainer/blas_neon.h
%{_includedir}/nntrainer/hgemm.h
%{_includedir}/nntrainer/hgemm_util.h
%endif
%endif
%{_includedir}/nntrainer/acti_func.h


%files devel-static
%{_libdir}/libnntrainer*.a
Expand Down

0 comments on commit 6d6e924

Please sign in to comment.